Serialize grammar for untitled buffers

Antonio Scandurra 2015-12-16 10:35:23 +01:00
parent 8db49fc08d
commit 19ff676c7b
2 changed files with 62 additions and 13 deletions
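In short: a tokenized buffer backed by an untitled (path-less) buffer has no file path from which its grammar can be re-selected, so serialize() now records the current grammar's scope name and the constructor uses it to restore that grammar on deserialization, either immediately when the grammar is already registered or later, as soon as it is added to the registry. As a rough illustration only (not part of the commit; all field values below are made up), the serialized state of an untitled CoffeeScript buffer would look something like this:

# Illustrative sketch, not from the commit: approximate shape of the
# TokenizedBuffer.serialize() result for an untitled buffer. Values are made up.
state =
  deserializer: 'TokenizedBuffer'
  bufferPath: undefined               # untitled buffers have no path
  bufferId: 7                         # whatever @buffer.getId() returned
  tabLength: 2
  ignoreInvisibles: false
  largeFileMode: false
  grammarScopeName: 'source.coffee'   # stored only when bufferPath is absent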

View File

@@ -26,8 +26,13 @@ describe "TokenizedBuffer", ->
 
   describe "serialization", ->
     describe "when the underlying buffer has a path", ->
-      it "deserializes it searching among the buffers in the current project", ->
+      beforeEach ->
         buffer = atom.project.bufferForPathSync('sample.js')
+
+        waitsForPromise ->
+          atom.packages.activatePackage('language-coffee-script')
+
+      it "deserializes it searching among the buffers in the current project", ->
         tokenizedBufferA = new TokenizedBuffer({
           buffer, config: atom.config, grammarRegistry: atom.grammars, packageManager: atom.packages, assert: atom.assert
         })
@@ -39,7 +44,6 @@ describe "TokenizedBuffer", ->
         expect(tokenizedBufferB.buffer).toBe(tokenizedBufferA.buffer)
 
       it "does not serialize / deserialize the current grammar", ->
-        buffer = atom.project.bufferForPathSync('sample.js')
         tokenizedBufferA = new TokenizedBuffer({
           buffer, config: atom.config, grammarRegistry: atom.grammars, packageManager: atom.packages, assert: atom.assert
         })
@@ -51,12 +55,13 @@ describe "TokenizedBuffer", ->
           atom
         )
-        expect(tokenizedBufferB.grammar).toBe(autoSelectedGrammar)
+        expect(tokenizedBufferB.grammar).toBe(atom.grammars.grammarForScopeName('source.js'))
 
     describe "when the underlying buffer has no path", ->
-      it "deserializes it searching among the buffers in the current project", ->
+      beforeEach ->
         buffer = atom.project.bufferForPathSync(null)
+
+      it "deserializes it searching among the buffers in the current project", ->
         tokenizedBufferA = new TokenizedBuffer({
           buffer, config: atom.config, grammarRegistry: atom.grammars, packageManager: atom.packages, assert: atom.assert
         })
@@ -67,6 +72,38 @@ describe "TokenizedBuffer", ->
         expect(tokenizedBufferB.buffer).toBe(tokenizedBufferA.buffer)
 
+      it "deserializes the previously selected grammar as soon as it's added when not available in the grammar registry", ->
+        tokenizedBufferA = new TokenizedBuffer({
+          buffer, config: atom.config, grammarRegistry: atom.grammars, packageManager: atom.packages, assert: atom.assert
+        })
+        tokenizedBufferA.setGrammar(atom.grammars.grammarForScopeName("source.js"))
+        atom.grammars.removeGrammarForScopeName(tokenizedBufferA.grammar.scopeName)
+
+        tokenizedBufferB = TokenizedBuffer.deserialize(
+          JSON.parse(JSON.stringify(tokenizedBufferA.serialize())),
+          atom
+        )
+        expect(tokenizedBufferB.grammar).not.toBeFalsy()
+        expect(tokenizedBufferB.grammar).not.toBe(tokenizedBufferA.grammar)
+
+        atom.grammars.addGrammar(tokenizedBufferA.grammar)
+        expect(tokenizedBufferB.grammar).toBe(tokenizedBufferA.grammar)
+
+      it "deserializes the previously selected grammar on construction when available in the grammar registry", ->
+        tokenizedBufferA = new TokenizedBuffer({
+          buffer, config: atom.config, grammarRegistry: atom.grammars, packageManager: atom.packages, assert: atom.assert
+        })
+        tokenizedBufferA.setGrammar(atom.grammars.grammarForScopeName("source.js"))
+
+        tokenizedBufferB = TokenizedBuffer.deserialize(
+          JSON.parse(JSON.stringify(tokenizedBufferA.serialize())),
+          atom
+        )
+        expect(tokenizedBufferB.grammar).toBe(tokenizedBufferA.grammar)
+
   describe "when the buffer is destroyed", ->
     beforeEach ->
       buffer = atom.project.bufferForPathSync('sample.js')
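Taken together, the specs above exercise roughly the following round trip. This is a sketch under the same assumptions the specs make (the atom test globals are available, the relevant grammar package is activated, and TokenizedBuffer is required as in the spec file), not an excerpt from the commit:

# Sketch mirroring the spec setup; TokenizedBuffer is assumed to be required
# the same way the spec requires it.
buffer = atom.project.bufferForPathSync(null)          # untitled buffer
tokenizedBuffer = new TokenizedBuffer({
  buffer, config: atom.config, grammarRegistry: atom.grammars,
  packageManager: atom.packages, assert: atom.assert
})
tokenizedBuffer.setGrammar(atom.grammars.grammarForScopeName('source.js'))

# The state survives a JSON round trip, as in the specs...
state = JSON.parse(JSON.stringify(tokenizedBuffer.serialize()))

# ...and deserialization restores the manually chosen grammar, because its
# scope name was recorded for the path-less buffer.
restored = TokenizedBuffer.deserialize(state, atom)
console.log restored.grammar.scopeName                 # 'source.js'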

View File

@@ -36,7 +36,7 @@ class TokenizedBuffer extends Model
   constructor: (params) ->
     {
       @buffer, @tabLength, @ignoreInvisibles, @largeFileMode, @config,
-      @grammarRegistry, @packageManager, @assert
+      @grammarRegistry, @packageManager, @assert, grammarScopeName
     } = params
 
     @emitter = new Emitter
@@ -49,18 +49,26 @@ class TokenizedBuffer extends Model
     @disposables.add @buffer.preemptDidChange (e) => @handleBufferChange(e)
     @disposables.add @buffer.onDidChangePath (@bufferPath) => @reloadGrammar()
 
-    @reloadGrammar()
+    if grammar = @grammarRegistry.grammarForScopeName(grammarScopeName)
+      @setGrammar(grammar)
+    else
+      @reloadGrammar()
+      @grammarToRestoreScopeName = grammarScopeName
 
   destroyed: ->
     @disposables.dispose()
 
   serialize: ->
-    deserializer: 'TokenizedBuffer'
-    bufferPath: @buffer.getPath()
-    bufferId: @buffer.getId()
-    tabLength: @tabLength
-    ignoreInvisibles: @ignoreInvisibles
-    largeFileMode: @largeFileMode
+    state = {
+      deserializer: 'TokenizedBuffer'
+      bufferPath: @buffer.getPath()
+      bufferId: @buffer.getId()
+      tabLength: @tabLength
+      ignoreInvisibles: @ignoreInvisibles
+      largeFileMode: @largeFileMode
+    }
+    state.grammarScopeName = @grammar?.scopeName unless @buffer.getPath()
+    state
 
   observeGrammar: (callback) ->
     callback(@grammar)
@@ -76,7 +84,9 @@ class TokenizedBuffer extends Model
     @emitter.on 'did-tokenize', callback
 
   grammarAddedOrUpdated: (grammar) =>
-    if grammar.injectionSelector?
+    if @grammarToRestoreScopeName is grammar.scopeName
+      @setGrammar(grammar)
+    else if grammar.injectionSelector?
       @retokenizeLines() if @hasTokenForSelector(grammar.injectionSelector)
     else
       newScore = @grammarRegistry.getGrammarScore(grammar, @buffer.getPath(), @getGrammarSelectionContent())
@@ -89,6 +99,8 @@ class TokenizedBuffer extends Model
     @rootScopeDescriptor = new ScopeDescriptor(scopes: [@grammar.scopeName])
     @currentGrammarScore = score ? @grammarRegistry.getGrammarScore(grammar, @buffer.getPath(), @getGrammarSelectionContent())
 
+    @grammarToRestoreScopeName = null
+
     @grammarUpdateDisposable?.dispose()
     @grammarUpdateDisposable = @grammar.onDidUpdate => @retokenizeLines()
     @disposables.add(@grammarUpdateDisposable)
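For untitled buffers whose grammar is not yet registered at deserialization time, the restore is deferred: the constructor parks the scope name in @grammarToRestoreScopeName, grammarAddedOrUpdated adopts a newly added grammar whose scope name matches, and setGrammar clears the pending name so later registry updates go back through the normal scoring path. A sketch of that deferred path, again assuming the atom test globals used in the specs rather than anything introduced by this commit:

# Sketch of the deferred restore; mirrors the "as soon as it's added" spec.
buffer = atom.project.bufferForPathSync(null)
tokenizedBufferA = new TokenizedBuffer({
  buffer, config: atom.config, grammarRegistry: atom.grammars,
  packageManager: atom.packages, assert: atom.assert
})
tokenizedBufferA.setGrammar(atom.grammars.grammarForScopeName('source.js'))
atom.grammars.removeGrammarForScopeName(tokenizedBufferA.grammar.scopeName)

# The grammar is gone from the registry, so deserialization falls back to
# auto-selection for now...
tokenizedBufferB = TokenizedBuffer.deserialize(
  JSON.parse(JSON.stringify(tokenizedBufferA.serialize())),
  atom
)
console.log tokenizedBufferB.grammar is tokenizedBufferA.grammar   # false

# ...but re-adding the grammar fires grammarAddedOrUpdated, which matches the
# pending @grammarToRestoreScopeName and calls setGrammar.
atom.grammars.addGrammar(tokenizedBufferA.grammar)
console.log tokenizedBufferB.grammar is tokenizedBufferA.grammar   # true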