mirror of https://github.com/pulsar-edit/pulsar.git (synced 2024-09-21 16:08:24 +03:00)

Organize TokenizedBuffer test

commit 4c2680e68a (parent 15a5728751)
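The reorganization is straightforward to read from the hunks below: the existing top-level suites are wrapped in a new describe('tokenizing', ...) block (which re-indents every moved line), 'when the grammar is tokenized' is renamed to 'when tokenization completes', the null-grammar suite moves inside the new block, and the '::isFoldableAtRow(row)' / '::tokenizedLineForRow(row)' headings become '.isFoldableAtRow(row)' / '.tokenizedLineForRow(row)'. A minimal sketch of the wrapping pattern, with suite bodies elided (an illustration only, not the full spec):

// Before: suites sit directly under the top-level describe.
describe('TokenizedBuffer', () => {
  describe('when the buffer is destroyed', () => { /* ... */ })
  describe('when the buffer contains soft-tabs', () => { /* ... */ })
})

// After: the same suites are grouped under a 'tokenizing' block.
describe('TokenizedBuffer', () => {
  describe('tokenizing', () => {
    describe('when the buffer is destroyed', () => { /* ... */ })
    describe('when the buffer contains soft-tabs', () => { /* ... */ })
  })
})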
@@ -54,324 +54,350 @@ describe('TokenizedBuffer', () => {
     })
   })
 
-  describe('when the buffer is destroyed', () => {
-    beforeEach(() => {
-      buffer = atom.project.bufferForPathSync('sample.js')
-      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
-      startTokenizing(tokenizedBuffer)
-    })
-
-    it('stops tokenization', () => {
-      tokenizedBuffer.destroy()
-      spyOn(tokenizedBuffer, 'tokenizeNextChunk')
-      advanceClock()
-      expect(tokenizedBuffer.tokenizeNextChunk).not.toHaveBeenCalled()
-    })
-  })
-
-  describe('when the buffer contains soft-tabs', () => {
-    beforeEach(() => {
-      buffer = atom.project.bufferForPathSync('sample.js')
-      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
-      startTokenizing(tokenizedBuffer)
-    })
-
-    afterEach(() => {
-      tokenizedBuffer.destroy()
-      buffer.release()
-    })
-
-    describe('on construction', () =>
-      it('tokenizes lines chunk at a time in the background', () => {
-        const line0 = tokenizedBuffer.tokenizedLines[0]
-        expect(line0).toBeUndefined()
-
-        const line11 = tokenizedBuffer.tokenizedLines[11]
-        expect(line11).toBeUndefined()
-
-        // tokenize chunk 1
-        advanceClock()
-        expect(tokenizedBuffer.tokenizedLines[0].ruleStack != null).toBeTruthy()
-        expect(tokenizedBuffer.tokenizedLines[4].ruleStack != null).toBeTruthy()
-        expect(tokenizedBuffer.tokenizedLines[5]).toBeUndefined()
-
-        // tokenize chunk 2
-        advanceClock()
-        expect(tokenizedBuffer.tokenizedLines[5].ruleStack != null).toBeTruthy()
-        expect(tokenizedBuffer.tokenizedLines[9].ruleStack != null).toBeTruthy()
-        expect(tokenizedBuffer.tokenizedLines[10]).toBeUndefined()
-
-        // tokenize last chunk
-        advanceClock()
-        expect(tokenizedBuffer.tokenizedLines[10].ruleStack != null).toBeTruthy()
-        expect(tokenizedBuffer.tokenizedLines[12].ruleStack != null).toBeTruthy()
-      })
-    )
-
-    describe('when the buffer is partially tokenized', () => {
-      beforeEach(() => {
-        // tokenize chunk 1 only
-        advanceClock()
-      })
-
-      describe('when there is a buffer change inside the tokenized region', () => {
-        describe('when lines are added', () => {
-          it('pushes the invalid rows down', () => {
-            expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
-            buffer.insert([1, 0], '\n\n')
-            expect(tokenizedBuffer.firstInvalidRow()).toBe(7)
-          })
-        })
-
-        describe('when lines are removed', () => {
-          it('pulls the invalid rows up', () => {
-            expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
-            buffer.delete([[1, 0], [3, 0]])
-            expect(tokenizedBuffer.firstInvalidRow()).toBe(2)
-          })
-        })
-
-        describe('when the change invalidates all the lines before the current invalid region', () => {
-          it('retokenizes the invalidated lines and continues into the valid region', () => {
-            expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
-            buffer.insert([2, 0], '/*')
-            expect(tokenizedBuffer.firstInvalidRow()).toBe(3)
-            advanceClock()
-            expect(tokenizedBuffer.firstInvalidRow()).toBe(8)
-          })
-        })
-      })
-
-      describe('when there is a buffer change surrounding an invalid row', () => {
-        it('pushes the invalid row to the end of the change', () => {
-          buffer.setTextInRange([[4, 0], [6, 0]], '\n\n\n')
-          expect(tokenizedBuffer.firstInvalidRow()).toBe(8)
-        })
-      })
-
-      describe('when there is a buffer change inside an invalid region', () => {
-        it('does not attempt to tokenize the lines in the change, and preserves the existing invalid row', () => {
-          expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
-          buffer.setTextInRange([[6, 0], [7, 0]], '\n\n\n')
-          expect(tokenizedBuffer.tokenizedLines[6]).toBeUndefined()
-          expect(tokenizedBuffer.tokenizedLines[7]).toBeUndefined()
-          expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
-        })
-      })
-    })
-
-    describe('when the buffer is fully tokenized', () => {
-      beforeEach(() => fullyTokenize(tokenizedBuffer))
-
-      describe('when there is a buffer change that is smaller than the chunk size', () => {
-        describe('when lines are updated, but none are added or removed', () => {
-          it('updates tokens to reflect the change', () => {
-            buffer.setTextInRange([[0, 0], [2, 0]], 'foo()\n7\n')
-
-            expect(tokenizedBuffer.tokenizedLines[0].tokens[1]).toEqual({value: '(', scopes: ['source.js', 'meta.function-call.js', 'meta.arguments.js', 'punctuation.definition.arguments.begin.bracket.round.js']})
-            expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual({value: '7', scopes: ['source.js', 'constant.numeric.decimal.js']})
-            // line 2 is unchanged
-            expect(tokenizedBuffer.tokenizedLines[2].tokens[1]).toEqual({value: 'if', scopes: ['source.js', 'keyword.control.js']})
-          })
-
-          describe('when the change invalidates the tokenization of subsequent lines', () => {
-            it('schedules the invalidated lines to be tokenized in the background', () => {
-              buffer.insert([5, 30], '/* */')
-              buffer.insert([2, 0], '/*')
-              expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js'])
-
-              advanceClock()
-              expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
-              expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
-              expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
-            })
-          })
-
-          it('resumes highlighting with the state of the previous line', () => {
-            buffer.insert([0, 0], '/*')
-            buffer.insert([5, 0], '*/')
-
-            buffer.insert([1, 0], 'var ')
-            expect(tokenizedBuffer.tokenizedLines[1].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
-          })
-        })
-
-        describe('when lines are both updated and removed', () => {
-          it('updates tokens to reflect the change', () => {
-            buffer.setTextInRange([[1, 0], [3, 0]], 'foo()')
-
-            // previous line 0 remains
-            expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual({value: 'var', scopes: ['source.js', 'storage.type.var.js']})
-
-            // previous line 3 should be combined with input to form line 1
-            expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual({value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})
-            expect(tokenizedBuffer.tokenizedLines[1].tokens[6]).toEqual({value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']})
-
-            // lines below deleted regions should be shifted upward
-            expect(tokenizedBuffer.tokenizedLines[2].tokens[1]).toEqual({value: 'while', scopes: ['source.js', 'keyword.control.js']})
-            expect(tokenizedBuffer.tokenizedLines[3].tokens[1]).toEqual({value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']})
-            expect(tokenizedBuffer.tokenizedLines[4].tokens[1]).toEqual({value: '<', scopes: ['source.js', 'keyword.operator.comparison.js']})
-          })
-        })
-
-        describe('when the change invalidates the tokenization of subsequent lines', () => {
-          it('schedules the invalidated lines to be tokenized in the background', () => {
-            buffer.insert([5, 30], '/* */')
-            buffer.setTextInRange([[2, 0], [3, 0]], '/*')
-            expect(tokenizedBuffer.tokenizedLines[2].tokens[0].scopes).toEqual(['source.js', 'comment.block.js', 'punctuation.definition.comment.begin.js'])
-            expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js'])
-
-            advanceClock()
-            expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
-            expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
-          })
-        })
-
-        describe('when lines are both updated and inserted', () => {
-          it('updates tokens to reflect the change', () => {
-            buffer.setTextInRange([[1, 0], [2, 0]], 'foo()\nbar()\nbaz()\nquux()')
-
-            // previous line 0 remains
-            expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual({ value: 'var', scopes: ['source.js', 'storage.type.var.js']})
-
-            // 3 new lines inserted
-            expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual({value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})
-            expect(tokenizedBuffer.tokenizedLines[2].tokens[0]).toEqual({value: 'bar', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})
-            expect(tokenizedBuffer.tokenizedLines[3].tokens[0]).toEqual({value: 'baz', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})
-
-            // previous line 2 is joined with quux() on line 4
-            expect(tokenizedBuffer.tokenizedLines[4].tokens[0]).toEqual({value: 'quux', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})
-            expect(tokenizedBuffer.tokenizedLines[4].tokens[4]).toEqual({value: 'if', scopes: ['source.js', 'keyword.control.js']})
-
-            // previous line 3 is pushed down to become line 5
-            expect(tokenizedBuffer.tokenizedLines[5].tokens[3]).toEqual({value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']})
-          })
-        })
-
-        describe('when the change invalidates the tokenization of subsequent lines', () => {
-          it('schedules the invalidated lines to be tokenized in the background', () => {
-            buffer.insert([5, 30], '/* */')
-            buffer.insert([2, 0], '/*\nabcde\nabcder')
-            expect(tokenizedBuffer.tokenizedLines[2].tokens[0].scopes).toEqual(['source.js', 'comment.block.js', 'punctuation.definition.comment.begin.js'])
-            expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
-            expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
-            expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual(['source.js'])
-
-            advanceClock() // tokenize invalidated lines in background
-            expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
-            expect(tokenizedBuffer.tokenizedLines[6].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
-            expect(tokenizedBuffer.tokenizedLines[7].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
-            expect(tokenizedBuffer.tokenizedLines[8].tokens[0].scopes).not.toBe(['source.js', 'comment.block.js'])
-          })
-        })
-      })
-
-      describe('when there is an insertion that is larger than the chunk size', () =>
-        it('tokenizes the initial chunk synchronously, then tokenizes the remaining lines in the background', () => {
-          const commentBlock = _.multiplyString('// a comment\n', tokenizedBuffer.chunkSize + 2)
-          buffer.insert([0, 0], commentBlock)
-          expect(tokenizedBuffer.tokenizedLines[0].ruleStack != null).toBeTruthy()
-          expect(tokenizedBuffer.tokenizedLines[4].ruleStack != null).toBeTruthy()
-          expect(tokenizedBuffer.tokenizedLines[5]).toBeUndefined()
-
-          advanceClock()
-          expect(tokenizedBuffer.tokenizedLines[5].ruleStack != null).toBeTruthy()
-          expect(tokenizedBuffer.tokenizedLines[6].ruleStack != null).toBeTruthy()
-        })
-      )
-
-      it('does not break out soft tabs across a scope boundary', async () => {
-        await atom.packages.activatePackage('language-gfm')
-
-        tokenizedBuffer.setTabLength(4)
-        tokenizedBuffer.setGrammar(atom.grammars.selectGrammar('.md'))
-        buffer.setText('    <![]()\n    ')
-        fullyTokenize(tokenizedBuffer)
-
-        let length = 0
-        for (let tag of tokenizedBuffer.tokenizedLines[1].tags) {
-          if (tag > 0) length += tag
-        }
-
-        expect(length).toBe(4)
-      })
-    })
-  })
-
-  describe('when the buffer contains hard-tabs', () => {
-    beforeEach(async () => {
-      atom.packages.activatePackage('language-coffee-script')
-
-      buffer = atom.project.bufferForPathSync('sample-with-tabs.coffee')
-      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.coffee'), tabLength: 2})
-      startTokenizing(tokenizedBuffer)
-    })
-
-    afterEach(() => {
-      tokenizedBuffer.destroy()
-      buffer.release()
-    })
-
-    describe('when the buffer is fully tokenized', () => {
-      beforeEach(() => fullyTokenize(tokenizedBuffer))
-    })
-  })
-
-  describe('when the grammar is tokenized', () => {
-    it('emits the `tokenized` event', async () => {
-      const editor = await atom.workspace.open('sample.js')
-
-      const tokenizedHandler = jasmine.createSpy('tokenized handler')
-      editor.tokenizedBuffer.onDidTokenize(tokenizedHandler)
-      fullyTokenize(editor.tokenizedBuffer)
-      expect(tokenizedHandler.callCount).toBe(1)
-    })
-
-    it("doesn't re-emit the `tokenized` event when it is re-tokenized", async () => {
-      const editor = await atom.workspace.open('sample.js')
-      fullyTokenize(editor.tokenizedBuffer)
-
-      const tokenizedHandler = jasmine.createSpy('tokenized handler')
-      editor.tokenizedBuffer.onDidTokenize(tokenizedHandler)
-      editor.getBuffer().insert([0, 0], "'")
-      fullyTokenize(editor.tokenizedBuffer)
-      expect(tokenizedHandler).not.toHaveBeenCalled()
-    })
-  })
-
-  describe('when the grammar is updated because a grammar it includes is activated', async () => {
-    it('re-emits the `tokenized` event', async () => {
-      const editor = await atom.workspace.open('coffee.coffee')
-
-      const tokenizedHandler = jasmine.createSpy('tokenized handler')
-      editor.tokenizedBuffer.onDidTokenize(tokenizedHandler)
-      fullyTokenize(editor.tokenizedBuffer)
-      tokenizedHandler.reset()
-
-      await atom.packages.activatePackage('language-coffee-script')
-      fullyTokenize(editor.tokenizedBuffer)
-      expect(tokenizedHandler.callCount).toBe(1)
-    })
-
-    it('retokenizes the buffer', async () => {
-      await atom.packages.activatePackage('language-ruby-on-rails')
-      await atom.packages.activatePackage('language-ruby')
-
-      buffer = atom.project.bufferForPathSync()
-      buffer.setText("<div class='name'><%= User.find(2).full_name %></div>")
-
-      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.selectGrammar('test.erb'), tabLength: 2})
-      fullyTokenize(tokenizedBuffer)
-      expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual({
-        value: "<div class='name'>",
-        scopes: ['text.html.ruby']
-      })
-
-      await atom.packages.activatePackage('language-html')
-      fullyTokenize(tokenizedBuffer)
-      expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual({
-        value: '<',
-        scopes: ['text.html.ruby', 'meta.tag.block.any.html', 'punctuation.definition.tag.begin.html']
-      })
-    })
-  })
+  describe('tokenizing', () => {
+    describe('when the buffer is destroyed', () => {
+      beforeEach(() => {
+        buffer = atom.project.bufferForPathSync('sample.js')
+        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
+        startTokenizing(tokenizedBuffer)
+      })
+
+      it('stops tokenization', () => {
+        tokenizedBuffer.destroy()
+        spyOn(tokenizedBuffer, 'tokenizeNextChunk')
+        advanceClock()
+        expect(tokenizedBuffer.tokenizeNextChunk).not.toHaveBeenCalled()
+      })
+    })
+
+    describe('when the buffer contains soft-tabs', () => {
+      beforeEach(() => {
+        buffer = atom.project.bufferForPathSync('sample.js')
+        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
+        startTokenizing(tokenizedBuffer)
+      })
+
+      afterEach(() => {
+        tokenizedBuffer.destroy()
+        buffer.release()
+      })
+
+      describe('on construction', () =>
+        it('tokenizes lines chunk at a time in the background', () => {
+          const line0 = tokenizedBuffer.tokenizedLines[0]
+          expect(line0).toBeUndefined()
+
+          const line11 = tokenizedBuffer.tokenizedLines[11]
+          expect(line11).toBeUndefined()
+
+          // tokenize chunk 1
+          advanceClock()
+          expect(tokenizedBuffer.tokenizedLines[0].ruleStack != null).toBeTruthy()
+          expect(tokenizedBuffer.tokenizedLines[4].ruleStack != null).toBeTruthy()
+          expect(tokenizedBuffer.tokenizedLines[5]).toBeUndefined()
+
+          // tokenize chunk 2
+          advanceClock()
+          expect(tokenizedBuffer.tokenizedLines[5].ruleStack != null).toBeTruthy()
+          expect(tokenizedBuffer.tokenizedLines[9].ruleStack != null).toBeTruthy()
+          expect(tokenizedBuffer.tokenizedLines[10]).toBeUndefined()
+
+          // tokenize last chunk
+          advanceClock()
+          expect(tokenizedBuffer.tokenizedLines[10].ruleStack != null).toBeTruthy()
+          expect(tokenizedBuffer.tokenizedLines[12].ruleStack != null).toBeTruthy()
+        })
+      )
+
+      describe('when the buffer is partially tokenized', () => {
+        beforeEach(() => {
+          // tokenize chunk 1 only
+          advanceClock()
+        })
+
+        describe('when there is a buffer change inside the tokenized region', () => {
+          describe('when lines are added', () => {
+            it('pushes the invalid rows down', () => {
+              expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
+              buffer.insert([1, 0], '\n\n')
+              expect(tokenizedBuffer.firstInvalidRow()).toBe(7)
+            })
+          })
+
+          describe('when lines are removed', () => {
+            it('pulls the invalid rows up', () => {
+              expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
+              buffer.delete([[1, 0], [3, 0]])
+              expect(tokenizedBuffer.firstInvalidRow()).toBe(2)
+            })
+          })
+
+          describe('when the change invalidates all the lines before the current invalid region', () => {
+            it('retokenizes the invalidated lines and continues into the valid region', () => {
+              expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
+              buffer.insert([2, 0], '/*')
+              expect(tokenizedBuffer.firstInvalidRow()).toBe(3)
+              advanceClock()
+              expect(tokenizedBuffer.firstInvalidRow()).toBe(8)
+            })
+          })
+        })
+
+        describe('when there is a buffer change surrounding an invalid row', () => {
+          it('pushes the invalid row to the end of the change', () => {
+            buffer.setTextInRange([[4, 0], [6, 0]], '\n\n\n')
+            expect(tokenizedBuffer.firstInvalidRow()).toBe(8)
+          })
+        })
+
+        describe('when there is a buffer change inside an invalid region', () => {
+          it('does not attempt to tokenize the lines in the change, and preserves the existing invalid row', () => {
+            expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
+            buffer.setTextInRange([[6, 0], [7, 0]], '\n\n\n')
+            expect(tokenizedBuffer.tokenizedLines[6]).toBeUndefined()
+            expect(tokenizedBuffer.tokenizedLines[7]).toBeUndefined()
+            expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
+          })
+        })
+      })
+
+      describe('when the buffer is fully tokenized', () => {
+        beforeEach(() => fullyTokenize(tokenizedBuffer))
+
+        describe('when there is a buffer change that is smaller than the chunk size', () => {
+          describe('when lines are updated, but none are added or removed', () => {
+            it('updates tokens to reflect the change', () => {
+              buffer.setTextInRange([[0, 0], [2, 0]], 'foo()\n7\n')
+
+              expect(tokenizedBuffer.tokenizedLines[0].tokens[1]).toEqual({value: '(', scopes: ['source.js', 'meta.function-call.js', 'meta.arguments.js', 'punctuation.definition.arguments.begin.bracket.round.js']})
+              expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual({value: '7', scopes: ['source.js', 'constant.numeric.decimal.js']})
+              // line 2 is unchanged
+              expect(tokenizedBuffer.tokenizedLines[2].tokens[1]).toEqual({value: 'if', scopes: ['source.js', 'keyword.control.js']})
+            })
+
+            describe('when the change invalidates the tokenization of subsequent lines', () => {
+              it('schedules the invalidated lines to be tokenized in the background', () => {
+                buffer.insert([5, 30], '/* */')
+                buffer.insert([2, 0], '/*')
+                expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js'])
+
+                advanceClock()
+                expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
+                expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
+                expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
+              })
+            })
+
+            it('resumes highlighting with the state of the previous line', () => {
+              buffer.insert([0, 0], '/*')
+              buffer.insert([5, 0], '*/')
+
+              buffer.insert([1, 0], 'var ')
+              expect(tokenizedBuffer.tokenizedLines[1].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
+            })
+          })
+
+          describe('when lines are both updated and removed', () => {
+            it('updates tokens to reflect the change', () => {
+              buffer.setTextInRange([[1, 0], [3, 0]], 'foo()')
+
+              // previous line 0 remains
+              expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual({value: 'var', scopes: ['source.js', 'storage.type.var.js']})
+
+              // previous line 3 should be combined with input to form line 1
+              expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual({value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})
+              expect(tokenizedBuffer.tokenizedLines[1].tokens[6]).toEqual({value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']})
+
+              // lines below deleted regions should be shifted upward
+              expect(tokenizedBuffer.tokenizedLines[2].tokens[1]).toEqual({value: 'while', scopes: ['source.js', 'keyword.control.js']})
+              expect(tokenizedBuffer.tokenizedLines[3].tokens[1]).toEqual({value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']})
+              expect(tokenizedBuffer.tokenizedLines[4].tokens[1]).toEqual({value: '<', scopes: ['source.js', 'keyword.operator.comparison.js']})
+            })
+          })
+
+          describe('when the change invalidates the tokenization of subsequent lines', () => {
+            it('schedules the invalidated lines to be tokenized in the background', () => {
+              buffer.insert([5, 30], '/* */')
+              buffer.setTextInRange([[2, 0], [3, 0]], '/*')
+              expect(tokenizedBuffer.tokenizedLines[2].tokens[0].scopes).toEqual(['source.js', 'comment.block.js', 'punctuation.definition.comment.begin.js'])
+              expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js'])
+
+              advanceClock()
+              expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
+              expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
+            })
+          })
+
+          describe('when lines are both updated and inserted', () => {
+            it('updates tokens to reflect the change', () => {
+              buffer.setTextInRange([[1, 0], [2, 0]], 'foo()\nbar()\nbaz()\nquux()')
+
+              // previous line 0 remains
+              expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual({ value: 'var', scopes: ['source.js', 'storage.type.var.js']})
+
+              // 3 new lines inserted
+              expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual({value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})
+              expect(tokenizedBuffer.tokenizedLines[2].tokens[0]).toEqual({value: 'bar', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})
+              expect(tokenizedBuffer.tokenizedLines[3].tokens[0]).toEqual({value: 'baz', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})
+
+              // previous line 2 is joined with quux() on line 4
+              expect(tokenizedBuffer.tokenizedLines[4].tokens[0]).toEqual({value: 'quux', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})
+              expect(tokenizedBuffer.tokenizedLines[4].tokens[4]).toEqual({value: 'if', scopes: ['source.js', 'keyword.control.js']})
+
+              // previous line 3 is pushed down to become line 5
+              expect(tokenizedBuffer.tokenizedLines[5].tokens[3]).toEqual({value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']})
+            })
+          })
+
+          describe('when the change invalidates the tokenization of subsequent lines', () => {
+            it('schedules the invalidated lines to be tokenized in the background', () => {
+              buffer.insert([5, 30], '/* */')
+              buffer.insert([2, 0], '/*\nabcde\nabcder')
+              expect(tokenizedBuffer.tokenizedLines[2].tokens[0].scopes).toEqual(['source.js', 'comment.block.js', 'punctuation.definition.comment.begin.js'])
+              expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
+              expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
+              expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual(['source.js'])
+
+              advanceClock() // tokenize invalidated lines in background
+              expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
+              expect(tokenizedBuffer.tokenizedLines[6].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
+              expect(tokenizedBuffer.tokenizedLines[7].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
+              expect(tokenizedBuffer.tokenizedLines[8].tokens[0].scopes).not.toBe(['source.js', 'comment.block.js'])
+            })
+          })
+        })
+
+        describe('when there is an insertion that is larger than the chunk size', () =>
+          it('tokenizes the initial chunk synchronously, then tokenizes the remaining lines in the background', () => {
+            const commentBlock = _.multiplyString('// a comment\n', tokenizedBuffer.chunkSize + 2)
+            buffer.insert([0, 0], commentBlock)
+            expect(tokenizedBuffer.tokenizedLines[0].ruleStack != null).toBeTruthy()
+            expect(tokenizedBuffer.tokenizedLines[4].ruleStack != null).toBeTruthy()
+            expect(tokenizedBuffer.tokenizedLines[5]).toBeUndefined()
+
+            advanceClock()
+            expect(tokenizedBuffer.tokenizedLines[5].ruleStack != null).toBeTruthy()
+            expect(tokenizedBuffer.tokenizedLines[6].ruleStack != null).toBeTruthy()
+          })
+        )
+
+        it('does not break out soft tabs across a scope boundary', async () => {
+          await atom.packages.activatePackage('language-gfm')
+
+          tokenizedBuffer.setTabLength(4)
+          tokenizedBuffer.setGrammar(atom.grammars.selectGrammar('.md'))
+          buffer.setText('    <![]()\n    ')
+          fullyTokenize(tokenizedBuffer)
+
+          let length = 0
+          for (let tag of tokenizedBuffer.tokenizedLines[1].tags) {
+            if (tag > 0) length += tag
+          }
+
+          expect(length).toBe(4)
+        })
+      })
+    })
+
+    describe('when the buffer contains hard-tabs', () => {
+      beforeEach(async () => {
+        atom.packages.activatePackage('language-coffee-script')
+
+        buffer = atom.project.bufferForPathSync('sample-with-tabs.coffee')
+        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.coffee'), tabLength: 2})
+        startTokenizing(tokenizedBuffer)
+      })
+
+      afterEach(() => {
+        tokenizedBuffer.destroy()
+        buffer.release()
+      })
+
+      describe('when the buffer is fully tokenized', () => {
+        beforeEach(() => fullyTokenize(tokenizedBuffer))
+      })
+    })
+
+    describe('when tokenization completes', () => {
+      it('emits the `tokenized` event', async () => {
+        const editor = await atom.workspace.open('sample.js')
+
+        const tokenizedHandler = jasmine.createSpy('tokenized handler')
+        editor.tokenizedBuffer.onDidTokenize(tokenizedHandler)
+        fullyTokenize(editor.tokenizedBuffer)
+        expect(tokenizedHandler.callCount).toBe(1)
+      })
+
+      it("doesn't re-emit the `tokenized` event when it is re-tokenized", async () => {
+        const editor = await atom.workspace.open('sample.js')
+        fullyTokenize(editor.tokenizedBuffer)
+
+        const tokenizedHandler = jasmine.createSpy('tokenized handler')
+        editor.tokenizedBuffer.onDidTokenize(tokenizedHandler)
+        editor.getBuffer().insert([0, 0], "'")
+        fullyTokenize(editor.tokenizedBuffer)
+        expect(tokenizedHandler).not.toHaveBeenCalled()
+      })
+    })
+
+    describe('when the grammar is updated because a grammar it includes is activated', async () => {
+      it('re-emits the `tokenized` event', async () => {
+        const editor = await atom.workspace.open('coffee.coffee')
+
+        const tokenizedHandler = jasmine.createSpy('tokenized handler')
+        editor.tokenizedBuffer.onDidTokenize(tokenizedHandler)
+        fullyTokenize(editor.tokenizedBuffer)
+        tokenizedHandler.reset()
+
+        await atom.packages.activatePackage('language-coffee-script')
+        fullyTokenize(editor.tokenizedBuffer)
+        expect(tokenizedHandler.callCount).toBe(1)
+      })
+
+      it('retokenizes the buffer', async () => {
+        await atom.packages.activatePackage('language-ruby-on-rails')
+        await atom.packages.activatePackage('language-ruby')
+
+        buffer = atom.project.bufferForPathSync()
+        buffer.setText("<div class='name'><%= User.find(2).full_name %></div>")
+
+        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.selectGrammar('test.erb'), tabLength: 2})
+        fullyTokenize(tokenizedBuffer)
+        expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual({
+          value: "<div class='name'>",
+          scopes: ['text.html.ruby']
+        })
+
+        await atom.packages.activatePackage('language-html')
+        fullyTokenize(tokenizedBuffer)
+        expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual({
+          value: '<',
+          scopes: ['text.html.ruby', 'meta.tag.block.any.html', 'punctuation.definition.tag.begin.html']
+        })
+      })
+    })
+
+    describe('when the buffer is configured with the null grammar', () => {
+      it('does not actually tokenize using the grammar', () => {
+        spyOn(NullGrammar, 'tokenizeLine').andCallThrough()
+        buffer = atom.project.bufferForPathSync('sample.will-use-the-null-grammar')
+        buffer.setText('a\nb\nc')
+        tokenizedBuffer = new TokenizedBuffer({buffer, tabLength: 2})
+        const tokenizeCallback = jasmine.createSpy('onDidTokenize')
+        tokenizedBuffer.onDidTokenize(tokenizeCallback)
+
+        expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
+        expect(tokenizedBuffer.tokenizedLines[1]).toBeUndefined()
+        expect(tokenizedBuffer.tokenizedLines[2]).toBeUndefined()
+        expect(tokenizeCallback.callCount).toBe(0)
+        expect(NullGrammar.tokenizeLine).not.toHaveBeenCalled()
+
+        fullyTokenize(tokenizedBuffer)
+        expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
+        expect(tokenizedBuffer.tokenizedLines[1]).toBeUndefined()
+        expect(tokenizedBuffer.tokenizedLines[2]).toBeUndefined()
+        expect(tokenizeCallback.callCount).toBe(0)
+        expect(NullGrammar.tokenizeLine).not.toHaveBeenCalled()
+      })
+    })
+  })
@@ -502,7 +528,7 @@ describe('TokenizedBuffer', () => {
     })
   })
 
-  describe('::isFoldableAtRow(row)', () => {
+  describe('.isFoldableAtRow(row)', () => {
     beforeEach(() => {
       buffer = atom.project.bufferForPathSync('sample.js')
       buffer.insert([10, 0], '  // multi-line\n  // comment\n  // block\n')
@@ -574,7 +600,7 @@ describe('TokenizedBuffer', () => {
     })
   })
 
-  describe('::tokenizedLineForRow(row)', () => {
+  describe('.tokenizedLineForRow(row)', () => {
     it("returns the tokenized line for a row, or a placeholder line if it hasn't been tokenized yet", () => {
       buffer = atom.project.bufferForPathSync('sample.js')
       const grammar = atom.grammars.grammarForScopeName('source.js')
@@ -613,30 +639,6 @@ describe('TokenizedBuffer', () => {
     })
   })
 
-  describe('when the buffer is configured with the null grammar', () => {
-    it('does not actually tokenize using the grammar', () => {
-      spyOn(NullGrammar, 'tokenizeLine').andCallThrough()
-      buffer = atom.project.bufferForPathSync('sample.will-use-the-null-grammar')
-      buffer.setText('a\nb\nc')
-      tokenizedBuffer = new TokenizedBuffer({buffer, tabLength: 2})
-      const tokenizeCallback = jasmine.createSpy('onDidTokenize')
-      tokenizedBuffer.onDidTokenize(tokenizeCallback)
-
-      expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
-      expect(tokenizedBuffer.tokenizedLines[1]).toBeUndefined()
-      expect(tokenizedBuffer.tokenizedLines[2]).toBeUndefined()
-      expect(tokenizeCallback.callCount).toBe(0)
-      expect(NullGrammar.tokenizeLine).not.toHaveBeenCalled()
-
-      fullyTokenize(tokenizedBuffer)
-      expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
-      expect(tokenizedBuffer.tokenizedLines[1]).toBeUndefined()
-      expect(tokenizedBuffer.tokenizedLines[2]).toBeUndefined()
-      expect(tokenizeCallback.callCount).toBe(0)
-      expect(NullGrammar.tokenizeLine).not.toHaveBeenCalled()
-    })
-  })
-
   describe('text decoration layer API', () => {
     describe('iterator', () => {
       it('iterates over the syntactic scope boundaries', () => {