// pulsar/spec/tokenized-buffer-spec.js
const NullGrammar = require('../src/null-grammar')
const TokenizedBuffer = require('../src/tokenized-buffer')
const TextBuffer = require('text-buffer')
const {Point} = TextBuffer
const _ = require('underscore-plus')
const {it, fit, ffit, fffit, beforeEach, afterEach} = require('./async-spec-helpers')

describe('TokenizedBuffer', () => {
  let tokenizedBuffer, buffer

  beforeEach(async () => {
    // enable async tokenization
    TokenizedBuffer.prototype.chunkSize = 5
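    // With chunkSize = 5, each advanceClock() tick tokenizes at most five rows.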
    jasmine.unspy(TokenizedBuffer.prototype, 'tokenizeInBackground')
    await atom.packages.activatePackage('language-javascript')
  })

  afterEach(() => tokenizedBuffer && tokenizedBuffer.destroy())

  function startTokenizing (tokenizedBuffer) {
    tokenizedBuffer.setVisible(true)
  }
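
  // Tokenization only runs while the buffer is visible; advancing the mocked
  // clock until no invalid rows remain drains the background tokenization queue.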
  function fullyTokenize (tokenizedBuffer) {
    tokenizedBuffer.setVisible(true)
    while (tokenizedBuffer.firstInvalidRow() != null) {
      advanceClock()
    }
  }

  describe('serialization', () => {
    describe('when the underlying buffer has a path', () => {
      beforeEach(async () => {
        buffer = atom.project.bufferForPathSync('sample.js')
        await atom.packages.activatePackage('language-coffee-script')
      })

      it('deserializes it by searching among the buffers in the current project', () => {
        const tokenizedBufferA = new TokenizedBuffer({buffer, tabLength: 2})
        const tokenizedBufferB = TokenizedBuffer.deserialize(JSON.parse(JSON.stringify(tokenizedBufferA.serialize())), atom)
        expect(tokenizedBufferB.buffer).toBe(tokenizedBufferA.buffer)
      })
    })

    describe('when the underlying buffer has no path', () => {
      beforeEach(() => { buffer = atom.project.bufferForPathSync(null) })

      it('deserializes it by searching among the buffers in the current project', () => {
        const tokenizedBufferA = new TokenizedBuffer({buffer, tabLength: 2})
        const tokenizedBufferB = TokenizedBuffer.deserialize(JSON.parse(JSON.stringify(tokenizedBufferA.serialize())), atom)
        expect(tokenizedBufferB.buffer).toBe(tokenizedBufferA.buffer)
      })
    })
  })

  describe('when the buffer is destroyed', () => {
    beforeEach(() => {
      buffer = atom.project.bufferForPathSync('sample.js')
      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
      startTokenizing(tokenizedBuffer)
    })

    it('stops tokenization', () => {
      tokenizedBuffer.destroy()
      spyOn(tokenizedBuffer, 'tokenizeNextChunk')
      advanceClock()
      expect(tokenizedBuffer.tokenizeNextChunk).not.toHaveBeenCalled()
    })
  })

  describe('when the buffer contains soft-tabs', () => {
    beforeEach(() => {
      buffer = atom.project.bufferForPathSync('sample.js')
      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
      startTokenizing(tokenizedBuffer)
    })

    afterEach(() => {
      tokenizedBuffer.destroy()
      buffer.release()
    })

    describe('on construction', () =>
      it('tokenizes lines chunk at a time in the background', () => {
        const line0 = tokenizedBuffer.tokenizedLines[0]
        expect(line0).toBeUndefined()

        const line11 = tokenizedBuffer.tokenizedLines[11]
        expect(line11).toBeUndefined()

        // tokenize chunk 1
        advanceClock()
        expect(tokenizedBuffer.tokenizedLines[0].ruleStack != null).toBeTruthy()
        expect(tokenizedBuffer.tokenizedLines[4].ruleStack != null).toBeTruthy()
        expect(tokenizedBuffer.tokenizedLines[5]).toBeUndefined()

        // tokenize chunk 2
        advanceClock()
        expect(tokenizedBuffer.tokenizedLines[5].ruleStack != null).toBeTruthy()
        expect(tokenizedBuffer.tokenizedLines[9].ruleStack != null).toBeTruthy()
        expect(tokenizedBuffer.tokenizedLines[10]).toBeUndefined()

        // tokenize last chunk
        advanceClock()
        expect(tokenizedBuffer.tokenizedLines[10].ruleStack != null).toBeTruthy()
        expect(tokenizedBuffer.tokenizedLines[12].ruleStack != null).toBeTruthy()
      })
    )

    describe('when the buffer is partially tokenized', () => {
      beforeEach(() => {
        // tokenize chunk 1 only
        advanceClock()
      })
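
      // Rows 0-4 are now tokenized, so row 5 is the first invalid row.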

      describe('when there is a buffer change inside the tokenized region', () => {
        describe('when lines are added', () => {
          it('pushes the invalid rows down', () => {
            expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
            buffer.insert([1, 0], '\n\n')
            expect(tokenizedBuffer.firstInvalidRow()).toBe(7)
          })
        })

        describe('when lines are removed', () => {
          it('pulls the invalid rows up', () => {
            expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
            buffer.delete([[1, 0], [3, 0]])
            expect(tokenizedBuffer.firstInvalidRow()).toBe(2)
          })
        })

        describe('when the change invalidates all the lines before the current invalid region', () => {
          it('retokenizes the invalidated lines and continues into the valid region', () => {
            expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
            buffer.insert([2, 0], '/*')
            expect(tokenizedBuffer.firstInvalidRow()).toBe(3)
            advanceClock()
            expect(tokenizedBuffer.firstInvalidRow()).toBe(8)
          })
        })
      })

      describe('when there is a buffer change surrounding an invalid row', () => {
        it('pushes the invalid row to the end of the change', () => {
          buffer.setTextInRange([[4, 0], [6, 0]], '\n\n\n')
          expect(tokenizedBuffer.firstInvalidRow()).toBe(8)
        })
      })

      describe('when there is a buffer change inside an invalid region', () => {
        it('does not attempt to tokenize the lines in the change, and preserves the existing invalid row', () => {
          expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
          buffer.setTextInRange([[6, 0], [7, 0]], '\n\n\n')
          expect(tokenizedBuffer.tokenizedLines[6]).toBeUndefined()
          expect(tokenizedBuffer.tokenizedLines[7]).toBeUndefined()
          expect(tokenizedBuffer.firstInvalidRow()).toBe(5)
        })
      })
    })

    describe('when the buffer is fully tokenized', () => {
      beforeEach(() => fullyTokenize(tokenizedBuffer))

      describe('when there is a buffer change that is smaller than the chunk size', () => {
        describe('when lines are updated, but none are added or removed', () => {
          it('updates tokens to reflect the change', () => {
            buffer.setTextInRange([[0, 0], [2, 0]], 'foo()\n7\n')
            expect(tokenizedBuffer.tokenizedLines[0].tokens[1]).toEqual({value: '(', scopes: ['source.js', 'meta.function-call.js', 'meta.arguments.js', 'punctuation.definition.arguments.begin.bracket.round.js']})
            expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual({value: '7', scopes: ['source.js', 'constant.numeric.decimal.js']})
            // line 2 is unchanged
            expect(tokenizedBuffer.tokenizedLines[2].tokens[1]).toEqual({value: 'if', scopes: ['source.js', 'keyword.control.js']})
          })

          describe('when the change invalidates the tokenization of subsequent lines', () => {
            it('schedules the invalidated lines to be tokenized in the background', () => {
              buffer.insert([5, 30], '/* */')
              buffer.insert([2, 0], '/*')
              expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js'])

              advanceClock()
              expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
              expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
              expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
            })
          })

          it('resumes highlighting with the state of the previous line', () => {
            buffer.insert([0, 0], '/*')
            buffer.insert([5, 0], '*/')
            buffer.insert([1, 0], 'var ')
            expect(tokenizedBuffer.tokenizedLines[1].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
          })
        })

        describe('when lines are both updated and removed', () => {
          it('updates tokens to reflect the change', () => {
            buffer.setTextInRange([[1, 0], [3, 0]], 'foo()')

            // previous line 0 remains
            expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual({value: 'var', scopes: ['source.js', 'storage.type.var.js']})

            // previous line 3 should be combined with input to form line 1
            expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual({value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})
            expect(tokenizedBuffer.tokenizedLines[1].tokens[6]).toEqual({value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']})

            // lines below deleted regions should be shifted upward
            expect(tokenizedBuffer.tokenizedLines[2].tokens[1]).toEqual({value: 'while', scopes: ['source.js', 'keyword.control.js']})
            expect(tokenizedBuffer.tokenizedLines[3].tokens[1]).toEqual({value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']})
            expect(tokenizedBuffer.tokenizedLines[4].tokens[1]).toEqual({value: '<', scopes: ['source.js', 'keyword.operator.comparison.js']})
          })
        })

        describe('when the change invalidates the tokenization of subsequent lines', () => {
          it('schedules the invalidated lines to be tokenized in the background', () => {
            buffer.insert([5, 30], '/* */')
            buffer.setTextInRange([[2, 0], [3, 0]], '/*')
            expect(tokenizedBuffer.tokenizedLines[2].tokens[0].scopes).toEqual(['source.js', 'comment.block.js', 'punctuation.definition.comment.begin.js'])
            expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js'])

            advanceClock()
            expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
            expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
          })
        })

        describe('when lines are both updated and inserted', () => {
          it('updates tokens to reflect the change', () => {
            buffer.setTextInRange([[1, 0], [2, 0]], 'foo()\nbar()\nbaz()\nquux()')

            // previous line 0 remains
            expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual({value: 'var', scopes: ['source.js', 'storage.type.var.js']})

            // 3 new lines inserted
            expect(tokenizedBuffer.tokenizedLines[1].tokens[0]).toEqual({value: 'foo', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})
            expect(tokenizedBuffer.tokenizedLines[2].tokens[0]).toEqual({value: 'bar', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})
            expect(tokenizedBuffer.tokenizedLines[3].tokens[0]).toEqual({value: 'baz', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})

            // previous line 2 is joined with quux() on line 4
            expect(tokenizedBuffer.tokenizedLines[4].tokens[0]).toEqual({value: 'quux', scopes: ['source.js', 'meta.function-call.js', 'entity.name.function.js']})
            expect(tokenizedBuffer.tokenizedLines[4].tokens[4]).toEqual({value: 'if', scopes: ['source.js', 'keyword.control.js']})

            // previous line 3 is pushed down to become line 5
            expect(tokenizedBuffer.tokenizedLines[5].tokens[3]).toEqual({value: '=', scopes: ['source.js', 'keyword.operator.assignment.js']})
          })
        })

        describe('when the change invalidates the tokenization of subsequent lines', () => {
          it('schedules the invalidated lines to be tokenized in the background', () => {
            buffer.insert([5, 30], '/* */')
            buffer.insert([2, 0], '/*\nabcde\nabcder')
            expect(tokenizedBuffer.tokenizedLines[2].tokens[0].scopes).toEqual(['source.js', 'comment.block.js', 'punctuation.definition.comment.begin.js'])
            expect(tokenizedBuffer.tokenizedLines[3].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
            expect(tokenizedBuffer.tokenizedLines[4].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
            expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual(['source.js'])

            advanceClock() // tokenize invalidated lines in background
            expect(tokenizedBuffer.tokenizedLines[5].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
            expect(tokenizedBuffer.tokenizedLines[6].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
            expect(tokenizedBuffer.tokenizedLines[7].tokens[0].scopes).toEqual(['source.js', 'comment.block.js'])
            expect(tokenizedBuffer.tokenizedLines[8].tokens[0].scopes).not.toBe(['source.js', 'comment.block.js'])
          })
        })
      })

      describe('when there is an insertion that is larger than the chunk size', () =>
        it('tokenizes the initial chunk synchronously, then tokenizes the remaining lines in the background', () => {
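          // chunkSize + 2 comment lines ensure the edit spans more than one background chunk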
          const commentBlock = _.multiplyString('// a comment\n', tokenizedBuffer.chunkSize + 2)
          buffer.insert([0, 0], commentBlock)
          expect(tokenizedBuffer.tokenizedLines[0].ruleStack != null).toBeTruthy()
          expect(tokenizedBuffer.tokenizedLines[4].ruleStack != null).toBeTruthy()
          expect(tokenizedBuffer.tokenizedLines[5]).toBeUndefined()

          advanceClock()
          expect(tokenizedBuffer.tokenizedLines[5].ruleStack != null).toBeTruthy()
          expect(tokenizedBuffer.tokenizedLines[6].ruleStack != null).toBeTruthy()
        })
      )

      it('does not break out soft tabs across a scope boundary', async () => {
        await atom.packages.activatePackage('language-gfm')

        tokenizedBuffer.setTabLength(4)
        tokenizedBuffer.setGrammar(atom.grammars.selectGrammar('.md'))
        buffer.setText('    <![]()\n    ')
        fullyTokenize(tokenizedBuffer)
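
        // Positive tags encode text run lengths; across row 1 they should sum
        // to the row's true length of 4 columns.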
        let length = 0
        for (let tag of tokenizedBuffer.tokenizedLines[1].tags) {
          if (tag > 0) length += tag
        }

        expect(length).toBe(4)
      })
    })
  })

  describe('when the buffer contains hard-tabs', () => {
    beforeEach(async () => {
      await atom.packages.activatePackage('language-coffee-script')

      buffer = atom.project.bufferForPathSync('sample-with-tabs.coffee')
      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.coffee'), tabLength: 2})
      startTokenizing(tokenizedBuffer)
    })

    afterEach(() => {
      tokenizedBuffer.destroy()
      buffer.release()
    })

    describe('when the buffer is fully tokenized', () => {
      beforeEach(() => fullyTokenize(tokenizedBuffer))
    })
  })

  describe('when the grammar is tokenized', () => {
    it('emits the `tokenized` event', async () => {
      const editor = await atom.workspace.open('sample.js')

      const tokenizedHandler = jasmine.createSpy('tokenized handler')
      editor.tokenizedBuffer.onDidTokenize(tokenizedHandler)
      fullyTokenize(editor.tokenizedBuffer)
      expect(tokenizedHandler.callCount).toBe(1)
    })

    it("doesn't re-emit the `tokenized` event when it is re-tokenized", async () => {
      const editor = await atom.workspace.open('sample.js')
      fullyTokenize(editor.tokenizedBuffer)

      const tokenizedHandler = jasmine.createSpy('tokenized handler')
      editor.tokenizedBuffer.onDidTokenize(tokenizedHandler)
      editor.getBuffer().insert([0, 0], "'")
      fullyTokenize(editor.tokenizedBuffer)
      expect(tokenizedHandler).not.toHaveBeenCalled()
    })
  })

  describe('when the grammar is updated because a grammar it includes is activated', () => {
    it('re-emits the `tokenized` event', async () => {
      const editor = await atom.workspace.open('coffee.coffee')

      const tokenizedHandler = jasmine.createSpy('tokenized handler')
      editor.tokenizedBuffer.onDidTokenize(tokenizedHandler)
      fullyTokenize(editor.tokenizedBuffer)
      tokenizedHandler.reset()

      await atom.packages.activatePackage('language-coffee-script')
      fullyTokenize(editor.tokenizedBuffer)
      expect(tokenizedHandler.callCount).toBe(1)
    })

    it('retokenizes the buffer', async () => {
      await atom.packages.activatePackage('language-ruby-on-rails')
      await atom.packages.activatePackage('language-ruby')

      buffer = atom.project.bufferForPathSync()
      buffer.setText("<div class='name'><%= User.find(2).full_name %></div>")

      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.selectGrammar('test.erb'), tabLength: 2})
      fullyTokenize(tokenizedBuffer)
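
      // Before language-html is activated, the embedded HTML has no grammar of
      // its own, so the whole tag is a single plain text.html.ruby token.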
      expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual({
        value: "<div class='name'>",
        scopes: ['text.html.ruby']
      })

      await atom.packages.activatePackage('language-html')
      fullyTokenize(tokenizedBuffer)
      expect(tokenizedBuffer.tokenizedLines[0].tokens[0]).toEqual({
        value: '<',
        scopes: ['text.html.ruby', 'meta.tag.block.any.html', 'punctuation.definition.tag.begin.html']
      })
    })
  })

  describe('.tokenForPosition(position)', () => {
    afterEach(() => {
      tokenizedBuffer.destroy()
      buffer.release()
    })

    it('returns the correct token (regression)', () => {
      buffer = atom.project.bufferForPathSync('sample.js')
      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
      fullyTokenize(tokenizedBuffer)
      expect(tokenizedBuffer.tokenForPosition([1, 0]).scopes).toEqual(['source.js'])
      expect(tokenizedBuffer.tokenForPosition([1, 1]).scopes).toEqual(['source.js'])
      expect(tokenizedBuffer.tokenForPosition([1, 2]).scopes).toEqual(['source.js', 'storage.type.var.js'])
    })
  })

  describe('.bufferRangeForScopeAtPosition(selector, position)', () => {
    beforeEach(() => {
      buffer = atom.project.bufferForPathSync('sample.js')
      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
      fullyTokenize(tokenizedBuffer)
    })

    describe('when the selector does not match the token at the position', () =>
      it('returns a falsy value', () => expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.bogus', [0, 1])).toBeUndefined())
    )

    describe('when the selector matches a single token at the position', () => {
      it('returns the range covered by the token', () => {
        expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.storage.type.var.js', [0, 1])).toEqual([[0, 0], [0, 3]])
        expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.storage.type.var.js', [0, 3])).toEqual([[0, 0], [0, 3]])
      })
    })

    describe('when the selector matches a run of multiple tokens at the position', () => {
      it('returns the range covered by all contiguous tokens (within a single line)', () => {
        expect(tokenizedBuffer.bufferRangeForScopeAtPosition('.function', [1, 18])).toEqual([[1, 6], [1, 28]])
      })
    })
  })

  describe('.indentLevelForRow(row)', () => {
    beforeEach(() => {
      buffer = atom.project.bufferForPathSync('sample.js')
      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
      fullyTokenize(tokenizedBuffer)
    })

    describe('when the line is non-empty', () => {
      it('has an indent level based on the leading whitespace on the line', () => {
        expect(tokenizedBuffer.indentLevelForRow(0)).toBe(0)
        expect(tokenizedBuffer.indentLevelForRow(1)).toBe(1)
        expect(tokenizedBuffer.indentLevelForRow(2)).toBe(2)
        buffer.insert([2, 0], ' ')
        expect(tokenizedBuffer.indentLevelForRow(2)).toBe(2.5)
      })
    })

    describe('when the line is empty', () => {
      it('assumes the indentation level of the first non-empty line below or above if one exists', () => {
        buffer.insert([12, 0], '    ')
        buffer.insert([12, Infinity], '\n\n')
        expect(tokenizedBuffer.indentLevelForRow(13)).toBe(2)
        expect(tokenizedBuffer.indentLevelForRow(14)).toBe(2)

        buffer.insert([1, Infinity], '\n\n')
        expect(tokenizedBuffer.indentLevelForRow(2)).toBe(2)
        expect(tokenizedBuffer.indentLevelForRow(3)).toBe(2)

        buffer.setText('\n\n\n')
        expect(tokenizedBuffer.indentLevelForRow(1)).toBe(0)
      })
    })

    describe('when the changed lines are surrounded by whitespace-only lines', () => {
      it('updates the indentLevel of empty lines that precede the change', () => {
        expect(tokenizedBuffer.indentLevelForRow(12)).toBe(0)

        buffer.insert([12, 0], '\n')
        buffer.insert([13, 0], '  ')
        expect(tokenizedBuffer.indentLevelForRow(12)).toBe(1)
      })

      it('updates empty line indent guides when the empty line is the last line', () => {
        buffer.insert([12, 2], '\n')

        // The newline and the tab need to be in two different operations to surface the bug
        buffer.insert([12, 0], '  ')
        expect(tokenizedBuffer.indentLevelForRow(13)).toBe(1)

        buffer.insert([12, 0], '  ')
        expect(tokenizedBuffer.indentLevelForRow(13)).toBe(2)
        expect(tokenizedBuffer.tokenizedLines[14]).not.toBeDefined()
      })

      it('updates the indentLevel of empty lines surrounding a change that inserts lines', () => {
        buffer.insert([7, 0], '\n\n')
        buffer.insert([5, 0], '\n\n')

        expect(tokenizedBuffer.indentLevelForRow(5)).toBe(3)
        expect(tokenizedBuffer.indentLevelForRow(6)).toBe(3)
        expect(tokenizedBuffer.indentLevelForRow(9)).toBe(3)
        expect(tokenizedBuffer.indentLevelForRow(10)).toBe(3)
        expect(tokenizedBuffer.indentLevelForRow(11)).toBe(2)

        buffer.setTextInRange([[7, 0], [8, 65]], '        one\n        two\n        three\n        four')

        expect(tokenizedBuffer.indentLevelForRow(5)).toBe(4)
        expect(tokenizedBuffer.indentLevelForRow(6)).toBe(4)
        expect(tokenizedBuffer.indentLevelForRow(11)).toBe(4)
        expect(tokenizedBuffer.indentLevelForRow(12)).toBe(4)
        expect(tokenizedBuffer.indentLevelForRow(13)).toBe(2)
      })

      it('updates the indentLevel of empty lines surrounding a change that removes lines', () => {
        buffer.insert([7, 0], '\n\n')
        buffer.insert([5, 0], '\n\n')
        buffer.setTextInRange([[7, 0], [8, 65]], '    ok')

        expect(tokenizedBuffer.indentLevelForRow(5)).toBe(2)
        expect(tokenizedBuffer.indentLevelForRow(6)).toBe(2)
        expect(tokenizedBuffer.indentLevelForRow(7)).toBe(2) // new text
        expect(tokenizedBuffer.indentLevelForRow(8)).toBe(2)
        expect(tokenizedBuffer.indentLevelForRow(9)).toBe(2)
        expect(tokenizedBuffer.indentLevelForRow(10)).toBe(2)
      })
    })
  })

  describe('::isFoldableAtRow(row)', () => {
    beforeEach(() => {
      buffer = atom.project.bufferForPathSync('sample.js')
      buffer.insert([10, 0], '  // multi-line\n  // comment\n  // block\n')
      buffer.insert([0, 0], '// multi-line\n// comment\n// block\n')
      tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
      fullyTokenize(tokenizedBuffer)
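
      // Rows 0-2 and 13-15 are now three-line comment blocks (the second insert
      // pushed the first block down by three rows).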
    })

    it('includes the first line of multi-line comments', () => {
      expect(tokenizedBuffer.isFoldableAtRow(0)).toBe(true)
      expect(tokenizedBuffer.isFoldableAtRow(1)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(2)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(3)).toBe(true) // because of indent
      expect(tokenizedBuffer.isFoldableAtRow(13)).toBe(true)
      expect(tokenizedBuffer.isFoldableAtRow(14)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(15)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(16)).toBe(false)

      buffer.insert([0, Infinity], '\n')
      expect(tokenizedBuffer.isFoldableAtRow(0)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(1)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(2)).toBe(true)
      expect(tokenizedBuffer.isFoldableAtRow(3)).toBe(false)

      buffer.undo()
      expect(tokenizedBuffer.isFoldableAtRow(0)).toBe(true)
      expect(tokenizedBuffer.isFoldableAtRow(1)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(2)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(3)).toBe(true) // because of indent
    })

    it('includes non-comment lines that precede an increase in indentation', () => {
      buffer.insert([2, 0], '  ') // commented lines preceding an indent aren't foldable

      expect(tokenizedBuffer.isFoldableAtRow(1)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(2)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(3)).toBe(true)
      expect(tokenizedBuffer.isFoldableAtRow(4)).toBe(true)
      expect(tokenizedBuffer.isFoldableAtRow(5)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(6)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(7)).toBe(true)
      expect(tokenizedBuffer.isFoldableAtRow(8)).toBe(false)

      buffer.insert([7, 0], '  ')
      expect(tokenizedBuffer.isFoldableAtRow(6)).toBe(true)
      expect(tokenizedBuffer.isFoldableAtRow(7)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(8)).toBe(false)

      buffer.undo()
      expect(tokenizedBuffer.isFoldableAtRow(6)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(7)).toBe(true)
      expect(tokenizedBuffer.isFoldableAtRow(8)).toBe(false)

      buffer.insert([7, 0], '  \n  x\n')
      expect(tokenizedBuffer.isFoldableAtRow(6)).toBe(true)
      expect(tokenizedBuffer.isFoldableAtRow(7)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(8)).toBe(false)

      buffer.insert([9, 0], '  ')
      expect(tokenizedBuffer.isFoldableAtRow(6)).toBe(true)
      expect(tokenizedBuffer.isFoldableAtRow(7)).toBe(false)
      expect(tokenizedBuffer.isFoldableAtRow(8)).toBe(false)
    })
  })

  describe('::tokenizedLineForRow(row)', () => {
    it("returns the tokenized line for a row, or a placeholder line if it hasn't been tokenized yet", () => {
      buffer = atom.project.bufferForPathSync('sample.js')
      const grammar = atom.grammars.grammarForScopeName('source.js')
      tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
      const line0 = buffer.lineForRow(0)

      const jsScopeStartId = grammar.startIdForScope(grammar.scopeName)
      const jsScopeEndId = grammar.endIdForScope(grammar.scopeName)
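
      // A placeholder line's tags are just [rootScopeStart, lineLength, rootScopeEnd]:
      // the entire row wrapped in the grammar's root scope.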
      startTokenizing(tokenizedBuffer)
      expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
      expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
      expect(tokenizedBuffer.tokenizedLineForRow(0).tags).toEqual([jsScopeStartId, line0.length, jsScopeEndId])

      advanceClock(1)
      expect(tokenizedBuffer.tokenizedLines[0]).not.toBeUndefined()
      expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
      expect(tokenizedBuffer.tokenizedLineForRow(0).tags).not.toEqual([jsScopeStartId, line0.length, jsScopeEndId])

      const nullScopeStartId = NullGrammar.startIdForScope(NullGrammar.scopeName)
      const nullScopeEndId = NullGrammar.endIdForScope(NullGrammar.scopeName)

      tokenizedBuffer.setGrammar(NullGrammar)
      startTokenizing(tokenizedBuffer)
      expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
      expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
      expect(tokenizedBuffer.tokenizedLineForRow(0).tags).toEqual([nullScopeStartId, line0.length, nullScopeEndId])

      advanceClock(1)
      expect(tokenizedBuffer.tokenizedLineForRow(0).text).toBe(line0)
      expect(tokenizedBuffer.tokenizedLineForRow(0).tags).toEqual([nullScopeStartId, line0.length, nullScopeEndId])
    })

    it('returns undefined if the requested row is outside the buffer range', () => {
      buffer = atom.project.bufferForPathSync('sample.js')
      const grammar = atom.grammars.grammarForScopeName('source.js')
      tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
      fullyTokenize(tokenizedBuffer)
      expect(tokenizedBuffer.tokenizedLineForRow(999)).toBeUndefined()
    })
  })

  describe('when the buffer is configured with the null grammar', () => {
    it('does not actually tokenize using the grammar', () => {
      spyOn(NullGrammar, 'tokenizeLine').andCallThrough()
      buffer = atom.project.bufferForPathSync('sample.will-use-the-null-grammar')
      buffer.setText('a\nb\nc')
      tokenizedBuffer = new TokenizedBuffer({buffer, tabLength: 2})
      const tokenizeCallback = jasmine.createSpy('onDidTokenize')
      tokenizedBuffer.onDidTokenize(tokenizeCallback)

      expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
      expect(tokenizedBuffer.tokenizedLines[1]).toBeUndefined()
      expect(tokenizedBuffer.tokenizedLines[2]).toBeUndefined()
      expect(tokenizeCallback.callCount).toBe(0)
      expect(NullGrammar.tokenizeLine).not.toHaveBeenCalled()

      fullyTokenize(tokenizedBuffer)
      expect(tokenizedBuffer.tokenizedLines[0]).toBeUndefined()
      expect(tokenizedBuffer.tokenizedLines[1]).toBeUndefined()
      expect(tokenizedBuffer.tokenizedLines[2]).toBeUndefined()
      expect(tokenizeCallback.callCount).toBe(0)
      expect(NullGrammar.tokenizeLine).not.toHaveBeenCalled()
    })
  })

  describe('text decoration layer API', () => {
    describe('iterator', () => {
      it('iterates over the syntactic scope boundaries', () => {
        buffer = new TextBuffer({text: 'var foo = 1 /*\nhello*/var bar = 2\n'})
        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.js'), tabLength: 2})
        fullyTokenize(tokenizedBuffer)

        const iterator = tokenizedBuffer.buildIterator()
        iterator.seek(Point(0, 0))
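
        // Each boundary records the scopes that close and open at that position,
        // expressed here as the `syntax--`-prefixed class names for the scope ids.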
        const expectedBoundaries = [
          {position: Point(0, 0), closeTags: [], openTags: ['syntax--source syntax--js', 'syntax--storage syntax--type syntax--var syntax--js']},
          {position: Point(0, 3), closeTags: ['syntax--storage syntax--type syntax--var syntax--js'], openTags: []},
          {position: Point(0, 8), closeTags: [], openTags: ['syntax--keyword syntax--operator syntax--assignment syntax--js']},
          {position: Point(0, 9), closeTags: ['syntax--keyword syntax--operator syntax--assignment syntax--js'], openTags: []},
          {position: Point(0, 10), closeTags: [], openTags: ['syntax--constant syntax--numeric syntax--decimal syntax--js']},
          {position: Point(0, 11), closeTags: ['syntax--constant syntax--numeric syntax--decimal syntax--js'], openTags: []},
          {position: Point(0, 12), closeTags: [], openTags: ['syntax--comment syntax--block syntax--js', 'syntax--punctuation syntax--definition syntax--comment syntax--begin syntax--js']},
          {position: Point(0, 14), closeTags: ['syntax--punctuation syntax--definition syntax--comment syntax--begin syntax--js'], openTags: []},
          {position: Point(1, 5), closeTags: [], openTags: ['syntax--punctuation syntax--definition syntax--comment syntax--end syntax--js']},
          {position: Point(1, 7), closeTags: ['syntax--punctuation syntax--definition syntax--comment syntax--end syntax--js', 'syntax--comment syntax--block syntax--js'], openTags: ['syntax--storage syntax--type syntax--var syntax--js']},
          {position: Point(1, 10), closeTags: ['syntax--storage syntax--type syntax--var syntax--js'], openTags: []},
          {position: Point(1, 15), closeTags: [], openTags: ['syntax--keyword syntax--operator syntax--assignment syntax--js']},
          {position: Point(1, 16), closeTags: ['syntax--keyword syntax--operator syntax--assignment syntax--js'], openTags: []},
          {position: Point(1, 17), closeTags: [], openTags: ['syntax--constant syntax--numeric syntax--decimal syntax--js']},
          {position: Point(1, 18), closeTags: ['syntax--constant syntax--numeric syntax--decimal syntax--js'], openTags: []}
        ]

        while (true) {
          const boundary = {
            position: iterator.getPosition(),
            closeTags: iterator.getCloseScopeIds().map(scopeId => tokenizedBuffer.classNameForScopeId(scopeId)),
            openTags: iterator.getOpenScopeIds().map(scopeId => tokenizedBuffer.classNameForScopeId(scopeId))
          }
          expect(boundary).toEqual(expectedBoundaries.shift())
          if (!iterator.moveToSuccessor()) { break }
        }

        expect(iterator.seek(Point(0, 1)).map(scopeId => tokenizedBuffer.classNameForScopeId(scopeId))).toEqual([
          'syntax--source syntax--js',
          'syntax--storage syntax--type syntax--var syntax--js'
        ])
        expect(iterator.getPosition()).toEqual(Point(0, 3))

        expect(iterator.seek(Point(0, 8)).map(scopeId => tokenizedBuffer.classNameForScopeId(scopeId))).toEqual([
          'syntax--source syntax--js'
        ])
        expect(iterator.getPosition()).toEqual(Point(0, 8))

        expect(iterator.seek(Point(1, 0)).map(scopeId => tokenizedBuffer.classNameForScopeId(scopeId))).toEqual([
          'syntax--source syntax--js',
          'syntax--comment syntax--block syntax--js'
        ])
        expect(iterator.getPosition()).toEqual(Point(1, 0))

        expect(iterator.seek(Point(1, 18)).map(scopeId => tokenizedBuffer.classNameForScopeId(scopeId))).toEqual([
          'syntax--source syntax--js',
          'syntax--constant syntax--numeric syntax--decimal syntax--js'
        ])
        expect(iterator.getPosition()).toEqual(Point(1, 18))

        expect(iterator.seek(Point(2, 0)).map(scopeId => tokenizedBuffer.classNameForScopeId(scopeId))).toEqual([
          'syntax--source syntax--js'
        ])
        iterator.moveToSuccessor() // ensure we don't infinitely loop (regression test)
      })

      it('does not report columns beyond the length of the line', async () => {
        await atom.packages.activatePackage('language-coffee-script')

        buffer = new TextBuffer({text: '# hello\n# world'})
        tokenizedBuffer = new TokenizedBuffer({buffer, grammar: atom.grammars.grammarForScopeName('source.coffee'), tabLength: 2})
        fullyTokenize(tokenizedBuffer)

        const iterator = tokenizedBuffer.buildIterator()
        iterator.seek(Point(0, 0))
        iterator.moveToSuccessor()
        iterator.moveToSuccessor()
        expect(iterator.getPosition().column).toBe(7)

        iterator.moveToSuccessor()
        expect(iterator.getPosition().column).toBe(0)

        iterator.seek(Point(0, 7))
        expect(iterator.getPosition().column).toBe(7)

        iterator.seek(Point(0, 8))
        expect(iterator.getPosition().column).toBe(7)
      })

      it('correctly terminates scopes at the beginning of the line (regression)', () => {
        const grammar = atom.grammars.createGrammar('test', {
          'scopeName': 'text.broken',
          'name': 'Broken grammar',
          'patterns': [
            {'begin': 'start', 'end': '(?=end)', 'name': 'blue.broken'},
            {'match': '.', 'name': 'yellow.broken'}
          ]
        })
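
        // The lookahead end pattern (?=end) matches at column 0 of row 1, so the
        // blue.broken scope must close there before yellow.broken opens.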
        buffer = new TextBuffer({text: 'start x\nend x\nx'})
        tokenizedBuffer = new TokenizedBuffer({buffer, grammar, tabLength: 2})
        fullyTokenize(tokenizedBuffer)

        const iterator = tokenizedBuffer.buildIterator()
        iterator.seek(Point(1, 0))
        expect(iterator.getPosition()).toEqual([1, 0])
        expect(iterator.getCloseScopeIds().map(scopeId => tokenizedBuffer.classNameForScopeId(scopeId))).toEqual(['syntax--blue syntax--broken'])
        expect(iterator.getOpenScopeIds().map(scopeId => tokenizedBuffer.classNameForScopeId(scopeId))).toEqual(['syntax--yellow syntax--broken'])
      })
    })
  })
})