Start on removing placeholder lines in TokenizedBuffer

This commit is contained in:
Max Brunsfeld 2016-10-10 20:59:39 -07:00 committed by Antonio Scandurra
parent ceae84f537
commit d20372a35f
4 changed files with 41 additions and 61 deletions

View File

@ -2,7 +2,7 @@ TokenizedBuffer = require '../src/tokenized-buffer'
{Point} = TextBuffer = require 'text-buffer'
_ = require 'underscore-plus'
describe "TokenizedBuffer", ->
fdescribe "TokenizedBuffer", ->
[tokenizedBuffer, buffer] = []
beforeEach ->
@ -90,27 +90,24 @@ describe "TokenizedBuffer", ->
buffer.release()
describe "on construction", ->
it "initially creates un-tokenized screen lines, then tokenizes lines chunk at a time in the background", ->
it "tokenizes lines chunk at a time in the background", ->
line0 = tokenizedBuffer.tokenizedLineForRow(0)
expect(line0.tokens).toEqual([value: line0.text, scopes: ['source.js']])
expect(line0).toBe(undefined)
line11 = tokenizedBuffer.tokenizedLineForRow(11)
expect(line11.tokens).toEqual([value: " return sort(Array.apply(this, arguments));", scopes: ['source.js']])
# background tokenization has not begun
expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack).toBeUndefined()
expect(line11).toBe(undefined)
# tokenize chunk 1
advanceClock()
expect(tokenizedBuffer.tokenizedLineForRow(0).ruleStack?).toBeTruthy()
expect(tokenizedBuffer.tokenizedLineForRow(4).ruleStack?).toBeTruthy()
expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeFalsy()
expect(tokenizedBuffer.tokenizedLineForRow(5)).toBe(undefined)
# tokenize chunk 2
advanceClock()
expect(tokenizedBuffer.tokenizedLineForRow(5).ruleStack?).toBeTruthy()
expect(tokenizedBuffer.tokenizedLineForRow(9).ruleStack?).toBeTruthy()
expect(tokenizedBuffer.tokenizedLineForRow(10).ruleStack?).toBeFalsy()
expect(tokenizedBuffer.tokenizedLineForRow(10)).toBe(undefined)
# tokenize last chunk
advanceClock()
@ -588,12 +585,9 @@ describe "TokenizedBuffer", ->
expect(tokenizeCallback.callCount).toBe 1
expect(atom.grammars.nullGrammar.tokenizeLine.callCount).toBe 0
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens.length).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(0).tokens[0].value).toBe 'a'
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens.length).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(1).tokens[0].value).toBe 'b'
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens.length).toBe 1
expect(tokenizedBuffer.tokenizedLineForRow(2).tokens[0].value).toBe 'c'
expect(tokenizedBuffer.tokenizedLineForRow(0)).toBe null
expect(tokenizedBuffer.tokenizedLineForRow(1)).toBe null
expect(tokenizedBuffer.tokenizedLineForRow(2)).toBe null
describe "text decoration layer API", ->
describe "iterator", ->

View File

@ -2868,7 +2868,7 @@ class TextEditor extends Model
# whitespace.
usesSoftTabs: ->
for bufferRow in [0..@buffer.getLastRow()]
continue if @tokenizedBuffer.tokenizedLineForRow(bufferRow).isComment()
continue if @tokenizedBuffer.tokenizedLineForRow(bufferRow)?.isComment()
line = @buffer.lineForRow(bufferRow)
return true if line[0] is ' '

View File

@ -1,5 +1,7 @@
{Point} = require 'text-buffer'
EMPTY = Object.freeze([])
module.exports =
class TokenizedBufferIterator
constructor: (@tokenizedBuffer) ->
@ -12,11 +14,17 @@ class TokenizedBufferIterator
@closeTags = []
@tagIndex = null
currentLine = @tokenizedBuffer.tokenizedLineForRow(position.row)
@currentTags = currentLine.tags
@currentLineOpenTags = currentLine.openScopes
@currentLineLength = currentLine.text.length
@containingTags = @currentLineOpenTags.map (id) => @tokenizedBuffer.grammar.scopeForId(id)
if currentLine = @tokenizedBuffer.tokenizedLineForRow(position.row)
@currentTags = currentLine.tags
@currentLineOpenTags = currentLine.openScopes
@currentLineLength = currentLine.text.length
@containingTags = @currentLineOpenTags.map (id) => @tokenizedBuffer.grammar.scopeForId(id)
else
@currentTags = EMPTY
@currentLineOpenTags = EMPTY
@currentLineLength = @tokenizedBuffer.buffer.lineLengthForRow(position.row)
@containingTags = []
currentColumn = 0
for tag, index in @currentTags

View File

@ -211,18 +211,7 @@ class TokenizedBuffer extends Model
# Returns a {Boolean} indicating whether the given buffer row starts
# a foldable row range due to the code's indentation patterns.
isFoldableCodeAtRow: (row) ->
# Investigating an exception that's occurring here due to the line being
# undefined. This should paper over the problem but we want to figure out
# what is happening:
tokenizedLine = @tokenizedLineForRow(row)
@assert tokenizedLine?, "TokenizedLine is undefined", (error) =>
error.metadata = {
row: row
rowCount: @tokenizedLines.length
tokenizedBufferChangeCount: @changeCount
bufferChangeCount: @buffer.changeCount
}
return false unless tokenizedLine?
return false if @buffer.isRowBlank(row) or tokenizedLine.isComment()
@ -236,21 +225,21 @@ class TokenizedBuffer extends Model
nextRow = row + 1
return false if nextRow > @buffer.getLastRow()
(row is 0 or not @tokenizedLineForRow(previousRow).isComment()) and
@tokenizedLineForRow(row).isComment() and
@tokenizedLineForRow(nextRow).isComment()
(not @tokenizedLineForRow(previousRow)?.isComment()) and
@tokenizedLineForRow(row)?.isComment() and
@tokenizedLineForRow(nextRow)?.isComment()
buildTokenizedLinesForRows: (startRow, endRow, startingStack, startingopenScopes) ->
ruleStack = startingStack
openScopes = startingopenScopes
stopTokenizingAt = startRow + @chunkSize
tokenizedLines = for row in [startRow..endRow]
tokenizedLines = for row in [startRow..endRow] by 1
if (ruleStack or row is 0) and row < stopTokenizingAt
tokenizedLine = @buildTokenizedLineForRow(row, ruleStack, openScopes)
ruleStack = tokenizedLine.ruleStack
openScopes = @scopesFromTags(openScopes, tokenizedLine.tags)
else
tokenizedLine = @buildPlaceholderTokenizedLineForRow(row, openScopes)
tokenizedLine = null
tokenizedLine
if endRow >= stopTokenizingAt
@ -260,19 +249,7 @@ class TokenizedBuffer extends Model
tokenizedLines
buildPlaceholderTokenizedLinesForRows: (startRow, endRow) ->
@buildPlaceholderTokenizedLineForRow(row) for row in [startRow..endRow] by 1
buildPlaceholderTokenizedLineForRow: (row) ->
@buildPlaceholderTokenizedLineForRowWithText(row, @buffer.lineForRow(row))
buildPlaceholderTokenizedLineForRowWithText: (row, text) ->
if @grammar isnt NullGrammar
openScopes = [@grammar.startIdForScope(@grammar.scopeName)]
else
openScopes = []
tags = [text.length]
lineEnding = @buffer.lineEndingForRow(row)
new TokenizedLine({openScopes, text, tags, lineEnding, @tokenIterator})
null for row in [startRow..endRow] by 1
buildTokenizedLineForRow: (row, ruleStack, openScopes) ->
@buildTokenizedLineForRowWithText(row, @buffer.lineForRow(row), ruleStack, openScopes)
@ -283,8 +260,7 @@ class TokenizedBuffer extends Model
new TokenizedLine({openScopes, text, tags, ruleStack, lineEnding, @tokenIterator})
tokenizedLineForRow: (bufferRow) ->
if 0 <= bufferRow < @tokenizedLines.length
@tokenizedLines[bufferRow] ?= @buildPlaceholderTokenizedLineForRow(bufferRow)
@tokenizedLines[bufferRow]
tokenizedLinesForRows: (startRow, endRow) ->
for row in [startRow..endRow] by 1
@ -366,16 +342,18 @@ class TokenizedBuffer extends Model
scopeDescriptorForPosition: (position) ->
{row, column} = @buffer.clipPosition(Point.fromObject(position))
iterator = @tokenizedLineForRow(row).getTokenIterator()
while iterator.next()
if iterator.getBufferEnd() > column
scopes = iterator.getScopes()
break
if iterator = @tokenizedLineForRow(row)?.getTokenIterator()
while iterator.next()
if iterator.getBufferEnd() > column
scopes = iterator.getScopes()
break
# rebuild scope of last token if we iterated off the end
unless scopes?
scopes = iterator.getScopes()
scopes.push(iterator.getScopeEnds().reverse()...)
# rebuild scope of last token if we iterated off the end
unless scopes?
scopes = iterator.getScopes()
scopes.push(iterator.getScopeEnds().reverse()...)
else
scopes = []
new ScopeDescriptor({scopes})