diff --git a/src/tokenized-buffer.coffee b/src/tokenized-buffer.coffee
index a828950b3..8fca6c06b 100644
--- a/src/tokenized-buffer.coffee
+++ b/src/tokenized-buffer.coffee
@@ -268,7 +268,7 @@ class TokenizedBuffer extends Model
   buildTokenizedLineForRowWithText: (row, text, ruleStack = @stackForRow(row - 1), openScopes = @openScopesForRow(row)) ->
     lineEnding = @buffer.lineEndingForRow(row)
     {tags, ruleStack} = @grammar.tokenizeLine(text, ruleStack, row is 0, false)
-    new TokenizedLine({openScopes, text, tags, ruleStack, lineEnding, @tokenIterator})
+    new TokenizedLine({openScopes, text, tags, ruleStack, lineEnding, @tokenIterator, @grammar})
 
   tokenizedLineForRow: (bufferRow) ->
     if 0 <= bufferRow <= @buffer.getLastRow()
@@ -278,7 +278,7 @@ class TokenizedBuffer extends Model
         text = @buffer.lineForRow(bufferRow)
         lineEnding = @buffer.lineEndingForRow(bufferRow)
         tags = [@grammar.startIdForScope(@grammar.scopeName), text.length, @grammar.endIdForScope(@grammar.scopeName)]
-        @tokenizedLines[bufferRow] = new TokenizedLine({openScopes: [], text, tags, lineEnding, @tokenIterator})
+        @tokenizedLines[bufferRow] = new TokenizedLine({openScopes: [], text, tags, lineEnding, @tokenIterator, @grammar})
 
   tokenizedLinesForRows: (startRow, endRow) ->
     for row in [startRow..endRow] by 1
@@ -344,17 +344,16 @@ class TokenizedBuffer extends Model
       @indentLevelForLine(line)
 
   indentLevelForLine: (line) ->
-    if match = line.match(/^[\t ]+/)
-      indentLength = 0
-      for character in match[0]
-        if character is '\t'
-          indentLength += @getTabLength() - (indentLength % @getTabLength())
-        else
-          indentLength++
+    indentLength = 0
+    for char in line
+      if char is '\t'
+        indentLength += @getTabLength() - (indentLength % @getTabLength())
+      else if char is ' '
+        indentLength++
+      else
+        break
 
-      indentLength / @getTabLength()
-    else
-      0
+    indentLength / @getTabLength()
 
   scopeDescriptorForPosition: (position) ->
     {row, column} = @buffer.clipPosition(Point.fromObject(position))
diff --git a/src/tokenized-line.coffee b/src/tokenized-line.coffee
index c039109f4..5a22a297a 100644
--- a/src/tokenized-line.coffee
+++ b/src/tokenized-line.coffee
@@ -1,5 +1,5 @@
 Token = require './token'
 
-CommentScopeRegex = /(\b|\.)comment/
+CommentScopeRegex = /(\b|\.)comment/
 
 idCounter = 1
@@ -10,7 +10,7 @@ class TokenizedLine
 
     return unless properties?
 
-    {@openScopes, @text, @tags, @ruleStack, @tokenIterator} = properties
+    {@openScopes, @text, @tags, @ruleStack, @tokenIterator, @grammar} = properties
 
   getTokenIterator: -> @tokenIterator.reset(this)
 
@@ -48,17 +48,26 @@ class TokenizedLine
     return @isCommentLine if @isCommentLine?
 
     @isCommentLine = false
-    iterator = @getTokenIterator()
-    while iterator.next()
-      scopes = iterator.getScopes()
-      continue if scopes.length is 1
-      for scope in scopes
-        if CommentScopeRegex.test(scope)
-          @isCommentLine = true
-          break
-      break
+
+    for tag in @openScopes
+      if @isCommentOpenTag(tag)
+        @isCommentLine = true
+        return @isCommentLine
+
+    for tag in @tags
+      if @isCommentOpenTag(tag)
+        @isCommentLine = true
+        return @isCommentLine
+
     @isCommentLine
 
+  isCommentOpenTag: (tag) ->
+    if tag < 0 and (tag & 1) is 1
+      scope = @grammar.scopeForId(tag)
+      if CommentScopeRegex.test(scope)
+        return true
+
+    false
+
   tokenAtIndex: (index) ->
     @tokens[index]
 