mirror of
https://github.com/pulsar-edit/pulsar.git
synced 2024-09-21 07:58:04 +03:00
Merge pull request #16090 from atom/decaf-token-iterator
☠☕ Decaffeinate TokenIterator
This commit is contained in:
commit
935c63932f
@@ -1,37 +0,0 @@
|
||||
TextBuffer = require 'text-buffer'
|
||||
TokenizedBuffer = require '../src/tokenized-buffer'
|
||||
|
||||
describe "TokenIterator", ->
|
||||
it "correctly terminates scopes at the beginning of the line (regression)", ->
|
||||
grammar = atom.grammars.createGrammar('test', {
|
||||
'scopeName': 'text.broken'
|
||||
'name': 'Broken grammar'
|
||||
'patterns': [
|
||||
{
|
||||
'begin': 'start'
|
||||
'end': '(?=end)'
|
||||
'name': 'blue.broken'
|
||||
}
|
||||
{
|
||||
'match': '.'
|
||||
'name': 'yellow.broken'
|
||||
}
|
||||
]
|
||||
})
|
||||
|
||||
buffer = new TextBuffer(text: """
|
||||
start x
|
||||
end x
|
||||
x
|
||||
""")
|
||||
tokenizedBuffer = new TokenizedBuffer({
|
||||
buffer, config: atom.config, grammarRegistry: atom.grammars, packageManager: atom.packages, assert: atom.assert
|
||||
})
|
||||
tokenizedBuffer.setGrammar(grammar)
|
||||
|
||||
tokenIterator = tokenizedBuffer.tokenizedLines[1].getTokenIterator()
|
||||
tokenIterator.next()
|
||||
|
||||
expect(tokenIterator.getBufferStart()).toBe 0
|
||||
expect(tokenIterator.getScopeEnds()).toEqual []
|
||||
expect(tokenIterator.getScopeStarts()).toEqual ['text.broken', 'yellow.broken']
|
43
spec/token-iterator-spec.js
Normal file
43
spec/token-iterator-spec.js
Normal file
@@ -0,0 +1,43 @@
|
||||
const TextBuffer = require('text-buffer')
|
||||
const TokenizedBuffer = require('../src/tokenized-buffer')
|
||||
|
||||
describe('TokenIterator', () =>
|
||||
it('correctly terminates scopes at the beginning of the line (regression)', () => {
|
||||
const grammar = atom.grammars.createGrammar('test', {
|
||||
'scopeName': 'text.broken',
|
||||
'name': 'Broken grammar',
|
||||
'patterns': [
|
||||
{
|
||||
'begin': 'start',
|
||||
'end': '(?=end)',
|
||||
'name': 'blue.broken'
|
||||
},
|
||||
{
|
||||
'match': '.',
|
||||
'name': 'yellow.broken'
|
||||
}
|
||||
]
|
||||
})
|
||||
|
||||
const buffer = new TextBuffer({text: `\
|
||||
start x
|
||||
end x
|
||||
x\
|
||||
`})
|
||||
const tokenizedBuffer = new TokenizedBuffer({
|
||||
buffer,
|
||||
config: atom.config,
|
||||
grammarRegistry: atom.grammars,
|
||||
packageManager: atom.packages,
|
||||
assert: atom.assert
|
||||
})
|
||||
tokenizedBuffer.setGrammar(grammar)
|
||||
|
||||
const tokenIterator = tokenizedBuffer.tokenizedLines[1].getTokenIterator()
|
||||
tokenIterator.next()
|
||||
|
||||
expect(tokenIterator.getBufferStart()).toBe(0)
|
||||
expect(tokenIterator.getScopeEnds()).toEqual([])
|
||||
expect(tokenIterator.getScopeStarts()).toEqual(['text.broken', 'yellow.broken'])
|
||||
})
|
||||
)
|
@@ -1,56 +0,0 @@
|
||||
module.exports =
|
||||
class TokenIterator
|
||||
constructor: (@tokenizedBuffer) ->
|
||||
|
||||
reset: (@line) ->
|
||||
@index = null
|
||||
@startColumn = 0
|
||||
@endColumn = 0
|
||||
@scopes = @line.openScopes.map (id) => @tokenizedBuffer.grammar.scopeForId(id)
|
||||
@scopeStarts = @scopes.slice()
|
||||
@scopeEnds = []
|
||||
this
|
||||
|
||||
next: ->
|
||||
{tags} = @line
|
||||
|
||||
if @index?
|
||||
@startColumn = @endColumn
|
||||
@scopeEnds.length = 0
|
||||
@scopeStarts.length = 0
|
||||
@index++
|
||||
else
|
||||
@index = 0
|
||||
|
||||
while @index < tags.length
|
||||
tag = tags[@index]
|
||||
if tag < 0
|
||||
scope = @tokenizedBuffer.grammar.scopeForId(tag)
|
||||
if tag % 2 is 0
|
||||
if @scopeStarts[@scopeStarts.length - 1] is scope
|
||||
@scopeStarts.pop()
|
||||
else
|
||||
@scopeEnds.push(scope)
|
||||
@scopes.pop()
|
||||
else
|
||||
@scopeStarts.push(scope)
|
||||
@scopes.push(scope)
|
||||
@index++
|
||||
else
|
||||
@endColumn += tag
|
||||
@text = @line.text.substring(@startColumn, @endColumn)
|
||||
return true
|
||||
|
||||
false
|
||||
|
||||
getScopes: -> @scopes
|
||||
|
||||
getScopeStarts: -> @scopeStarts
|
||||
|
||||
getScopeEnds: -> @scopeEnds
|
||||
|
||||
getText: -> @text
|
||||
|
||||
getBufferStart: -> @startColumn
|
||||
|
||||
getBufferEnd: -> @endColumn
|
79
src/token-iterator.js
Normal file
79
src/token-iterator.js
Normal file
@@ -0,0 +1,79 @@
|
||||
module.exports =
|
||||
class TokenIterator {
|
||||
constructor (tokenizedBuffer) {
|
||||
this.tokenizedBuffer = tokenizedBuffer
|
||||
}
|
||||
|
||||
reset (line) {
|
||||
this.line = line
|
||||
this.index = null
|
||||
this.startColumn = 0
|
||||
this.endColumn = 0
|
||||
this.scopes = this.line.openScopes.map(id => this.tokenizedBuffer.grammar.scopeForId(id))
|
||||
this.scopeStarts = this.scopes.slice()
|
||||
this.scopeEnds = []
|
||||
return this
|
||||
}
|
||||
|
||||
next () {
|
||||
const {tags} = this.line
|
||||
|
||||
if (this.index != null) {
|
||||
this.startColumn = this.endColumn
|
||||
this.scopeEnds.length = 0
|
||||
this.scopeStarts.length = 0
|
||||
this.index++
|
||||
} else {
|
||||
this.index = 0
|
||||
}
|
||||
|
||||
while (this.index < tags.length) {
|
||||
const tag = tags[this.index]
|
||||
if (tag < 0) {
|
||||
const scope = this.tokenizedBuffer.grammar.scopeForId(tag)
|
||||
if ((tag % 2) === 0) {
|
||||
if (this.scopeStarts[this.scopeStarts.length - 1] === scope) {
|
||||
this.scopeStarts.pop()
|
||||
} else {
|
||||
this.scopeEnds.push(scope)
|
||||
}
|
||||
this.scopes.pop()
|
||||
} else {
|
||||
this.scopeStarts.push(scope)
|
||||
this.scopes.push(scope)
|
||||
}
|
||||
this.index++
|
||||
} else {
|
||||
this.endColumn += tag
|
||||
this.text = this.line.text.substring(this.startColumn, this.endColumn)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
getScopes () {
|
||||
return this.scopes
|
||||
}
|
||||
|
||||
getScopeStarts () {
|
||||
return this.scopeStarts
|
||||
}
|
||||
|
||||
getScopeEnds () {
|
||||
return this.scopeEnds
|
||||
}
|
||||
|
||||
getText () {
|
||||
return this.text
|
||||
}
|
||||
|
||||
getBufferStart () {
|
||||
return this.startColumn
|
||||
}
|
||||
|
||||
getBufferEnd () {
|
||||
return this.endColumn
|
||||
}
|
||||
}
|
Loading…
Reference in New Issue
Block a user