Merge pull request #632 from pulsar-edit/language--/manual-decaf

[language-*]: Manual Spec Decaf (Part 1)
commit 3fdf991693
Author: confused_techie, 2023-07-20 18:25:06 -07:00 (committed by GitHub)
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
20 changed files with 9983 additions and 8933 deletions
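
Every diff below follows the same pattern: a hand-written -spec.coffee file is deleted and an equivalent, manually decaffeinated -spec.js file is added. A minimal sketch of the recurring translation, using a hypothetical one-line spec (illustrative only; it assumes the Jasmine globals and the grammar variable the real specs set up):

// CoffeeScript original:
//   it "parses the grammar", ->
//     expect(grammar).toBeDefined()
// Manually decaffeinated JavaScript: "->" becomes an explicit function,
// implicit parentheses and braces are made explicit, and semicolons are added.
it("parses the grammar", function() {
  expect(grammar).toBeDefined();
});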

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,394 +0,0 @@
describe "Clojure grammar", ->
grammar = null
beforeEach ->
atom.config.set('core.useTreeSitterParsers', false)
atom.config.set('core.useExperimentalModernTreeSitter', false)
waitsForPromise ->
atom.packages.activatePackage("language-clojure")
runs ->
grammar = atom.grammars.grammarForScopeName("source.clojure")
it "parses the grammar", ->
expect(grammar).toBeDefined()
expect(grammar.scopeName).toBe "source.clojure"
it "tokenizes semicolon comments", ->
{tokens} = grammar.tokenizeLine "; clojure"
expect(tokens[0]).toEqual value: ";", scopes: ["source.clojure", "comment.line.semicolon.clojure", "punctuation.definition.comment.clojure"]
expect(tokens[1]).toEqual value: " clojure", scopes: ["source.clojure", "comment.line.semicolon.clojure"]
it "does not tokenize escaped semicolons as comments", ->
{tokens} = grammar.tokenizeLine "\\; clojure"
expect(tokens[0]).toEqual value: "\\; ", scopes: ["source.clojure"]
expect(tokens[1]).toEqual value: "clojure", scopes: ["source.clojure", "meta.symbol.clojure"]
it "tokenizes shebang comments", ->
{tokens} = grammar.tokenizeLine "#!/usr/bin/env clojure"
expect(tokens[0]).toEqual value: "#!", scopes: ["source.clojure", "comment.line.shebang.clojure", "punctuation.definition.comment.shebang.clojure"]
expect(tokens[1]).toEqual value: "/usr/bin/env clojure", scopes: ["source.clojure", "comment.line.shebang.clojure"]
it "tokenizes strings", ->
{tokens} = grammar.tokenizeLine '"foo bar"'
expect(tokens[0]).toEqual value: '"', scopes: ["source.clojure", "string.quoted.double.clojure", "punctuation.definition.string.begin.clojure"]
expect(tokens[1]).toEqual value: 'foo bar', scopes: ["source.clojure", "string.quoted.double.clojure"]
expect(tokens[2]).toEqual value: '"', scopes: ["source.clojure", "string.quoted.double.clojure", "punctuation.definition.string.end.clojure"]
it "tokenizes character escape sequences", ->
{tokens} = grammar.tokenizeLine '"\\n"'
expect(tokens[0]).toEqual value: '"', scopes: ["source.clojure", "string.quoted.double.clojure", "punctuation.definition.string.begin.clojure"]
expect(tokens[1]).toEqual value: '\\n', scopes: ["source.clojure", "string.quoted.double.clojure", "constant.character.escape.clojure"]
expect(tokens[2]).toEqual value: '"', scopes: ["source.clojure", "string.quoted.double.clojure", "punctuation.definition.string.end.clojure"]
it "tokenizes regexes", ->
{tokens} = grammar.tokenizeLine '#"foo"'
expect(tokens[0]).toEqual value: '#"', scopes: ["source.clojure", "string.regexp.clojure", "punctuation.definition.regexp.begin.clojure"]
expect(tokens[1]).toEqual value: 'foo', scopes: ["source.clojure", "string.regexp.clojure"]
expect(tokens[2]).toEqual value: '"', scopes: ["source.clojure", "string.regexp.clojure", "punctuation.definition.regexp.end.clojure"]
it "tokenizes backslash escape character in regexes", ->
{tokens} = grammar.tokenizeLine '#"\\\\" "/"'
expect(tokens[0]).toEqual value: '#"', scopes: ["source.clojure", "string.regexp.clojure", "punctuation.definition.regexp.begin.clojure"]
expect(tokens[1]).toEqual value: "\\\\", scopes: ['source.clojure', 'string.regexp.clojure', 'constant.character.escape.clojure']
expect(tokens[2]).toEqual value: '"', scopes: ['source.clojure', 'string.regexp.clojure', "punctuation.definition.regexp.end.clojure"]
expect(tokens[4]).toEqual value: '"', scopes: ['source.clojure', 'string.quoted.double.clojure', 'punctuation.definition.string.begin.clojure']
expect(tokens[5]).toEqual value: "/", scopes: ['source.clojure', 'string.quoted.double.clojure']
expect(tokens[6]).toEqual value: '"', scopes: ['source.clojure', 'string.quoted.double.clojure', 'punctuation.definition.string.end.clojure']
it "tokenizes escaped double quote in regexes", ->
{tokens} = grammar.tokenizeLine '#"\\""'
expect(tokens[0]).toEqual value: '#"', scopes: ["source.clojure", "string.regexp.clojure", "punctuation.definition.regexp.begin.clojure"]
expect(tokens[1]).toEqual value: '\\"', scopes: ['source.clojure', 'string.regexp.clojure', 'constant.character.escape.clojure']
expect(tokens[2]).toEqual value: '"', scopes: ['source.clojure', 'string.regexp.clojure', "punctuation.definition.regexp.end.clojure"]
it "tokenizes numerics", ->
numbers =
"constant.numeric.ratio.clojure": ["1/2", "123/456", "+0/2", "-23/1"]
"constant.numeric.arbitrary-radix.clojure": ["2R1011", "16rDEADBEEF", "16rDEADBEEFN", "36rZebra"]
"constant.numeric.hexadecimal.clojure": ["0xDEADBEEF", "0XDEADBEEF", "0xDEADBEEFN", "0x0"]
"constant.numeric.octal.clojure": ["0123", "0123N", "00"]
"constant.numeric.double.clojure": ["123.45", "123.45e6", "123.45E6", "123.456M", "42.", "42.M", "42E+9M", "42E-0", "0M", "+0M", "42.E-23M"]
"constant.numeric.long.clojure": ["123", "12321", "123N", "+123N", "-123", "0"]
"constant.numeric.symbol.clojure": ["##Inf", "##-Inf", "##NaN"]
for scope, nums of numbers
for num in nums
{tokens} = grammar.tokenizeLine num
expect(tokens[0]).toEqual value: num, scopes: ["source.clojure", scope]
it "tokenizes booleans", ->
booleans =
"constant.language.boolean.clojure": ["true", "false"]
for scope, bools of booleans
for bool in bools
{tokens} = grammar.tokenizeLine bool
expect(tokens[0]).toEqual value: bool, scopes: ["source.clojure", scope]
it "tokenizes nil", ->
{tokens} = grammar.tokenizeLine "nil"
expect(tokens[0]).toEqual value: "nil", scopes: ["source.clojure", "constant.language.nil.clojure"]
it "tokenizes keywords", ->
tests =
"meta.expression.clojure": ["(:foo)"]
"meta.map.clojure": ["{:foo}"]
"meta.vector.clojure": ["[:foo]"]
"meta.quoted-expression.clojure": ["'(:foo)", "`(:foo)"]
for metaScope, lines of tests
for line in lines
{tokens} = grammar.tokenizeLine line
expect(tokens[1]).toEqual value: ":foo", scopes: ["source.clojure", metaScope, "constant.keyword.clojure"]
{tokens} = grammar.tokenizeLine "(def foo :bar)"
expect(tokens[5]).toEqual value: ":bar", scopes: ["source.clojure", "meta.expression.clojure", "meta.definition.global.clojure", "constant.keyword.clojure"]
# keywords can start with an uppercase non-ASCII letter
{tokens} = grammar.tokenizeLine "(def foo :Öπ)"
expect(tokens[5]).toEqual value: ":Öπ", scopes: ["source.clojure", "meta.expression.clojure", "meta.definition.global.clojure", "constant.keyword.clojure"]
it "tokenizes keyfns (keyword control)", ->
keyfns = ["declare", "declare-", "ns", "in-ns", "import", "use", "require", "load", "compile", "def", "defn", "defn-", "defmacro", "defåπç"]
for keyfn in keyfns
{tokens} = grammar.tokenizeLine "(#{keyfn})"
expect(tokens[1]).toEqual value: keyfn, scopes: ["source.clojure", "meta.expression.clojure", "keyword.control.clojure"]
it "tokenizes keyfns (storage control)", ->
keyfns = ["if", "when", "for", "cond", "do", "let", "binding", "loop", "recur", "fn", "throw", "try", "catch", "finally", "case"]
for keyfn in keyfns
{tokens} = grammar.tokenizeLine "(#{keyfn})"
expect(tokens[1]).toEqual value: keyfn, scopes: ["source.clojure", "meta.expression.clojure", "storage.control.clojure"]
it "tokenizes global definitions", ->
macros = ["ns", "declare", "def", "defn", "defn-", "defroutes", "compojure/defroutes", "rum.core/defc123-", "some.nested-ns/def-nested->symbol!?*", "def+!.?abc8:<>", "ns/def+!.?abc8:<>", "ns/defåÄÖπç"]
for macro in macros
{tokens} = grammar.tokenizeLine "(#{macro} foo 'bar)"
expect(tokens[1]).toEqual value: macro, scopes: ["source.clojure", "meta.expression.clojure", "meta.definition.global.clojure", "keyword.control.clojure"]
expect(tokens[3]).toEqual value: "foo", scopes: ["source.clojure", "meta.expression.clojure", "meta.definition.global.clojure", "entity.global.clojure"]
it "tokenizes dynamic variables", ->
mutables = ["*ns*", "*foo-bar*", "*åÄÖπç*"]
for mutable in mutables
{tokens} = grammar.tokenizeLine mutable
expect(tokens[0]).toEqual value: mutable, scopes: ["source.clojure", "meta.symbol.dynamic.clojure"]
it "tokenizes metadata", ->
{tokens} = grammar.tokenizeLine "^Foo"
expect(tokens[0]).toEqual value: "^", scopes: ["source.clojure", "meta.metadata.simple.clojure"]
expect(tokens[1]).toEqual value: "Foo", scopes: ["source.clojure", "meta.metadata.simple.clojure", "meta.symbol.clojure"]
# non-ASCII letters
{tokens} = grammar.tokenizeLine "^Öπ"
expect(tokens[0]).toEqual value: "^", scopes: ["source.clojure", "meta.metadata.simple.clojure"]
expect(tokens[1]).toEqual value: "Öπ", scopes: ["source.clojure", "meta.metadata.simple.clojure", "meta.symbol.clojure"]
{tokens} = grammar.tokenizeLine "^{:foo true}"
expect(tokens[0]).toEqual value: "^{", scopes: ["source.clojure", "meta.metadata.map.clojure", "punctuation.section.metadata.map.begin.clojure"]
expect(tokens[1]).toEqual value: ":foo", scopes: ["source.clojure", "meta.metadata.map.clojure", "constant.keyword.clojure"]
expect(tokens[2]).toEqual value: " ", scopes: ["source.clojure", "meta.metadata.map.clojure"]
expect(tokens[3]).toEqual value: "true", scopes: ["source.clojure", "meta.metadata.map.clojure", "constant.language.boolean.clojure"]
expect(tokens[4]).toEqual value: "}", scopes: ["source.clojure", "meta.metadata.map.clojure", "punctuation.section.metadata.map.end.trailing.clojure"]
it "tokenizes functions", ->
expressions = ["(foo)", "(foo 1 10)"]
for expr in expressions
{tokens} = grammar.tokenizeLine expr
expect(tokens[1]).toEqual value: "foo", scopes: ["source.clojure", "meta.expression.clojure", "entity.name.function.clojure"]
#non-ASCII letters
{tokens} = grammar.tokenizeLine "(Öπ 2 20)"
expect(tokens[1]).toEqual value: "Öπ", scopes: ["source.clojure", "meta.expression.clojure", "entity.name.function.clojure"]
it "tokenizes vars", ->
{tokens} = grammar.tokenizeLine "(func #'foo)"
expect(tokens[2]).toEqual value: " #", scopes: ["source.clojure", "meta.expression.clojure"]
expect(tokens[3]).toEqual value: "'foo", scopes: ["source.clojure", "meta.expression.clojure", "meta.var.clojure"]
# non-ASCII letters
{tokens} = grammar.tokenizeLine "(func #'Öπ)"
expect(tokens[2]).toEqual value: " #", scopes: ["source.clojure", "meta.expression.clojure"]
expect(tokens[3]).toEqual value: "'Öπ", scopes: ["source.clojure", "meta.expression.clojure", "meta.var.clojure"]
it "tokenizes symbols", ->
{tokens} = grammar.tokenizeLine "x"
expect(tokens[0]).toEqual value: "x", scopes: ["source.clojure", "meta.symbol.clojure"]
# non-ASCII letters
{tokens} = grammar.tokenizeLine "Öπ"
expect(tokens[0]).toEqual value: "Öπ", scopes: ["source.clojure", "meta.symbol.clojure"]
# Should not be tokenized as a symbol
{tokens} = grammar.tokenizeLine "1foobar"
expect(tokens[0]).toEqual value: "1", scopes: ["source.clojure", "constant.numeric.long.clojure"]
it "tokenizes namespaces", ->
{tokens} = grammar.tokenizeLine "foo/bar"
expect(tokens[0]).toEqual value: "foo", scopes: ["source.clojure", "meta.symbol.namespace.clojure"]
expect(tokens[1]).toEqual value: "/", scopes: ["source.clojure"]
expect(tokens[2]).toEqual value: "bar", scopes: ["source.clojure", "meta.symbol.clojure"]
# non-ASCII letters
{tokens} = grammar.tokenizeLine "Öπ/Åä"
expect(tokens[0]).toEqual value: "Öπ", scopes: ["source.clojure", "meta.symbol.namespace.clojure"]
expect(tokens[1]).toEqual value: "/", scopes: ["source.clojure"]
expect(tokens[2]).toEqual value: "Åä", scopes: ["source.clojure", "meta.symbol.clojure"]
testMetaSection = (metaScope, puncScope, startsWith, endsWith) ->
# Entire expression on one line.
{tokens} = grammar.tokenizeLine "#{startsWith}foo, bar#{endsWith}"
[start, mid..., end] = tokens
expect(start).toEqual value: startsWith, scopes: ["source.clojure", "meta.#{metaScope}.clojure", "punctuation.section.#{puncScope}.begin.clojure"]
expect(end).toEqual value: endsWith, scopes: ["source.clojure", "meta.#{metaScope}.clojure", "punctuation.section.#{puncScope}.end.trailing.clojure"]
for token in mid
expect(token.scopes.slice(0, 2)).toEqual ["source.clojure", "meta.#{metaScope}.clojure"]
# Expression broken over multiple lines.
tokens = grammar.tokenizeLines("#{startsWith}foo\n bar#{endsWith}")
[start, mid..., after] = tokens[0]
expect(start).toEqual value: startsWith, scopes: ["source.clojure", "meta.#{metaScope}.clojure", "punctuation.section.#{puncScope}.begin.clojure"]
for token in mid
expect(token.scopes.slice(0, 2)).toEqual ["source.clojure", "meta.#{metaScope}.clojure"]
[mid..., end] = tokens[1]
expect(end).toEqual value: endsWith, scopes: ["source.clojure", "meta.#{metaScope}.clojure", "punctuation.section.#{puncScope}.end.trailing.clojure"]
for token in mid
expect(token.scopes.slice(0, 2)).toEqual ["source.clojure", "meta.#{metaScope}.clojure"]
it "tokenizes expressions", ->
testMetaSection "expression", "expression", "(", ")"
it "tokenizes quoted expressions", ->
testMetaSection "quoted-expression", "expression", "'(", ")"
testMetaSection "quoted-expression", "expression", "`(", ")"
it "tokenizes vectors", ->
testMetaSection "vector", "vector", "[", "]"
it "tokenizes maps", ->
testMetaSection "map", "map", "{", "}"
it "tokenizes sets", ->
testMetaSection "set", "set", "\#{", "}"
it "tokenizes functions in nested sexp", ->
{tokens} = grammar.tokenizeLine "((foo bar) baz)"
expect(tokens[0]).toEqual value: "(", scopes: ["source.clojure", "meta.expression.clojure", "punctuation.section.expression.begin.clojure"]
expect(tokens[1]).toEqual value: "(", scopes: ["source.clojure", "meta.expression.clojure", "meta.expression.clojure", "punctuation.section.expression.begin.clojure"]
expect(tokens[2]).toEqual value: "foo", scopes: ["source.clojure", "meta.expression.clojure", "meta.expression.clojure", "entity.name.function.clojure"]
expect(tokens[3]).toEqual value: " ", scopes: ["source.clojure", "meta.expression.clojure", "meta.expression.clojure"]
expect(tokens[4]).toEqual value: "bar", scopes: ["source.clojure", "meta.expression.clojure", "meta.expression.clojure", "meta.symbol.clojure"]
expect(tokens[5]).toEqual value: ")", scopes: ["source.clojure", "meta.expression.clojure", "meta.expression.clojure", "punctuation.section.expression.end.clojure"]
expect(tokens[6]).toEqual value: " ", scopes: ["source.clojure", "meta.expression.clojure"]
expect(tokens[7]).toEqual value: "baz", scopes: ["source.clojure", "meta.expression.clojure", "meta.symbol.clojure"]
expect(tokens[8]).toEqual value: ")", scopes: ["source.clojure", "meta.expression.clojure", "punctuation.section.expression.end.trailing.clojure"]
it "tokenizes maps used as functions", ->
{tokens} = grammar.tokenizeLine "({:foo bar} :foo)"
expect(tokens[0]).toEqual value: "(", scopes: ["source.clojure", "meta.expression.clojure", "punctuation.section.expression.begin.clojure"]
expect(tokens[1]).toEqual value: "{", scopes: ["source.clojure", "meta.expression.clojure", "meta.map.clojure", "punctuation.section.map.begin.clojure"]
expect(tokens[2]).toEqual value: ":foo", scopes: ["source.clojure", "meta.expression.clojure", "meta.map.clojure", "constant.keyword.clojure"]
expect(tokens[3]).toEqual value: " ", scopes: ["source.clojure", "meta.expression.clojure", "meta.map.clojure"]
expect(tokens[4]).toEqual value: "bar", scopes: ["source.clojure", "meta.expression.clojure", "meta.map.clojure", "meta.symbol.clojure"]
expect(tokens[5]).toEqual value: "}", scopes: ["source.clojure", "meta.expression.clojure", "meta.map.clojure", "punctuation.section.map.end.clojure"]
expect(tokens[6]).toEqual value: " ", scopes: ["source.clojure", "meta.expression.clojure"]
expect(tokens[7]).toEqual value: ":foo", scopes: ["source.clojure", "meta.expression.clojure", "constant.keyword.clojure"]
expect(tokens[8]).toEqual value: ")", scopes: ["source.clojure", "meta.expression.clojure", "punctuation.section.expression.end.trailing.clojure"]
it "tokenizes sets used in functions", ->
{tokens} = grammar.tokenizeLine "(\#{:foo :bar})"
expect(tokens[0]).toEqual value: "(", scopes: ["source.clojure", "meta.expression.clojure", "punctuation.section.expression.begin.clojure"]
expect(tokens[1]).toEqual value: "\#{", scopes: ["source.clojure", "meta.expression.clojure", "meta.set.clojure", "punctuation.section.set.begin.clojure"]
expect(tokens[2]).toEqual value: ":foo", scopes: ["source.clojure", "meta.expression.clojure", "meta.set.clojure", "constant.keyword.clojure"]
expect(tokens[3]).toEqual value: " ", scopes: ["source.clojure", "meta.expression.clojure", "meta.set.clojure"]
expect(tokens[4]).toEqual value: ":bar", scopes: ["source.clojure", "meta.expression.clojure", "meta.set.clojure", "constant.keyword.clojure"]
expect(tokens[5]).toEqual value: "}", scopes: ["source.clojure", "meta.expression.clojure", "meta.set.clojure", "punctuation.section.set.end.trailing.clojure"]
expect(tokens[6]).toEqual value: ")", scopes: ["source.clojure", "meta.expression.clojure", "punctuation.section.expression.end.trailing.clojure"]
describe "firstLineMatch", ->
it "recognises interpreter directives", ->
valid = """
#!/usr/sbin/boot foo
#!/usr/bin/boot foo=bar/
#!/usr/sbin/boot
#!/usr/sbin/boot foo bar baz
#!/usr/bin/boot perl
#!/usr/bin/boot bin/perl
#!/usr/bin/boot
#!/bin/boot
#!/usr/bin/boot --script=usr/bin
#! /usr/bin/env A=003 B=149 C=150 D=xzd E=base64 F=tar G=gz H=head I=tail boot
#!\t/usr/bin/env --foo=bar boot --quu=quux
#! /usr/bin/boot
#!/usr/bin/env boot
"""
for line in valid.split /\n/
expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull()
invalid = """
\x20#!/usr/sbin/boot
\t#!/usr/sbin/boot
#!/usr/bin/env-boot/node-env/
#!/usr/bin/das-boot
#! /usr/binboot
#!\t/usr/bin/env --boot=bar
"""
for line in invalid.split /\n/
expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull()
it "recognises Emacs modelines", ->
valid = """
#-*- Clojure -*-
#-*- mode: ClojureScript -*-
/* -*-clojureScript-*- */
// -*- Clojure -*-
/* -*- mode:Clojure -*- */
// -*- font:bar;mode:Clojure -*-
// -*- font:bar;mode:Clojure;foo:bar; -*-
// -*-font:mode;mode:Clojure-*-
// -*- foo:bar mode: clojureSCRIPT bar:baz -*-
" -*-foo:bar;mode:clojure;bar:foo-*- ";
" -*-font-mode:foo;mode:clojure;foo-bar:quux-*-"
"-*-font:x;foo:bar; mode : clojure; bar:foo;foooooo:baaaaar;fo:ba;-*-";
"-*- font:x;foo : bar ; mode : ClojureScript ; bar : foo ; foooooo:baaaaar;fo:ba-*-";
"""
for line in valid.split /\n/
expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull()
invalid = """
/* --*clojure-*- */
/* -*-- clojure -*-
/* -*- -- Clojure -*-
/* -*- Clojure -;- -*-
// -*- iClojure -*-
// -*- Clojure; -*-
// -*- clojure-door -*-
/* -*- model:clojure -*-
/* -*- indent-mode:clojure -*-
// -*- font:mode;Clojure -*-
// -*- mode: -*- Clojure
// -*- mode: das-clojure -*-
// -*-font:mode;mode:clojure--*-
"""
for line in invalid.split /\n/
expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull()
it "recognises Vim modelines", ->
valid = """
vim: se filetype=clojure:
# vim: se ft=clojure:
# vim: set ft=Clojure:
# vim: set filetype=Clojure:
# vim: ft=Clojure
# vim: syntax=Clojure
# vim: se syntax=Clojure:
# ex: syntax=Clojure
# vim:ft=clojure
# vim600: ft=clojure
# vim>600: set ft=clojure:
# vi:noai:sw=3 ts=6 ft=clojure
# vi::::::::::noai:::::::::::: ft=clojure
# vim:ts=4:sts=4:sw=4:noexpandtab:ft=clojure
# vi:: noai : : : : sw =3 ts =6 ft =clojure
# vim: ts=4: pi sts=4: ft=clojure: noexpandtab: sw=4:
# vim: ts=4 sts=4: ft=clojure noexpandtab:
# vim:noexpandtab sts=4 ft=clojure ts=4
# vim:noexpandtab:ft=clojure
# vim:ts=4:sts=4 ft=clojure:noexpandtab:\x20
# vim:noexpandtab titlestring=hi\|there\\\\ ft=clojure ts=4
"""
for line in valid.split /\n/
expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull()
invalid = """
ex: se filetype=clojure:
_vi: se filetype=clojure:
vi: se filetype=clojure
# vim set ft=klojure
# vim: soft=clojure
# vim: clean-syntax=clojure:
# vim set ft=clojure:
# vim: setft=clojure:
# vim: se ft=clojure backupdir=tmp
# vim: set ft=clojure set cmdheight=1
# vim:noexpandtab sts:4 ft:clojure ts:4
# vim:noexpandtab titlestring=hi\\|there\\ ft=clojure ts=4
# vim:noexpandtab titlestring=hi\\|there\\\\\\ ft=clojure ts=4
"""
for line in invalid.split /\n/
expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull()


@@ -0,0 +1,506 @@
describe("Clojure grammar", function() {
let grammar = null;
beforeEach(function() {
atom.config.set('core.useTreeSitterParsers', false);
atom.config.set('core.useExperimentalModernTreeSitter', false);
waitsForPromise(() => atom.packages.activatePackage("language-clojure"));
runs(() => grammar = atom.grammars.grammarForScopeName("source.clojure"));
});
it("parses the grammar", function() {
expect(grammar).toBeDefined();
expect(grammar.scopeName).toBe("source.clojure");
});
it("tokenizes semicolon comments", function() {
const {tokens} = grammar.tokenizeLine("; clojure");
expect(tokens[0]).toEqual({value: ";", scopes: ["source.clojure", "comment.line.semicolon.clojure", "punctuation.definition.comment.clojure"]});
expect(tokens[1]).toEqual({value: " clojure", scopes: ["source.clojure", "comment.line.semicolon.clojure"]});
});
it("does not tokenize escaped semicolons as comments", function() {
const {tokens} = grammar.tokenizeLine("\\; clojure");
expect(tokens[0]).toEqual({value: "\\; ", scopes: ["source.clojure"]});
expect(tokens[1]).toEqual({value: "clojure", scopes: ["source.clojure", "meta.symbol.clojure"]});
});
it("tokenizes shebang comments", function() {
const {tokens} = grammar.tokenizeLine("#!/usr/bin/env clojure");
expect(tokens[0]).toEqual({value: "#!", scopes: ["source.clojure", "comment.line.shebang.clojure", "punctuation.definition.comment.shebang.clojure"]});
expect(tokens[1]).toEqual({value: "/usr/bin/env clojure", scopes: ["source.clojure", "comment.line.shebang.clojure"]});
});
it("tokenizes strings", function() {
const {tokens} = grammar.tokenizeLine('"foo bar"');
expect(tokens[0]).toEqual({value: '"', scopes: ["source.clojure", "string.quoted.double.clojure", "punctuation.definition.string.begin.clojure"]});
expect(tokens[1]).toEqual({value: 'foo bar', scopes: ["source.clojure", "string.quoted.double.clojure"]});
expect(tokens[2]).toEqual({value: '"', scopes: ["source.clojure", "string.quoted.double.clojure", "punctuation.definition.string.end.clojure"]});
});
it("tokenizes character escape sequences", function() {
const {tokens} = grammar.tokenizeLine('"\\n"');
expect(tokens[0]).toEqual({value: '"', scopes: ["source.clojure", "string.quoted.double.clojure", "punctuation.definition.string.begin.clojure"]});
expect(tokens[1]).toEqual({value: '\\n', scopes: ["source.clojure", "string.quoted.double.clojure", "constant.character.escape.clojure"]});
expect(tokens[2]).toEqual({value: '"', scopes: ["source.clojure", "string.quoted.double.clojure", "punctuation.definition.string.end.clojure"]});
});
it("tokenizes regexes", function() {
const {tokens} = grammar.tokenizeLine('#"foo"');
expect(tokens[0]).toEqual({value: '#"', scopes: ["source.clojure", "string.regexp.clojure", "punctuation.definition.regexp.begin.clojure"]});
expect(tokens[1]).toEqual({value: 'foo', scopes: ["source.clojure", "string.regexp.clojure"]});
expect(tokens[2]).toEqual({value: '"', scopes: ["source.clojure", "string.regexp.clojure", "punctuation.definition.regexp.end.clojure"]});
});
it("tokenizes backslash escape character in regexes", function() {
const {tokens} = grammar.tokenizeLine('#"\\\\" "/"');
expect(tokens[0]).toEqual({value: '#"', scopes: ["source.clojure", "string.regexp.clojure", "punctuation.definition.regexp.begin.clojure"]});
expect(tokens[1]).toEqual({value: "\\\\", scopes: ['source.clojure', 'string.regexp.clojure', 'constant.character.escape.clojure']});
expect(tokens[2]).toEqual({value: '"', scopes: ['source.clojure', 'string.regexp.clojure', "punctuation.definition.regexp.end.clojure"]});
expect(tokens[4]).toEqual({value: '"', scopes: ['source.clojure', 'string.quoted.double.clojure', 'punctuation.definition.string.begin.clojure']});
expect(tokens[5]).toEqual({value: "/", scopes: ['source.clojure', 'string.quoted.double.clojure']});
expect(tokens[6]).toEqual({value: '"', scopes: ['source.clojure', 'string.quoted.double.clojure', 'punctuation.definition.string.end.clojure']});
});
it("tokenizes escaped double quote in regexes", function() {
const {tokens} = grammar.tokenizeLine('#"\\""');
expect(tokens[0]).toEqual({value: '#"', scopes: ["source.clojure", "string.regexp.clojure", "punctuation.definition.regexp.begin.clojure"]});
expect(tokens[1]).toEqual({value: '\\"', scopes: ['source.clojure', 'string.regexp.clojure', 'constant.character.escape.clojure']});
expect(tokens[2]).toEqual({value: '"', scopes: ['source.clojure', 'string.regexp.clojure', "punctuation.definition.regexp.end.clojure"]});
});
it("tokenizes numerics", function() {
const numbers = {
"constant.numeric.ratio.clojure": ["1/2", "123/456", "+0/2", "-23/1"],
"constant.numeric.arbitrary-radix.clojure": ["2R1011", "16rDEADBEEF", "16rDEADBEEFN", "36rZebra"],
"constant.numeric.hexadecimal.clojure": ["0xDEADBEEF", "0XDEADBEEF", "0xDEADBEEFN", "0x0"],
"constant.numeric.octal.clojure": ["0123", "0123N", "00"],
"constant.numeric.double.clojure": ["123.45", "123.45e6", "123.45E6", "123.456M", "42.", "42.M", "42E+9M", "42E-0", "0M", "+0M", "42.E-23M"],
"constant.numeric.long.clojure": ["123", "12321", "123N", "+123N", "-123", "0"],
"constant.numeric.symbol.clojure": ["##Inf", "##-Inf", "##NaN"]
};
return (() => {
const result = [];
for (var scope in numbers) {
var nums = numbers[scope];
result.push((() => {
const result1 = [];
for (let num of Array.from(nums)) {
const {tokens} = grammar.tokenizeLine(num);
result1.push(expect(tokens[0]).toEqual({value: num, scopes: ["source.clojure", scope]}));
}
return result1;
})());
}
return result;
})();
});
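The IIFE-and-result-array shape above is a faithful port of the CoffeeScript comprehension, which implicitly collects and returns a value; Jasmine ignores a spec's return value, so a plain loop would behave identically. A hedged, simpler equivalent of the same iteration (an editorial sketch, assuming the numbers map and spec context above, not code from this commit):

// Same assertions as the block above, without the comprehension scaffolding.
for (const [scope, nums] of Object.entries(numbers)) {
  for (const num of nums) {
    const {tokens} = grammar.tokenizeLine(num);
    expect(tokens[0]).toEqual({value: num, scopes: ["source.clojure", scope]});
  }
}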
it("tokenizes booleans", function() {
const booleans =
{"constant.language.boolean.clojure": ["true", "false"]};
return (() => {
const result = [];
for (var scope in booleans) {
var bools = booleans[scope];
result.push((() => {
const result1 = [];
for (let bool of Array.from(bools)) {
const {tokens} = grammar.tokenizeLine(bool);
result1.push(expect(tokens[0]).toEqual({value: bool, scopes: ["source.clojure", scope]}));
}
return result1;
})());
}
return result;
})();
});
it("tokenizes nil", function() {
const {tokens} = grammar.tokenizeLine("nil");
expect(tokens[0]).toEqual({value: "nil", scopes: ["source.clojure", "constant.language.nil.clojure"]});
});
it("tokenizes keywords", function() {
let tokens;
const tests = {
"meta.expression.clojure": ["(:foo)"],
"meta.map.clojure": ["{:foo}"],
"meta.vector.clojure": ["[:foo]"],
"meta.quoted-expression.clojure": ["'(:foo)", "`(:foo)"]
};
for (let metaScope in tests) {
const lines = tests[metaScope];
for (let line of Array.from(lines)) {
({tokens} = grammar.tokenizeLine(line));
expect(tokens[1]).toEqual({value: ":foo", scopes: ["source.clojure", metaScope, "constant.keyword.clojure"]});
}
}
({tokens} = grammar.tokenizeLine("(def foo :bar)"));
expect(tokens[5]).toEqual({value: ":bar", scopes: ["source.clojure", "meta.expression.clojure", "meta.definition.global.clojure", "constant.keyword.clojure"]});
// keywords can start with an uppercase non-ASCII letter
({tokens} = grammar.tokenizeLine("(def foo :Öπ)"));
expect(tokens[5]).toEqual({value: ":Öπ", scopes: ["source.clojure", "meta.expression.clojure", "meta.definition.global.clojure", "constant.keyword.clojure"]});
});
it("tokenizes keyfns (keyword control)", function() {
const keyfns = ["declare", "declare-", "ns", "in-ns", "import", "use", "require", "load", "compile", "def", "defn", "defn-", "defmacro", "defåπç"];
return (() => {
const result = [];
for (let keyfn of Array.from(keyfns)) {
const {tokens} = grammar.tokenizeLine(`(${keyfn})`);
result.push(expect(tokens[1]).toEqual({value: keyfn, scopes: ["source.clojure", "meta.expression.clojure", "keyword.control.clojure"]}));
}
return result;
})();
});
it("tokenizes keyfns (storage control)", function() {
const keyfns = ["if", "when", "for", "cond", "do", "let", "binding", "loop", "recur", "fn", "throw", "try", "catch", "finally", "case"];
return (() => {
const result = [];
for (let keyfn of Array.from(keyfns)) {
const {tokens} = grammar.tokenizeLine(`(${keyfn})`);
result.push(expect(tokens[1]).toEqual({value: keyfn, scopes: ["source.clojure", "meta.expression.clojure", "storage.control.clojure"]}));
}
return result;
})();
});
it("tokenizes global definitions", function() {
const macros = ["ns", "declare", "def", "defn", "defn-", "defroutes", "compojure/defroutes", "rum.core/defc123-", "some.nested-ns/def-nested->symbol!?*", "def+!.?abc8:<>", "ns/def+!.?abc8:<>", "ns/defåÄÖπç"];
return (() => {
const result = [];
for (let macro of Array.from(macros)) {
const {tokens} = grammar.tokenizeLine(`(${macro} foo 'bar)`);
expect(tokens[1]).toEqual({value: macro, scopes: ["source.clojure", "meta.expression.clojure", "meta.definition.global.clojure", "keyword.control.clojure"]});
result.push(expect(tokens[3]).toEqual({value: "foo", scopes: ["source.clojure", "meta.expression.clojure", "meta.definition.global.clojure", "entity.global.clojure"]}));
}
return result;
})();
});
it("tokenizes dynamic variables", function() {
const mutables = ["*ns*", "*foo-bar*", "*åÄÖπç*"];
return (() => {
const result = [];
for (let mutable of Array.from(mutables)) {
const {tokens} = grammar.tokenizeLine(mutable);
result.push(expect(tokens[0]).toEqual({value: mutable, scopes: ["source.clojure", "meta.symbol.dynamic.clojure"]}));
}
return result;
})();
});
it("tokenizes metadata", function() {
let {tokens} = grammar.tokenizeLine("^Foo");
expect(tokens[0]).toEqual({value: "^", scopes: ["source.clojure", "meta.metadata.simple.clojure"]});
expect(tokens[1]).toEqual({value: "Foo", scopes: ["source.clojure", "meta.metadata.simple.clojure", "meta.symbol.clojure"]});
// non-ASCII letters
({tokens} = grammar.tokenizeLine("^Öπ"));
expect(tokens[0]).toEqual({value: "^", scopes: ["source.clojure", "meta.metadata.simple.clojure"]});
expect(tokens[1]).toEqual({value: "Öπ", scopes: ["source.clojure", "meta.metadata.simple.clojure", "meta.symbol.clojure"]});
({tokens} = grammar.tokenizeLine("^{:foo true}"));
expect(tokens[0]).toEqual({value: "^{", scopes: ["source.clojure", "meta.metadata.map.clojure", "punctuation.section.metadata.map.begin.clojure"]});
expect(tokens[1]).toEqual({value: ":foo", scopes: ["source.clojure", "meta.metadata.map.clojure", "constant.keyword.clojure"]});
expect(tokens[2]).toEqual({value: " ", scopes: ["source.clojure", "meta.metadata.map.clojure"]});
expect(tokens[3]).toEqual({value: "true", scopes: ["source.clojure", "meta.metadata.map.clojure", "constant.language.boolean.clojure"]});
expect(tokens[4]).toEqual({value: "}", scopes: ["source.clojure", "meta.metadata.map.clojure", "punctuation.section.metadata.map.end.trailing.clojure"]});
});
it("tokenizes functions", function() {
let tokens;
const expressions = ["(foo)", "(foo 1 10)"];
for (let expr of Array.from(expressions)) {
({tokens} = grammar.tokenizeLine(expr));
expect(tokens[1]).toEqual({value: "foo", scopes: ["source.clojure", "meta.expression.clojure", "entity.name.function.clojure"]});
}
//non-ASCII letters
({tokens} = grammar.tokenizeLine("(Öπ 2 20)"));
expect(tokens[1]).toEqual({value: "Öπ", scopes: ["source.clojure", "meta.expression.clojure", "entity.name.function.clojure"]});
});
it("tokenizes vars", function() {
let {tokens} = grammar.tokenizeLine("(func #'foo)");
expect(tokens[2]).toEqual({value: " #", scopes: ["source.clojure", "meta.expression.clojure"]});
expect(tokens[3]).toEqual({value: "'foo", scopes: ["source.clojure", "meta.expression.clojure", "meta.var.clojure"]});
// non-ASCII letters
({tokens} = grammar.tokenizeLine("(func #'Öπ)"));
expect(tokens[2]).toEqual({value: " #", scopes: ["source.clojure", "meta.expression.clojure"]});
expect(tokens[3]).toEqual({value: "'Öπ", scopes: ["source.clojure", "meta.expression.clojure", "meta.var.clojure"]});
});
it("tokenizes symbols", function() {
let {tokens} = grammar.tokenizeLine("x");
expect(tokens[0]).toEqual({value: "x", scopes: ["source.clojure", "meta.symbol.clojure"]});
// non-ASCII letters
({tokens} = grammar.tokenizeLine("Öπ"));
expect(tokens[0]).toEqual({value: "Öπ", scopes: ["source.clojure", "meta.symbol.clojure"]});
// Should not be tokenized as a symbol
({tokens} = grammar.tokenizeLine("1foobar"));
expect(tokens[0]).toEqual({value: "1", scopes: ["source.clojure", "constant.numeric.long.clojure"]});
});
it("tokenizes namespaces", function() {
let {tokens} = grammar.tokenizeLine("foo/bar");
expect(tokens[0]).toEqual({value: "foo", scopes: ["source.clojure", "meta.symbol.namespace.clojure"]});
expect(tokens[1]).toEqual({value: "/", scopes: ["source.clojure"]});
expect(tokens[2]).toEqual({value: "bar", scopes: ["source.clojure", "meta.symbol.clojure"]});
// non-ASCII letters
({tokens} = grammar.tokenizeLine("Öπ/Åä"));
expect(tokens[0]).toEqual({value: "Öπ", scopes: ["source.clojure", "meta.symbol.namespace.clojure"]});
expect(tokens[1]).toEqual({value: "/", scopes: ["source.clojure"]});
expect(tokens[2]).toEqual({value: "Åä", scopes: ["source.clojure", "meta.symbol.clojure"]});
});
const testMetaSection = function(metaScope, puncScope, startsWith, endsWith) {
// Entire expression on one line.
let adjustedLength1, adjustedLength2, after, token;
let {tokens} = grammar.tokenizeLine(`${startsWith}foo, bar${endsWith}`);
let start = tokens[0], adjustedLength = Math.max(tokens.length, 2), mid = tokens.slice(1, adjustedLength - 1), end = tokens[adjustedLength - 1];
expect(start).toEqual({value: startsWith, scopes: ["source.clojure", `meta.${metaScope}.clojure`, `punctuation.section.${puncScope}.begin.clojure`]});
expect(end).toEqual({value: endsWith, scopes: ["source.clojure", `meta.${metaScope}.clojure`, `punctuation.section.${puncScope}.end.trailing.clojure`]});
for (token of Array.from(mid)) {
expect(token.scopes.slice(0, 2)).toEqual(["source.clojure", `meta.${metaScope}.clojure`]);
}
// Expression broken over multiple lines.
tokens = grammar.tokenizeLines(`${startsWith}foo\n bar${endsWith}`);
start = tokens[0][0],
adjustedLength1 = Math.max(tokens[0].length, 2),
mid = tokens[0].slice(1, adjustedLength1 - 1),
after = tokens[0][adjustedLength1 - 1];
expect(start).toEqual({value: startsWith, scopes: ["source.clojure", `meta.${metaScope}.clojure`, `punctuation.section.${puncScope}.begin.clojure`]});
for (token of Array.from(mid)) {
expect(token.scopes.slice(0, 2)).toEqual(["source.clojure", `meta.${metaScope}.clojure`]);
}
adjustedLength2 = Math.max(tokens[1].length, 1),
mid = tokens[1].slice(0, adjustedLength2 - 1),
end = tokens[1][adjustedLength2 - 1];
expect(end).toEqual({value: endsWith, scopes: ["source.clojure", `meta.${metaScope}.clojure`, `punctuation.section.${puncScope}.end.trailing.clojure`]});
return (() => {
const result = [];
for (token of Array.from(mid)) {
result.push(expect(token.scopes.slice(0, 2)).toEqual(["source.clojure", `meta.${metaScope}.clojure`]));
}
return result;
})();
};
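The adjustedLength bookkeeping in testMetaSection exists because the CoffeeScript source destructured with a middle splat ([start, mid..., end] = tokens), which has no direct JavaScript counterpart: a rest element must come last. A hedged sketch of a small helper that would express the same split more directly (splitEnds is a hypothetical name, not part of this commit; it assumes at least two tokens per line):

// splitEnds: peel the first and last tokens off an array.
const splitEnds = (tokens) => {
  const [start, ...rest] = tokens;  // rest element must be last in JS
  const end = rest.pop();           // closing punctuation token
  return {start, mid: rest, end};   // mid = everything in between
};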
it("tokenizes expressions", () => testMetaSection("expression", "expression", "(", ")"));
it("tokenizes quoted expressions", function() {
testMetaSection("quoted-expression", "expression", "'(", ")");
testMetaSection("quoted-expression", "expression", "`(", ")");
});
it("tokenizes vectors", () => testMetaSection("vector", "vector", "[", "]"));
it("tokenizes maps", () => testMetaSection("map", "map", "{", "}"));
it("tokenizes sets", () => testMetaSection("set", "set", "\#{", "}"));
it("tokenizes functions in nested sexp", function() {
const {tokens} = grammar.tokenizeLine("((foo bar) baz)");
expect(tokens[0]).toEqual({value: "(", scopes: ["source.clojure", "meta.expression.clojure", "punctuation.section.expression.begin.clojure"]});
expect(tokens[1]).toEqual({value: "(", scopes: ["source.clojure", "meta.expression.clojure", "meta.expression.clojure", "punctuation.section.expression.begin.clojure"]});
expect(tokens[2]).toEqual({value: "foo", scopes: ["source.clojure", "meta.expression.clojure", "meta.expression.clojure", "entity.name.function.clojure"]});
expect(tokens[3]).toEqual({value: " ", scopes: ["source.clojure", "meta.expression.clojure", "meta.expression.clojure"]});
expect(tokens[4]).toEqual({value: "bar", scopes: ["source.clojure", "meta.expression.clojure", "meta.expression.clojure", "meta.symbol.clojure"]});
expect(tokens[5]).toEqual({value: ")", scopes: ["source.clojure", "meta.expression.clojure", "meta.expression.clojure", "punctuation.section.expression.end.clojure"]});
expect(tokens[6]).toEqual({value: " ", scopes: ["source.clojure", "meta.expression.clojure"]});
expect(tokens[7]).toEqual({value: "baz", scopes: ["source.clojure", "meta.expression.clojure", "meta.symbol.clojure"]});
expect(tokens[8]).toEqual({value: ")", scopes: ["source.clojure", "meta.expression.clojure", "punctuation.section.expression.end.trailing.clojure"]});
});
it("tokenizes maps used as functions", function() {
const {tokens} = grammar.tokenizeLine("({:foo bar} :foo)");
expect(tokens[0]).toEqual({value: "(", scopes: ["source.clojure", "meta.expression.clojure", "punctuation.section.expression.begin.clojure"]});
expect(tokens[1]).toEqual({value: "{", scopes: ["source.clojure", "meta.expression.clojure", "meta.map.clojure", "punctuation.section.map.begin.clojure"]});
expect(tokens[2]).toEqual({value: ":foo", scopes: ["source.clojure", "meta.expression.clojure", "meta.map.clojure", "constant.keyword.clojure"]});
expect(tokens[3]).toEqual({value: " ", scopes: ["source.clojure", "meta.expression.clojure", "meta.map.clojure"]});
expect(tokens[4]).toEqual({value: "bar", scopes: ["source.clojure", "meta.expression.clojure", "meta.map.clojure", "meta.symbol.clojure"]});
expect(tokens[5]).toEqual({value: "}", scopes: ["source.clojure", "meta.expression.clojure", "meta.map.clojure", "punctuation.section.map.end.clojure"]});
expect(tokens[6]).toEqual({value: " ", scopes: ["source.clojure", "meta.expression.clojure"]});
expect(tokens[7]).toEqual({value: ":foo", scopes: ["source.clojure", "meta.expression.clojure", "constant.keyword.clojure"]});
expect(tokens[8]).toEqual({value: ")", scopes: ["source.clojure", "meta.expression.clojure", "punctuation.section.expression.end.trailing.clojure"]});
});
it("tokenizes sets used in functions", function() {
const {tokens} = grammar.tokenizeLine("(\#{:foo :bar})");
expect(tokens[0]).toEqual({value: "(", scopes: ["source.clojure", "meta.expression.clojure", "punctuation.section.expression.begin.clojure"]});
expect(tokens[1]).toEqual({value: "\#{", scopes: ["source.clojure", "meta.expression.clojure", "meta.set.clojure", "punctuation.section.set.begin.clojure"]});
expect(tokens[2]).toEqual({value: ":foo", scopes: ["source.clojure", "meta.expression.clojure", "meta.set.clojure", "constant.keyword.clojure"]});
expect(tokens[3]).toEqual({value: " ", scopes: ["source.clojure", "meta.expression.clojure", "meta.set.clojure"]});
expect(tokens[4]).toEqual({value: ":bar", scopes: ["source.clojure", "meta.expression.clojure", "meta.set.clojure", "constant.keyword.clojure"]});
expect(tokens[5]).toEqual({value: "}", scopes: ["source.clojure", "meta.expression.clojure", "meta.set.clojure", "punctuation.section.set.end.trailing.clojure"]});
expect(tokens[6]).toEqual({value: ")", scopes: ["source.clojure", "meta.expression.clojure", "punctuation.section.expression.end.trailing.clojure"]});
});
describe("firstLineMatch", function() {
it("recognises interpreter directives", function() {
let line;
const valid = `\
#!/usr/sbin/boot foo
#!/usr/bin/boot foo=bar/
#!/usr/sbin/boot
#!/usr/sbin/boot foo bar baz
#!/usr/bin/boot perl
#!/usr/bin/boot bin/perl
#!/usr/bin/boot
#!/bin/boot
#!/usr/bin/boot --script=usr/bin
#! /usr/bin/env A=003 B=149 C=150 D=xzd E=base64 F=tar G=gz H=head I=tail boot
#!\t/usr/bin/env --foo=bar boot --quu=quux
#! /usr/bin/boot
#!/usr/bin/env boot\
`;
for (line of Array.from(valid.split(/\n/))) {
expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull();
}
const invalid = `\
\x20#!/usr/sbin/boot
\t#!/usr/sbin/boot
#!/usr/bin/env-boot/node-env/
#!/usr/bin/das-boot
#! /usr/binboot
#!\t/usr/bin/env --boot=bar\
`;
return (() => {
const result = [];
for (line of Array.from(invalid.split(/\n/))) {
result.push(expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull());
}
return result;
})();
});
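Throughout these firstLineMatch specs, CoffeeScript triple-quoted block strings become template literals whose trailing backslashes splice out the wrapper newlines, so the split(/\n/) loops see exactly the same lines as before. A self-contained sketch of that behavior (editorial illustration, not part of the commit):

// The backslash-newline pairs are line continuations inside the template
// literal, so the string neither starts nor ends with a newline.
const sample = `\
line one
line two\
`;
console.log(sample.split(/\n/));  // [ 'line one', 'line two' ]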
it("recognises Emacs modelines", function() {
let line;
const valid = `\
#-*- Clojure -*-
#-*- mode: ClojureScript -*-
/* -*-clojureScript-*- */
// -*- Clojure -*-
/* -*- mode:Clojure -*- */
// -*- font:bar;mode:Clojure -*-
// -*- font:bar;mode:Clojure;foo:bar; -*-
// -*-font:mode;mode:Clojure-*-
// -*- foo:bar mode: clojureSCRIPT bar:baz -*-
" -*-foo:bar;mode:clojure;bar:foo-*- ";
" -*-font-mode:foo;mode:clojure;foo-bar:quux-*-"
"-*-font:x;foo:bar; mode : clojure; bar:foo;foooooo:baaaaar;fo:ba;-*-";
"-*- font:x;foo : bar ; mode : ClojureScript ; bar : foo ; foooooo:baaaaar;fo:ba-*-";\
`;
for (line of Array.from(valid.split(/\n/))) {
expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull();
}
const invalid = `\
/* --*clojure-*- */
/* -*-- clojure -*-
/* -*- -- Clojure -*-
/* -*- Clojure -;- -*-
// -*- iClojure -*-
// -*- Clojure; -*-
// -*- clojure-door -*-
/* -*- model:clojure -*-
/* -*- indent-mode:clojure -*-
// -*- font:mode;Clojure -*-
// -*- mode: -*- Clojure
// -*- mode: das-clojure -*-
// -*-font:mode;mode:clojure--*-\
`;
return (() => {
const result = [];
for (line of Array.from(invalid.split(/\n/))) {
result.push(expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull());
}
return result;
})();
});
it("recognises Vim modelines", function() {
let line;
const valid = `\
vim: se filetype=clojure:
# vim: se ft=clojure:
# vim: set ft=Clojure:
# vim: set filetype=Clojure:
# vim: ft=Clojure
# vim: syntax=Clojure
# vim: se syntax=Clojure:
# ex: syntax=Clojure
# vim:ft=clojure
# vim600: ft=clojure
# vim>600: set ft=clojure:
# vi:noai:sw=3 ts=6 ft=clojure
# vi::::::::::noai:::::::::::: ft=clojure
# vim:ts=4:sts=4:sw=4:noexpandtab:ft=clojure
# vi:: noai : : : : sw =3 ts =6 ft =clojure
# vim: ts=4: pi sts=4: ft=clojure: noexpandtab: sw=4:
# vim: ts=4 sts=4: ft=clojure noexpandtab:
# vim:noexpandtab sts=4 ft=clojure ts=4
# vim:noexpandtab:ft=clojure
# vim:ts=4:sts=4 ft=clojure:noexpandtab:\x20
# vim:noexpandtab titlestring=hi\|there\\\\ ft=clojure ts=4\
`;
for (line of Array.from(valid.split(/\n/))) {
expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull();
}
const invalid = `\
ex: se filetype=clojure:
_vi: se filetype=clojure:
vi: se filetype=clojure
# vim set ft=klojure
# vim: soft=clojure
# vim: clean-syntax=clojure:
# vim set ft=clojure:
# vim: setft=clojure:
# vim: se ft=clojure backupdir=tmp
# vim: set ft=clojure set cmdheight=1
# vim:noexpandtab sts:4 ft:clojure ts:4
# vim:noexpandtab titlestring=hi\\|there\\ ft=clojure ts=4
# vim:noexpandtab titlestring=hi\\|there\\\\\\ ft=clojure ts=4\
`;
return (() => {
const result = [];
for (line of Array.from(invalid.split(/\n/))) {
result.push(expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull());
}
return result;
})();
});
});
});


@@ -1,122 +0,0 @@
describe "CoffeeScript (Literate) grammar", ->
grammar = null
beforeEach ->
waitsForPromise ->
atom.packages.activatePackage("language-coffee-script")
runs ->
grammar = atom.grammars.grammarForScopeName("source.litcoffee")
it "parses the grammar", ->
expect(grammar).toBeTruthy()
expect(grammar.scopeName).toBe "source.litcoffee"
it "recognizes a code block after a list", ->
tokens = grammar.tokenizeLines '''
1. Example
2. List
1 + 2
'''
expect(tokens[3][1]).toEqual value: "1", scopes: ["source.litcoffee", "markup.raw.block.markdown", "constant.numeric.decimal.coffee"]
describe "firstLineMatch", ->
it "recognises interpreter directives", ->
valid = """
#!/usr/local/bin/coffee --no-header --literate -w
#!/usr/local/bin/coffee -l
#!/usr/local/bin/env coffee --literate -w
"""
for line in valid.split /\n/
expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull()
invalid = """
#!/usr/local/bin/coffee --no-head -literate -w
#!/usr/local/bin/coffee --wl
#!/usr/local/bin/env coffee --illiterate -w=l
"""
for line in invalid.split /\n/
expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull()
it "recognises Emacs modelines", ->
valid = """
#-*- litcoffee -*-
#-*- mode: litcoffee -*-
/* -*-litcoffee-*- */
// -*- litcoffee -*-
/* -*- mode:LITCOFFEE -*- */
// -*- font:bar;mode:LitCoffee -*-
// -*- font:bar;mode:litcoffee;foo:bar; -*-
// -*-font:mode;mode:litcoffee-*-
// -*- foo:bar mode: litcoffee bar:baz -*-
" -*-foo:bar;mode:litcoffee;bar:foo-*- ";
" -*-font-mode:foo;mode:LITcofFEE;foo-bar:quux-*-"
"-*-font:x;foo:bar; mode : litCOFFEE; bar:foo;foooooo:baaaaar;fo:ba;-*-";
"-*- font:x;foo : bar ; mode : LiTcOFFEe ; bar : foo ; foooooo:baaaaar;fo:ba-*-";
"""
for line in valid.split /\n/
expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull()
invalid = """
/* --*litcoffee-*- */
/* -*-- litcoffee -*-
/* -*- -- litcoffee -*-
/* -*- LITCOFFEE -;- -*-
// -*- itsLitCoffeeFam -*-
// -*- litcoffee; -*-
// -*- litcoffee-stuff -*-
/* -*- model:litcoffee -*-
/* -*- indent-mode:litcoffee -*-
// -*- font:mode;litcoffee -*-
// -*- mode: -*- litcoffee
// -*- mode: burnt-because-litcoffee -*-
// -*-font:mode;mode:litcoffee--*-
"""
for line in invalid.split /\n/
expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull()
it "recognises Vim modelines", ->
valid = """
vim: se filetype=litcoffee:
# vim: se ft=litcoffee:
# vim: set ft=LITCOFFEE:
# vim: set filetype=litcoffee:
# vim: ft=LITCOFFEE
# vim: syntax=litcoffee
# vim: se syntax=litcoffee:
# ex: syntax=litcoffee
# vim:ft=LitCoffee
# vim600: ft=litcoffee
# vim>600: set ft=litcoffee:
# vi:noai:sw=3 ts=6 ft=litcoffee
# vi::::::::::noai:::::::::::: ft=litcoffee
# vim:ts=4:sts=4:sw=4:noexpandtab:ft=LITCOFFEE
# vi:: noai : : : : sw =3 ts =6 ft =litCoffee
# vim: ts=4: pi sts=4: ft=litcoffee: noexpandtab: sw=4:
# vim: ts=4 sts=4: ft=litcoffee noexpandtab:
# vim:noexpandtab sts=4 ft=LitCOffEE ts=4
# vim:noexpandtab:ft=litcoffee
# vim:ts=4:sts=4 ft=litcoffee:noexpandtab:\x20
# vim:noexpandtab titlestring=hi\|there\\\\ ft=litcoffee ts=4
"""
for line in valid.split /\n/
expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull()
invalid = """
ex: se filetype=litcoffee:
_vi: se filetype=litcoffee:
vi: se filetype=litcoffee
# vim set ft=illitcoffee
# vim: soft=litcoffee
# vim: clean-syntax=litcoffee:
# vim set ft=litcoffee:
# vim: setft=litcoffee:
# vim: se ft=litcoffee backupdir=tmp
# vim: set ft=LITCOFFEE set cmdheight=1
# vim:noexpandtab sts:4 ft:litcoffee ts:4
# vim:noexpandtab titlestring=hi\\|there\\ ft=litcoffee ts=4
# vim:noexpandtab titlestring=hi\\|there\\\\\\ ft=litcoffee ts=4
"""
for line in invalid.split /\n/
expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull()


@@ -0,0 +1,151 @@
describe("CoffeeScript (Literate) grammar", function() {
let grammar = null;
beforeEach(function() {
waitsForPromise(() => atom.packages.activatePackage("language-coffee-script"));
runs(() => grammar = atom.grammars.grammarForScopeName("source.litcoffee"));
});
it("parses the grammar", function() {
expect(grammar).toBeTruthy();
expect(grammar.scopeName).toBe("source.litcoffee");
});
it("recognizes a code block after a list", function() {
const tokens = grammar.tokenizeLines(`\
1. Example
2. List
1 + 2\
`
);
expect(tokens[3][1]).toEqual({value: "1", scopes: ["source.litcoffee", "markup.raw.block.markdown", "constant.numeric.decimal.coffee"]});
});
describe("firstLineMatch", function() {
it("recognises interpreter directives", function() {
let line;
const valid = `\
#!/usr/local/bin/coffee --no-header --literate -w
#!/usr/local/bin/coffee -l
#!/usr/local/bin/env coffee --literate -w\
`;
for (line of Array.from(valid.split(/\n/))) {
expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull();
}
const invalid = `\
#!/usr/local/bin/coffee --no-head -literate -w
#!/usr/local/bin/coffee --wl
#!/usr/local/bin/env coffee --illiterate -w=l\
`;
return (() => {
const result = [];
for (line of Array.from(invalid.split(/\n/))) {
result.push(expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull());
}
return result;
})();
});
it("recognises Emacs modelines", function() {
let line;
const valid = `\
#-*- litcoffee -*-
#-*- mode: litcoffee -*-
/* -*-litcoffee-*- */
// -*- litcoffee -*-
/* -*- mode:LITCOFFEE -*- */
// -*- font:bar;mode:LitCoffee -*-
// -*- font:bar;mode:litcoffee;foo:bar; -*-
// -*-font:mode;mode:litcoffee-*-
// -*- foo:bar mode: litcoffee bar:baz -*-
" -*-foo:bar;mode:litcoffee;bar:foo-*- ";
" -*-font-mode:foo;mode:LITcofFEE;foo-bar:quux-*-"
"-*-font:x;foo:bar; mode : litCOFFEE; bar:foo;foooooo:baaaaar;fo:ba;-*-";
"-*- font:x;foo : bar ; mode : LiTcOFFEe ; bar : foo ; foooooo:baaaaar;fo:ba-*-";\
`;
for (line of Array.from(valid.split(/\n/))) {
expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull();
}
const invalid = `\
/* --*litcoffee-*- */
/* -*-- litcoffee -*-
/* -*- -- litcoffee -*-
/* -*- LITCOFFEE -;- -*-
// -*- itsLitCoffeeFam -*-
// -*- litcoffee; -*-
// -*- litcoffee-stuff -*-
/* -*- model:litcoffee -*-
/* -*- indent-mode:litcoffee -*-
// -*- font:mode;litcoffee -*-
// -*- mode: -*- litcoffee
// -*- mode: burnt-because-litcoffee -*-
// -*-font:mode;mode:litcoffee--*-\
`;
return (() => {
const result = [];
for (line of Array.from(invalid.split(/\n/))) {
result.push(expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull());
}
return result;
})();
});
it("recognises Vim modelines", function() {
let line;
const valid = `\
vim: se filetype=litcoffee:
# vim: se ft=litcoffee:
# vim: set ft=LITCOFFEE:
# vim: set filetype=litcoffee:
# vim: ft=LITCOFFEE
# vim: syntax=litcoffee
# vim: se syntax=litcoffee:
# ex: syntax=litcoffee
# vim:ft=LitCoffee
# vim600: ft=litcoffee
# vim>600: set ft=litcoffee:
# vi:noai:sw=3 ts=6 ft=litcoffee
# vi::::::::::noai:::::::::::: ft=litcoffee
# vim:ts=4:sts=4:sw=4:noexpandtab:ft=LITCOFFEE
# vi:: noai : : : : sw =3 ts =6 ft =litCoffee
# vim: ts=4: pi sts=4: ft=litcoffee: noexpandtab: sw=4:
# vim: ts=4 sts=4: ft=litcoffee noexpandtab:
# vim:noexpandtab sts=4 ft=LitCOffEE ts=4
# vim:noexpandtab:ft=litcoffee
# vim:ts=4:sts=4 ft=litcoffee:noexpandtab:\x20
# vim:noexpandtab titlestring=hi\|there\\\\ ft=litcoffee ts=4\
`;
for (line of Array.from(valid.split(/\n/))) {
expect(grammar.firstLineRegex.findNextMatchSync(line)).not.toBeNull();
}
const invalid = `\
ex: se filetype=litcoffee:
_vi: se filetype=litcoffee:
vi: se filetype=litcoffee
# vim set ft=illitcoffee
# vim: soft=litcoffee
# vim: clean-syntax=litcoffee:
# vim set ft=litcoffee:
# vim: setft=litcoffee:
# vim: se ft=litcoffee backupdir=tmp
# vim: set ft=LITCOFFEE set cmdheight=1
# vim:noexpandtab sts:4 ft:litcoffee ts:4
# vim:noexpandtab titlestring=hi\\|there\\ ft=litcoffee ts=4
# vim:noexpandtab titlestring=hi\\|there\\\\\\ ft=litcoffee ts=4\
`;
return (() => {
const result = [];
for (line of Array.from(invalid.split(/\n/))) {
result.push(expect(grammar.firstLineRegex.findNextMatchSync(line)).toBeNull());
}
return result;
})();
});
});
});

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,17 +0,0 @@
describe "Language C# package", ->
beforeEach ->
waitsForPromise ->
atom.packages.activatePackage("language-csharp")
describe "C# Script grammar", ->
it "parses the grammar", ->
grammar = atom.grammars.grammarForScopeName("source.csx")
expect(grammar).toBeDefined()
expect(grammar.scopeName).toBe "source.csx"
describe "C# Cake grammar", ->
it "parses the grammar", ->
grammar = atom.grammars.grammarForScopeName("source.cake")
expect(grammar).toBeDefined()
expect(grammar.scopeName).toBe "source.cake"


@@ -0,0 +1,17 @@
describe("Language C# package", function() {
beforeEach(() => waitsForPromise(() => atom.packages.activatePackage("language-csharp")));
describe("C# Script grammar", () => it("parses the grammar", function() {
const grammar = atom.grammars.grammarForScopeName("source.csx");
expect(grammar).toBeDefined();
expect(grammar.scopeName).toBe("source.csx");
}));
describe("C# Cake grammar", () => it("parses the grammar", function() {
const grammar = atom.grammars.grammarForScopeName("source.cake");
expect(grammar).toBeDefined();
expect(grammar.scopeName).toBe("source.cake");
}));
});

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,897 +0,0 @@
describe "GitHub Flavored Markdown grammar", ->
grammar = null
beforeEach ->
waitsForPromise ->
atom.packages.activatePackage("language-gfm")
runs ->
grammar = atom.grammars.grammarForScopeName("source.gfm")
it "parses the grammar", ->
expect(grammar).toBeDefined()
expect(grammar.scopeName).toBe "source.gfm"
it "tokenizes spaces", ->
{tokens} = grammar.tokenizeLine(" ")
expect(tokens[0]).toEqual value: " ", scopes: ["source.gfm"]
it "tokenizes horizontal rules", ->
{tokens} = grammar.tokenizeLine("***")
expect(tokens[0]).toEqual value: "***", scopes: ["source.gfm", "comment.hr.gfm"]
{tokens} = grammar.tokenizeLine("---")
expect(tokens[0]).toEqual value: "---", scopes: ["source.gfm", "comment.hr.gfm"]
{tokens} = grammar.tokenizeLine("___")
expect(tokens[0]).toEqual value: "___", scopes: ["source.gfm", "comment.hr.gfm"]
it "tokenizes escaped characters", ->
{tokens} = grammar.tokenizeLine("\\*")
expect(tokens[0]).toEqual value: "\\*", scopes: ["source.gfm", "constant.character.escape.gfm"]
{tokens} = grammar.tokenizeLine("\\\\")
expect(tokens[0]).toEqual value: "\\\\", scopes: ["source.gfm", "constant.character.escape.gfm"]
{tokens} = grammar.tokenizeLine("\\abc")
expect(tokens[0]).toEqual value: "\\a", scopes: ["source.gfm", "constant.character.escape.gfm"]
expect(tokens[1]).toEqual value: "bc", scopes: ["source.gfm"]
it "tokenizes ***bold italic*** text", ->
{tokens} = grammar.tokenizeLine("this is ***bold italic*** text")
expect(tokens[0]).toEqual value: "this is ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "***", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(tokens[2]).toEqual value: "bold italic", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(tokens[3]).toEqual value: "***", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(tokens[4]).toEqual value: " text", scopes: ["source.gfm"]
[firstLineTokens, secondLineTokens] = grammar.tokenizeLines("this is ***bold\nitalic***!")
expect(firstLineTokens[0]).toEqual value: "this is ", scopes: ["source.gfm"]
expect(firstLineTokens[1]).toEqual value: "***", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(firstLineTokens[2]).toEqual value: "bold", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(secondLineTokens[0]).toEqual value: "italic", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(secondLineTokens[1]).toEqual value: "***", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(secondLineTokens[2]).toEqual value: "!", scopes: ["source.gfm"]
it "tokenizes ___bold italic___ text", ->
{tokens} = grammar.tokenizeLine("this is ___bold italic___ text")
expect(tokens[0]).toEqual value: "this is ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "___", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(tokens[2]).toEqual value: "bold italic", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(tokens[3]).toEqual value: "___", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(tokens[4]).toEqual value: " text", scopes: ["source.gfm"]
[firstLineTokens, secondLineTokens] = grammar.tokenizeLines("this is ___bold\nitalic___!")
expect(firstLineTokens[0]).toEqual value: "this is ", scopes: ["source.gfm"]
expect(firstLineTokens[1]).toEqual value: "___", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(firstLineTokens[2]).toEqual value: "bold", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(secondLineTokens[0]).toEqual value: "italic", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(secondLineTokens[1]).toEqual value: "___", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(secondLineTokens[2]).toEqual value: "!", scopes: ["source.gfm"]
it "tokenizes **bold** text", ->
{tokens} = grammar.tokenizeLine("**bold**")
expect(tokens[0]).toEqual value: "**", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[1]).toEqual value: "bold", scopes: ["source.gfm", "markup.bold.gfm"]
expect(tokens[2]).toEqual value: "**", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]
[firstLineTokens, secondLineTokens] = grammar.tokenizeLines("this is **not\nbold**!")
expect(firstLineTokens[0]).toEqual value: "this is **not", scopes: ["source.gfm"]
expect(secondLineTokens[0]).toEqual value: "bold**!", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("not**bold**")
expect(tokens[0]).toEqual value: "not", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "**", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[2]).toEqual value: "bold", scopes: ["source.gfm", "markup.bold.gfm"]
expect(tokens[3]).toEqual value: "**", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]
it "tokenizes __bold__ text", ->
{tokens} = grammar.tokenizeLine("____")
expect(tokens[0]).toEqual value: "____", scopes: ["source.gfm", "comment.hr.gfm"]
{tokens} = grammar.tokenizeLine("__bold__")
expect(tokens[0]).toEqual value: "__", scopes: [ 'source.gfm', 'markup.bold.gfm', 'punctuation.definition.entity.gfm' ]
expect(tokens[1]).toEqual value: "bold", scopes: ["source.gfm", "markup.bold.gfm"]
expect(tokens[2]).toEqual value: "__", scopes: [ 'source.gfm', 'markup.bold.gfm', 'punctuation.definition.entity.gfm' ]
[firstLineTokens, secondLineTokens] = grammar.tokenizeLines("this is __not\nbold__!")
expect(firstLineTokens[0]).toEqual value: "this is __not", scopes: ["source.gfm"]
expect(secondLineTokens[0]).toEqual value: "bold__!", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("not__bold__")
expect(tokens[0]).toEqual value: "not__bold__", scopes: ["source.gfm"]
it "tokenizes *italic* text", ->
{tokens} = grammar.tokenizeLine("**")
expect(tokens[0]).toEqual value: "**", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("this is *italic* text")
expect(tokens[0]).toEqual value: "this is ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "*", scopes: [ "source.gfm", "markup.italic.gfm", "punctuation.definition.entity.gfm" ]
expect(tokens[2]).toEqual value: "italic", scopes: ["source.gfm", "markup.italic.gfm"]
expect(tokens[3]).toEqual value: "*", scopes: [ "source.gfm", "markup.italic.gfm", "punctuation.definition.entity.gfm" ]
expect(tokens[4]).toEqual value: " text", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("is*italic*")
expect(tokens[0]).toEqual value: "is", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "*", scopes: [ "source.gfm", "markup.italic.gfm", "punctuation.definition.entity.gfm" ]
expect(tokens[2]).toEqual value: "italic", scopes: ["source.gfm", "markup.italic.gfm"]
expect(tokens[3]).toEqual value: "*", scopes: [ "source.gfm", "markup.italic.gfm", "punctuation.definition.entity.gfm" ]
{tokens} = grammar.tokenizeLine("* not italic")
expect(tokens[0]).toEqual value: "*", scopes: ["source.gfm", "variable.unordered.list.gfm"]
expect(tokens[1]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[2]).toEqual value: "not italic", scopes: ["source.gfm"]
[firstLineTokens, secondLineTokens] = grammar.tokenizeLines("this is *not\nitalic*!")
expect(firstLineTokens[0]).toEqual value: "this is *not", scopes: ["source.gfm"]
expect(secondLineTokens[0]).toEqual value: "italic*!", scopes: ["source.gfm"]
it "tokenizes _italic_ text", ->
{tokens} = grammar.tokenizeLine("__")
expect(tokens[0]).toEqual value: "__", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("this is _italic_ text")
expect(tokens[0]).toEqual value: "this is ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "_", scopes: [ 'source.gfm', 'markup.italic.gfm', 'punctuation.definition.entity.gfm' ]
expect(tokens[2]).toEqual value: "italic", scopes: ["source.gfm", "markup.italic.gfm"]
expect(tokens[3]).toEqual value: "_", scopes: [ 'source.gfm', 'markup.italic.gfm', 'punctuation.definition.entity.gfm' ]
expect(tokens[4]).toEqual value: " text", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("not_italic_")
expect(tokens[0]).toEqual value: "not_italic_", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("not x^{a}_m y^{b}_n italic")
expect(tokens[0]).toEqual value: "not x^{a}_m y^{b}_n italic", scopes: ["source.gfm"]
[firstLineTokens, secondLineTokens] = grammar.tokenizeLines("this is _not\nitalic_!")
expect(firstLineTokens[0]).toEqual value: "this is _not", scopes: ["source.gfm"]
expect(secondLineTokens[0]).toEqual value: "italic_!", scopes: ["source.gfm"]
it "tokenizes ~~strike~~ text", ->
{tokens} = grammar.tokenizeLine("~~strike~~")
expect(tokens[0]).toEqual value: "~~", scopes: ["source.gfm", "markup.strike.gfm"]
expect(tokens[1]).toEqual value: "strike", scopes: ["source.gfm", "markup.strike.gfm"]
expect(tokens[2]).toEqual value: "~~", scopes: ["source.gfm", "markup.strike.gfm"]
[firstLineTokens, secondLineTokens] = grammar.tokenizeLines("this is ~~str\nike~~!")
expect(firstLineTokens[0]).toEqual value: "this is ", scopes: ["source.gfm"]
expect(firstLineTokens[1]).toEqual value: "~~", scopes: ["source.gfm", "markup.strike.gfm"]
expect(firstLineTokens[2]).toEqual value: "str", scopes: ["source.gfm", "markup.strike.gfm"]
expect(secondLineTokens[0]).toEqual value: "ike", scopes: ["source.gfm", "markup.strike.gfm"]
expect(secondLineTokens[1]).toEqual value: "~~", scopes: ["source.gfm", "markup.strike.gfm"]
expect(secondLineTokens[2]).toEqual value: "!", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("not~~strike~~")
expect(tokens[0]).toEqual value: "not~~strike~~", scopes: ["source.gfm"]
it "tokenizes headings", ->
{tokens} = grammar.tokenizeLine("# Heading 1")
expect(tokens[0]).toEqual value: "#", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "markup.heading.marker.gfm"]
expect(tokens[1]).toEqual value: " ", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "markup.heading.space.gfm"]
expect(tokens[2]).toEqual value: "Heading 1", scopes: ["source.gfm", "markup.heading.heading-1.gfm"]
{tokens} = grammar.tokenizeLine("## Heading 2")
expect(tokens[0]).toEqual value: "##", scopes: ["source.gfm", "markup.heading.heading-2.gfm", "markup.heading.marker.gfm"]
expect(tokens[1]).toEqual value: " ", scopes: ["source.gfm", "markup.heading.heading-2.gfm", "markup.heading.space.gfm"]
expect(tokens[2]).toEqual value: "Heading 2", scopes: ["source.gfm", "markup.heading.heading-2.gfm"]
{tokens} = grammar.tokenizeLine("### Heading 3")
expect(tokens[0]).toEqual value: "###", scopes: ["source.gfm", "markup.heading.heading-3.gfm", "markup.heading.marker.gfm"]
expect(tokens[1]).toEqual value: " ", scopes: ["source.gfm", "markup.heading.heading-3.gfm", "markup.heading.space.gfm"]
expect(tokens[2]).toEqual value: "Heading 3", scopes: ["source.gfm", "markup.heading.heading-3.gfm"]
{tokens} = grammar.tokenizeLine("#### Heading 4")
expect(tokens[0]).toEqual value: "####", scopes: ["source.gfm", "markup.heading.heading-4.gfm", "markup.heading.marker.gfm"]
expect(tokens[1]).toEqual value: " ", scopes: ["source.gfm", "markup.heading.heading-4.gfm", "markup.heading.space.gfm"]
expect(tokens[2]).toEqual value: "Heading 4", scopes: ["source.gfm", "markup.heading.heading-4.gfm"]
{tokens} = grammar.tokenizeLine("##### Heading 5")
expect(tokens[0]).toEqual value: "#####", scopes: ["source.gfm", "markup.heading.heading-5.gfm", "markup.heading.marker.gfm"]
expect(tokens[1]).toEqual value: " ", scopes: ["source.gfm", "markup.heading.heading-5.gfm", "markup.heading.space.gfm"]
expect(tokens[2]).toEqual value: "Heading 5", scopes: ["source.gfm", "markup.heading.heading-5.gfm"]
{tokens} = grammar.tokenizeLine("###### Heading 6")
expect(tokens[0]).toEqual value: "######", scopes: ["source.gfm", "markup.heading.heading-6.gfm", "markup.heading.marker.gfm"]
expect(tokens[1]).toEqual value: " ", scopes: ["source.gfm", "markup.heading.heading-6.gfm", "markup.heading.space.gfm"]
expect(tokens[2]).toEqual value: "Heading 6", scopes: ["source.gfm", "markup.heading.heading-6.gfm"]
it "tokenizes matches inside of headers", ->
{tokens} = grammar.tokenizeLine("# Heading :one:")
expect(tokens[0]).toEqual value: "#", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "markup.heading.marker.gfm"]
expect(tokens[1]).toEqual value: " ", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "markup.heading.space.gfm"]
expect(tokens[2]).toEqual value: "Heading ", scopes: ["source.gfm", "markup.heading.heading-1.gfm"]
expect(tokens[3]).toEqual value: ":", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "string.emoji.gfm", "string.emoji.start.gfm"]
expect(tokens[4]).toEqual value: "one", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "string.emoji.gfm", "string.emoji.word.gfm"]
expect(tokens[5]).toEqual value: ":", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "string.emoji.gfm", "string.emoji.end.gfm"]
it "tokenizes an :emoji:", ->
{tokens} = grammar.tokenizeLine("this is :no_good:")
expect(tokens[0]).toEqual value: "this is ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: ":", scopes: ["source.gfm", "string.emoji.gfm", "string.emoji.start.gfm"]
expect(tokens[2]).toEqual value: "no_good", scopes: ["source.gfm", "string.emoji.gfm", "string.emoji.word.gfm"]
expect(tokens[3]).toEqual value: ":", scopes: ["source.gfm", "string.emoji.gfm", "string.emoji.end.gfm"]
{tokens} = grammar.tokenizeLine("this is :no good:")
expect(tokens[0]).toEqual value: "this is :no good:", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("http://localhost:8080")
expect(tokens[0]).toEqual value: "http://localhost:8080", scopes: ["source.gfm"]
it "tokenizes a ``` code block", ->
{tokens, ruleStack} = grammar.tokenizeLine("```")
expect(tokens[0]).toEqual value: "```", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]
{tokens, ruleStack} = grammar.tokenizeLine("-> 'hello'", ruleStack)
expect(tokens[0]).toEqual value: "-> 'hello'", scopes: ["source.gfm", "markup.raw.gfm"]
{tokens} = grammar.tokenizeLine("```", ruleStack)
expect(tokens[0]).toEqual value: "```", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]
it "tokenizes a ~~~ code block", ->
{tokens, ruleStack} = grammar.tokenizeLine("~~~")
expect(tokens[0]).toEqual value: "~~~", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]
{tokens, ruleStack} = grammar.tokenizeLine("-> 'hello'", ruleStack)
expect(tokens[0]).toEqual value: "-> 'hello'", scopes: ["source.gfm", "markup.raw.gfm"]
{tokens} = grammar.tokenizeLine("~~~", ruleStack)
expect(tokens[0]).toEqual value: "~~~", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]
it "doesn't tokenise ~`~ as a code block", ->
{tokens} = grammar.tokenizeLine("~`~")
expect(tokens[0]).toEqual value: '~', scopes: ['source.gfm']
expect(tokens[1]).toEqual value: '`', scopes: ['source.gfm', 'markup.raw.gfm']
expect(tokens[2]).toEqual value: '~', scopes: ['source.gfm', 'markup.raw.gfm']
it "tokenises code-blocks with borders of differing lengths", ->
[firstLineTokens, secondLineTokens, thirdLineTokens] = grammar.tokenizeLines("~~~\nfoo bar\n~~~~~~~")
expect(firstLineTokens[0]).toEqual value: '~~~', scopes: ['source.gfm', 'markup.raw.gfm', 'support.gfm']
expect(secondLineTokens[0]).toEqual value: 'foo bar', scopes: ['source.gfm', 'markup.raw.gfm']
expect(thirdLineTokens[0]).toEqual value: '~~~~~~~', scopes: ['source.gfm', 'markup.raw.gfm', 'support.gfm']
[firstLineTokens, secondLineTokens, thirdLineTokens] = grammar.tokenizeLines("~~~~~~~\nfoo bar\n~~~")
expect(firstLineTokens[0]).toEqual value: '~~~~~~~', scopes: ['source.gfm', 'markup.raw.gfm', 'support.gfm']
expect(secondLineTokens[0]).toEqual value: 'foo bar', scopes: ['source.gfm', 'markup.raw.gfm']
expect(thirdLineTokens[0]).toEqual value: '~~~', scopes: ['source.gfm', 'markup.raw.gfm']
it "tokenizes a ``` code block with trailing whitespace", ->
{tokens, ruleStack} = grammar.tokenizeLine("```")
expect(tokens[0]).toEqual value: "```", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]
{tokens, ruleStack} = grammar.tokenizeLine("-> 'hello'", ruleStack)
expect(tokens[0]).toEqual value: "-> 'hello'", scopes: ["source.gfm", "markup.raw.gfm"]
{tokens} = grammar.tokenizeLine("``` ", ruleStack)
expect(tokens[0]).toEqual value: "``` ", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]
it "tokenizes a ~~~ code block with trailing whitespace", ->
{tokens, ruleStack} = grammar.tokenizeLine("~~~")
expect(tokens[0]).toEqual value: "~~~", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]
{tokens, ruleStack} = grammar.tokenizeLine("-> 'hello'", ruleStack)
expect(tokens[0]).toEqual value: "-> 'hello'", scopes: ["source.gfm", "markup.raw.gfm"]
{tokens} = grammar.tokenizeLine("~~~ ", ruleStack)
expect(tokens[0]).toEqual value: "~~~ ", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]
it "tokenises a ``` code block with an unknown language", ->
{tokens, ruleStack} = grammar.tokenizeLine("``` myLanguage")
expect(tokens[0]).toEqual value: '``` myLanguage', scopes: ['source.gfm', 'markup.code.other.gfm', 'support.gfm']
{tokens, ruleStack} = grammar.tokenizeLine("-> 'hello'", ruleStack)
expect(tokens[0]).toEqual value: "-> 'hello'", scopes: ['source.gfm', 'markup.code.other.gfm', 'source.embedded.mylanguage']
{tokens} = grammar.tokenizeLine("```", ruleStack)
expect(tokens[0]).toEqual value: '```', scopes: ['source.gfm', 'markup.code.other.gfm', 'support.gfm']
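# A recognized fence language switches the block to a language-specific scope
# (markup.code.<lang>.gfm) and embeds the matching grammar, exposed here via
# the rule's contentScopeName.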
it "tokenizes a ``` code block with a known language", ->
{tokens, ruleStack} = grammar.tokenizeLine("``` bash")
expect(tokens[0]).toEqual value: "``` bash", scopes: ["source.gfm", "markup.code.shell.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.shell"
{tokens, ruleStack} = grammar.tokenizeLine("```js ")
expect(tokens[0]).toEqual value: "```js ", scopes: ["source.gfm", "markup.code.js.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.js"
{tokens, ruleStack} = grammar.tokenizeLine("```JS ")
expect(tokens[0]).toEqual value: "```JS ", scopes: ["source.gfm", "markup.code.js.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.js"
{tokens, ruleStack} = grammar.tokenizeLine("```r ")
expect(tokens[0]).toEqual value: "```r ", scopes: ["source.gfm", "markup.code.r.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.r"
{tokens, ruleStack} = grammar.tokenizeLine("```properties ")
expect(tokens[0]).toEqual value: "```properties ", scopes: ["source.gfm", "markup.code.git-config.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.git-config"
it "tokenizes a Rmarkdown ``` code block", ->
{tokens, ruleStack} = grammar.tokenizeLine("```{r}")
expect(tokens[0]).toEqual value: "```{r}", scopes: ["source.gfm", "markup.code.r.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.r"
{tokens, ruleStack} = grammar.tokenizeLine("```{r,eval=TRUE,cache=FALSE}")
expect(tokens[0]).toEqual value: "```{r,eval=TRUE,cache=FALSE}", scopes: ["source.gfm", "markup.code.r.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.r"
{tokens, ruleStack} = grammar.tokenizeLine("```{r eval=TRUE,cache=FALSE}")
expect(tokens[0]).toEqual value: "```{r eval=TRUE,cache=FALSE}", scopes: ["source.gfm", "markup.code.r.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.r"
it "tokenizes a Rmarkdown ``` code block with whitespace", ->
{tokens, ruleStack} = grammar.tokenizeLine("```{r }")
expect(tokens[0]).toEqual value: "```{r }", scopes: ["source.gfm", "markup.code.r.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.r"
{tokens, ruleStack} = grammar.tokenizeLine("```{R } ")
expect(tokens[0]).toEqual value: "```{R } ", scopes: ["source.gfm", "markup.code.r.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.r"
{tokens, ruleStack} = grammar.tokenizeLine("```{r eval = TRUE, cache = FALSE}")
expect(tokens[0]).toEqual value: "```{r eval = TRUE, cache = FALSE}", scopes: ["source.gfm", "markup.code.r.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.r"
it "tokenizes a ~~~ code block with a language", ->
{tokens, ruleStack} = grammar.tokenizeLine("~~~ bash")
expect(tokens[0]).toEqual value: "~~~ bash", scopes: ["source.gfm", "markup.code.shell.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.shell"
{tokens, ruleStack} = grammar.tokenizeLine("~~~js ")
expect(tokens[0]).toEqual value: "~~~js ", scopes: ["source.gfm", "markup.code.js.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.js"
{tokens, ruleStack} = grammar.tokenizeLine("~~~properties ")
expect(tokens[0]).toEqual value: "~~~properties ", scopes: ["source.gfm", "markup.code.git-config.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.git-config"
it "tokenizes a ``` code block with a language and trailing whitespace", ->
{tokens, ruleStack} = grammar.tokenizeLine("``` bash")
{tokens} = grammar.tokenizeLine("``` ", ruleStack)
expect(tokens[0]).toEqual value: "``` ", scopes: ["source.gfm", "markup.code.shell.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.shell"
{tokens, ruleStack} = grammar.tokenizeLine("```js ")
{tokens} = grammar.tokenizeLine("``` ", ruleStack)
expect(tokens[0]).toEqual value: "``` ", scopes: ["source.gfm", "markup.code.js.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.js"
it "tokenizes a ~~~ code block with a language and trailing whitespace", ->
{tokens, ruleStack} = grammar.tokenizeLine("~~~ bash")
{tokens} = grammar.tokenizeLine("~~~ ", ruleStack)
expect(tokens[0]).toEqual value: "~~~ ", scopes: ["source.gfm", "markup.code.shell.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.shell"
{tokens, ruleStack} = grammar.tokenizeLine("~~~js ")
{tokens} = grammar.tokenizeLine("~~~ ", ruleStack)
expect(tokens[0]).toEqual value: "~~~ ", scopes: ["source.gfm", "markup.code.js.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.js"
{tokens, ruleStack} = grammar.tokenizeLine("~~~ properties ")
{tokens} = grammar.tokenizeLine("~~~ ", ruleStack)
expect(tokens[0]).toEqual value: "~~~ ", scopes: ["source.gfm", "markup.code.git-config.gfm", "support.gfm"]
expect(ruleStack[1].contentScopeName).toBe "source.embedded.git-config"
it "tokenizes inline `code` blocks", ->
{tokens} = grammar.tokenizeLine("`this` is `code`")
expect(tokens[0]).toEqual value: "`", scopes: ["source.gfm", "markup.raw.gfm"]
expect(tokens[1]).toEqual value: "this", scopes: ["source.gfm", "markup.raw.gfm"]
expect(tokens[2]).toEqual value: "`", scopes: ["source.gfm", "markup.raw.gfm"]
expect(tokens[3]).toEqual value: " is ", scopes: ["source.gfm"]
expect(tokens[4]).toEqual value: "`", scopes: ["source.gfm", "markup.raw.gfm"]
expect(tokens[5]).toEqual value: "code", scopes: ["source.gfm", "markup.raw.gfm"]
expect(tokens[6]).toEqual value: "`", scopes: ["source.gfm", "markup.raw.gfm"]
{tokens} = grammar.tokenizeLine("``")
expect(tokens[0]).toEqual value: "`", scopes: ["source.gfm", "markup.raw.gfm"]
expect(tokens[1]).toEqual value: "`", scopes: ["source.gfm", "markup.raw.gfm"]
{tokens} = grammar.tokenizeLine("``a\\`b``")
expect(tokens[0]).toEqual value: "``", scopes: ["source.gfm", "markup.raw.gfm"]
expect(tokens[1]).toEqual value: "a\\`b", scopes: ["source.gfm", "markup.raw.gfm"]
expect(tokens[2]).toEqual value: "``", scopes: ["source.gfm", "markup.raw.gfm"]
it "tokenizes [links](links)", ->
{tokens} = grammar.tokenizeLine("please click [this link](website)")
expect(tokens[0]).toEqual value: "please click ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[2]).toEqual value: "this link", scopes: ["source.gfm", "link", "entity.gfm"]
expect(tokens[3]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[4]).toEqual value: "(", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[5]).toEqual value: "website", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]
expect(tokens[6]).toEqual value: ")", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
it "tokenizes reference [links][links]", ->
{tokens} = grammar.tokenizeLine("please click [this link][website]")
expect(tokens[0]).toEqual value: "please click ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[2]).toEqual value: "this link", scopes: ["source.gfm", "link", "entity.gfm"]
expect(tokens[3]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[4]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[5]).toEqual value: "website", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]
expect(tokens[6]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
it "tokenizes id-less reference [links][]", ->
{tokens} = grammar.tokenizeLine("please click [this link][]")
expect(tokens[0]).toEqual value: "please click ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[2]).toEqual value: "this link", scopes: ["source.gfm", "link", "entity.gfm"]
expect(tokens[3]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[4]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[5]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
it "tokenizes [link]: footers", ->
{tokens} = grammar.tokenizeLine("[aLink]: http://website")
expect(tokens[0]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[1]).toEqual value: "aLink", scopes: ["source.gfm", "link", "entity.gfm"]
expect(tokens[2]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[3]).toEqual value: ":", scopes: ["source.gfm", "link", "punctuation.separator.key-value.gfm"]
expect(tokens[4]).toEqual value: " ", scopes: ["source.gfm", "link"]
expect(tokens[5]).toEqual value: "http://website", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]
it "tokenizes [link]: <footers>", ->
{tokens} = grammar.tokenizeLine("[aLink]: <http://website>")
expect(tokens[0]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[1]).toEqual value: "aLink", scopes: ["source.gfm", "link", "entity.gfm"]
expect(tokens[2]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[3]).toEqual value: ": <", scopes: ["source.gfm", "link"]
expect(tokens[4]).toEqual value: "http://website", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]
expect(tokens[5]).toEqual value: ">", scopes: ["source.gfm", "link"]
it "tokenizes [![links](links)](links)", ->
{tokens} = grammar.tokenizeLine("[![title](image)](link)")
expect(tokens[0]).toEqual value: "[!", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[1]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[2]).toEqual value: "title", scopes: ["source.gfm", "link", "entity.gfm"]
expect(tokens[3]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[4]).toEqual value: "(", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[5]).toEqual value: "image", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]
expect(tokens[6]).toEqual value: ")", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[7]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[8]).toEqual value: "(", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[9]).toEqual value: "link", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]
expect(tokens[10]).toEqual value: ")", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
it "tokenizes [![links](links)][links]", ->
{tokens} = grammar.tokenizeLine("[![title](image)][link]")
expect(tokens[0]).toEqual value: "[!", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[1]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[2]).toEqual value: "title", scopes: ["source.gfm", "link", "entity.gfm"]
expect(tokens[3]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[4]).toEqual value: "(", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[5]).toEqual value: "image", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]
expect(tokens[6]).toEqual value: ")", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[7]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[8]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[9]).toEqual value: "link", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]
expect(tokens[10]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
it "tokenizes [![links][links]](links)", ->
{tokens} = grammar.tokenizeLine("[![title][image]](link)")
expect(tokens[0]).toEqual value: "[!", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[1]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[2]).toEqual value: "title", scopes: ["source.gfm", "link", "entity.gfm"]
expect(tokens[3]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[4]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[5]).toEqual value: "image", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]
expect(tokens[6]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[7]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[8]).toEqual value: "(", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[9]).toEqual value: "link", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]
expect(tokens[10]).toEqual value: ")", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
it "tokenizes [![links][links]][links]", ->
{tokens} = grammar.tokenizeLine("[![title][image]][link]")
expect(tokens[0]).toEqual value: "[!", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[1]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[2]).toEqual value: "title", scopes: ["source.gfm", "link", "entity.gfm"]
expect(tokens[3]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[4]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[5]).toEqual value: "image", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]
expect(tokens[6]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[7]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
expect(tokens[8]).toEqual value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]
expect(tokens[9]).toEqual value: "link", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]
expect(tokens[10]).toEqual value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]
it "tokenizes mentions", ->
{tokens} = grammar.tokenizeLine("sentence with no space before@name ")
expect(tokens[0]).toEqual value: "sentence with no space before@name ", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("@name '@name' @name's @name. @name, (@name) [@name]")
expect(tokens[0]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[1]).toEqual value: "name", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[2]).toEqual value: " '", scopes: ["source.gfm"]
expect(tokens[3]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[4]).toEqual value: "name", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[5]).toEqual value: "' ", scopes: ["source.gfm"]
expect(tokens[6]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[7]).toEqual value: "name", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[8]).toEqual value: "'s ", scopes: ["source.gfm"]
expect(tokens[9]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[10]).toEqual value: "name", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[11]).toEqual value: ". ", scopes: ["source.gfm"]
expect(tokens[12]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[13]).toEqual value: "name", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[14]).toEqual value: ", (", scopes: ["source.gfm"]
expect(tokens[15]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[16]).toEqual value: "name", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[17]).toEqual value: ") [", scopes: ["source.gfm"]
expect(tokens[18]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[19]).toEqual value: "name", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[20]).toEqual value: "]", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine('"@name"')
expect(tokens[0]).toEqual value: '"', scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[2]).toEqual value: "name", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[3]).toEqual value: '"', scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("sentence with a space before @name/ and an invalid symbol after")
expect(tokens[0]).toEqual value: "sentence with a space before @name/ and an invalid symbol after", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("sentence with a space before @name that continues")
expect(tokens[0]).toEqual value: "sentence with a space before ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[2]).toEqual value: "name", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[3]).toEqual value: " that continues", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("* @name at the start of an unordered list")
expect(tokens[0]).toEqual value: "*", scopes: ["source.gfm", "variable.unordered.list.gfm"]
expect(tokens[1]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[2]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[3]).toEqual value: "name", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[4]).toEqual value: " at the start of an unordered list", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("a username @1337_hubot with numbers, letters and underscores")
expect(tokens[0]).toEqual value: "a username ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[2]).toEqual value: "1337_hubot", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[3]).toEqual value: " with numbers, letters and underscores", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("a username @1337-hubot with numbers, letters and hyphens")
expect(tokens[0]).toEqual value: "a username ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[2]).toEqual value: "1337-hubot", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[3]).toEqual value: " with numbers, letters and hyphens", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("@name at the start of a line")
expect(tokens[0]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[1]).toEqual value: "name", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[2]).toEqual value: " at the start of a line", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("any email like you@domain.com shouldn't mistakenly be matched as a mention")
expect(tokens[0]).toEqual value: "any email like you@domain.com shouldn't mistakenly be matched as a mention", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("@person's")
expect(tokens[0]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[1]).toEqual value: "person", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[2]).toEqual value: "'s", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("@person;")
expect(tokens[0]).toEqual value: "@", scopes: ["source.gfm", "variable.mention.gfm"]
expect(tokens[1]).toEqual value: "person", scopes: ["source.gfm", "string.username.gfm"]
expect(tokens[2]).toEqual value: ";", scopes: ["source.gfm"]
it "tokenizes issue numbers", ->
{tokens} = grammar.tokenizeLine("sentence with no space before#12 ")
expect(tokens[0]).toEqual value: "sentence with no space before#12 ", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine(" #101 '#101' #101's #101. #101, (#101) [#101]")
expect(tokens[1]).toEqual value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]
expect(tokens[2]).toEqual value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]
expect(tokens[3]).toEqual value: " '", scopes: ["source.gfm"]
expect(tokens[4]).toEqual value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]
expect(tokens[5]).toEqual value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]
expect(tokens[6]).toEqual value: "' ", scopes: ["source.gfm"]
expect(tokens[7]).toEqual value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]
expect(tokens[8]).toEqual value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]
expect(tokens[9]).toEqual value: "'s ", scopes: ["source.gfm"]
expect(tokens[10]).toEqual value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]
expect(tokens[11]).toEqual value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]
expect(tokens[12]).toEqual value: ". ", scopes: ["source.gfm"]
expect(tokens[13]).toEqual value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]
expect(tokens[14]).toEqual value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]
expect(tokens[15]).toEqual value: ", (", scopes: ["source.gfm"]
expect(tokens[16]).toEqual value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]
expect(tokens[17]).toEqual value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]
expect(tokens[18]).toEqual value: ") [", scopes: ["source.gfm"]
expect(tokens[19]).toEqual value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]
expect(tokens[20]).toEqual value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]
expect(tokens[21]).toEqual value: "]", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine('"#101"')
expect(tokens[0]).toEqual value: '"', scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]
expect(tokens[2]).toEqual value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]
expect(tokens[3]).toEqual value: '"', scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("sentence with a space before #123i and a character after")
expect(tokens[0]).toEqual value: "sentence with a space before #123i and a character after", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine("sentence with a space before #123 that continues")
expect(tokens[0]).toEqual value: "sentence with a space before ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]
expect(tokens[2]).toEqual value: "123", scopes: ["source.gfm", "string.issue.number.gfm"]
expect(tokens[3]).toEqual value: " that continues", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine(" #123's")
expect(tokens[1]).toEqual value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]
expect(tokens[2]).toEqual value: "123", scopes: ["source.gfm", "string.issue.number.gfm"]
expect(tokens[3]).toEqual value: "'s", scopes: ["source.gfm"]
it "tokenizes unordered lists", ->
{tokens} = grammar.tokenizeLine("*Item 1")
expect(tokens[0]).not.toEqual value: "*Item 1", scopes: ["source.gfm", "variable.unordered.list.gfm"]
{tokens} = grammar.tokenizeLine(" * Item 1")
expect(tokens[0]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "*", scopes: ["source.gfm", "variable.unordered.list.gfm"]
expect(tokens[2]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[3]).toEqual value: "Item 1", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine(" + Item 2")
expect(tokens[0]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "+", scopes: ["source.gfm", "variable.unordered.list.gfm"]
expect(tokens[2]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[3]).toEqual value: "Item 2", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine(" - Item 3")
expect(tokens[0]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "-", scopes: ["source.gfm", "variable.unordered.list.gfm"]
expect(tokens[2]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[3]).toEqual value: "Item 3", scopes: ["source.gfm"]
it "tokenizes ordered lists", ->
{tokens} = grammar.tokenizeLine("1.First Item")
expect(tokens[0]).toEqual value: "1.First Item", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine(" 1. First Item")
expect(tokens[0]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "1.", scopes: ["source.gfm", "variable.ordered.list.gfm"]
expect(tokens[2]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[3]).toEqual value: "First Item", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine(" 10. Tenth Item")
expect(tokens[0]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "10.", scopes: ["source.gfm", "variable.ordered.list.gfm"]
expect(tokens[2]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[3]).toEqual value: "Tenth Item", scopes: ["source.gfm"]
{tokens} = grammar.tokenizeLine(" 111. Hundred and eleventh item")
expect(tokens[0]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: "111.", scopes: ["source.gfm", "variable.ordered.list.gfm"]
expect(tokens[2]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[3]).toEqual value: "Hundred and eleventh item", scopes: ["source.gfm"]
it "tokenizes > quoted text", ->
{tokens} = grammar.tokenizeLine("> Quotation :+1:")
expect(tokens[0]).toEqual value: ">", scopes: ["source.gfm", "comment.quote.gfm", "support.quote.gfm"]
expect(tokens[1]).toEqual value: " Quotation :+1:", scopes: ["source.gfm", "comment.quote.gfm"]
it "tokenizes HTML entities", ->
{tokens} = grammar.tokenizeLine("&trade; &#8482; &a1; &#xb3;")
expect(tokens[0]).toEqual value: "&", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[1]).toEqual value: "trade", scopes: ["source.gfm", "constant.character.entity.gfm"]
expect(tokens[2]).toEqual value: ";", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[3]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[4]).toEqual value: "&", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[5]).toEqual value: "#8482", scopes: ["source.gfm", "constant.character.entity.gfm"]
expect(tokens[6]).toEqual value: ";", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[7]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[8]).toEqual value: "&", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[9]).toEqual value: "a1", scopes: ["source.gfm", "constant.character.entity.gfm"]
expect(tokens[10]).toEqual value: ";", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[11]).toEqual value: " ", scopes: ["source.gfm"]
expect(tokens[12]).toEqual value: "&", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[13]).toEqual value: "#xb3", scopes: ["source.gfm", "constant.character.entity.gfm"]
expect(tokens[14]).toEqual value: ";", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
it "tokenizes HTML entities in *italic* text", ->
{tokens} = grammar.tokenizeLine("*&trade; &#8482; &#xb3;*")
expect(tokens[0]).toEqual value: "*", scopes: [ 'source.gfm', 'markup.italic.gfm', 'punctuation.definition.entity.gfm' ]
expect(tokens[1]).toEqual value: "&", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[2]).toEqual value: "trade", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm"]
expect(tokens[3]).toEqual value: ";", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[4]).toEqual value: " ", scopes: ["source.gfm", "markup.italic.gfm"]
expect(tokens[5]).toEqual value: "&", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[6]).toEqual value: "#8482", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm"]
expect(tokens[7]).toEqual value: ";", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[8]).toEqual value: " ", scopes: ["source.gfm", "markup.italic.gfm"]
expect(tokens[9]).toEqual value: "&", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[10]).toEqual value: "#xb3", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm"]
expect(tokens[11]).toEqual value: ";", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[12]).toEqual value: "*", scopes: [ 'source.gfm', 'markup.italic.gfm', 'punctuation.definition.entity.gfm' ]
{tokens} = grammar.tokenizeLine("_&trade; &#8482; &#xb3;_")
expect(tokens[0]).toEqual value: "_", scopes: [ 'source.gfm', 'markup.italic.gfm', 'punctuation.definition.entity.gfm' ]
expect(tokens[1]).toEqual value: "&", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[2]).toEqual value: "trade", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm"]
expect(tokens[3]).toEqual value: ";", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[4]).toEqual value: " ", scopes: ["source.gfm", "markup.italic.gfm"]
expect(tokens[5]).toEqual value: "&", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[6]).toEqual value: "#8482", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm"]
expect(tokens[7]).toEqual value: ";", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[8]).toEqual value: " ", scopes: ["source.gfm", "markup.italic.gfm"]
expect(tokens[9]).toEqual value: "&", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[10]).toEqual value: "#xb3", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm"]
expect(tokens[11]).toEqual value: ";", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[12]).toEqual value: "_", scopes: [ 'source.gfm', 'markup.italic.gfm', 'punctuation.definition.entity.gfm' ]
it "tokenizes HTML entities in **bold** text", ->
{tokens} = grammar.tokenizeLine("**&trade; &#8482; &#xb3;**")
expect(tokens[0]).toEqual value: "**", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[1]).toEqual value: "&", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[2]).toEqual value: "trade", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm"]
expect(tokens[3]).toEqual value: ";", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[4]).toEqual value: " ", scopes: ["source.gfm", "markup.bold.gfm"]
expect(tokens[5]).toEqual value: "&", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[6]).toEqual value: "#8482", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm"]
expect(tokens[7]).toEqual value: ";", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[8]).toEqual value: " ", scopes: ["source.gfm", "markup.bold.gfm"]
expect(tokens[9]).toEqual value: "&", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[10]).toEqual value: "#xb3", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm"]
expect(tokens[11]).toEqual value: ";", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[12]).toEqual value: "**", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]
{tokens} = grammar.tokenizeLine("__&trade; &#8482; &#xb3;__")
expect(tokens[0]).toEqual value: "__", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[1]).toEqual value: "&", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[2]).toEqual value: "trade", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm"]
expect(tokens[3]).toEqual value: ";", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[4]).toEqual value: " ", scopes: ["source.gfm", "markup.bold.gfm"]
expect(tokens[5]).toEqual value: "&", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[6]).toEqual value: "#8482", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm"]
expect(tokens[7]).toEqual value: ";", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[8]).toEqual value: " ", scopes: ["source.gfm", "markup.bold.gfm"]
expect(tokens[9]).toEqual value: "&", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[10]).toEqual value: "#xb3", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm"]
expect(tokens[11]).toEqual value: ";", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[12]).toEqual value: "__", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]
it "tokenizes HTML entities in ***bold italic*** text", ->
{tokens} = grammar.tokenizeLine("***&trade; &#8482; &#xb3;***")
expect(tokens[0]).toEqual value: "***", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(tokens[1]).toEqual value: "&", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[2]).toEqual value: "trade", scopes: [ "source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm" ]
expect(tokens[3]).toEqual value: ";", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm"]
expect(tokens[4]).toEqual value: " ", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(tokens[5]).toEqual value: "&", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[6]).toEqual value: "#8482", scopes: [ "source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm" ]
expect(tokens[7]).toEqual value: ";", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm"]
expect(tokens[8]).toEqual value: " ", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(tokens[9]).toEqual value: "&", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[10]).toEqual value: "#xb3", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[11]).toEqual value: ";", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm"]
expect(tokens[12]).toEqual value: "***", scopes: ["source.gfm", "markup.bold.italic.gfm"]
{tokens} = grammar.tokenizeLine("___&trade; &#8482; &#xb3;___")
expect(tokens[0]).toEqual value: "___", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(tokens[1]).toEqual value: "&", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[2]).toEqual value: "trade", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[3]).toEqual value: ";", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm"]
expect(tokens[4]).toEqual value: " ", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(tokens[5]).toEqual value: "&", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[6]).toEqual value: "#8482", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[7]).toEqual value: ";", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm"]
expect(tokens[8]).toEqual value: " ", scopes: ["source.gfm", "markup.bold.italic.gfm"]
expect(tokens[9]).toEqual value: "&", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[10]).toEqual value: "#xb3", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[11]).toEqual value: ";", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm"]
expect(tokens[12]).toEqual value: "___", scopes: ["source.gfm", "markup.bold.italic.gfm"]
it "tokenizes HTML entities in strikethrough text", ->
{tokens} = grammar.tokenizeLine("~~&trade; &#8482; &#xb3;~~")
expect(tokens[0]).toEqual value: "~~", scopes: ["source.gfm", "markup.strike.gfm"]
expect(tokens[1]).toEqual value: "&", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[2]).toEqual value: "trade", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm"]
expect(tokens[3]).toEqual value: ";", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[4]).toEqual value: " ", scopes: ["source.gfm", "markup.strike.gfm"]
expect(tokens[5]).toEqual value: "&", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[6]).toEqual value: "#8482", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm"]
expect(tokens[7]).toEqual value: ";", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[8]).toEqual value: " ", scopes: ["source.gfm", "markup.strike.gfm"]
expect(tokens[9]).toEqual value: "&", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[10]).toEqual value: "#xb3", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm"]
expect(tokens[11]).toEqual value: ";", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]
expect(tokens[12]).toEqual value: "~~", scopes: ["source.gfm", "markup.strike.gfm"]
it "tokenizes HTML comments", ->
{tokens} = grammar.tokenizeLine("<!-- a comment -->")
expect(tokens[0]).toEqual value: "<!--", scopes: ["source.gfm", "comment.block.gfm", "punctuation.definition.comment.gfm"]
expect(tokens[1]).toEqual value: " a comment ", scopes: ["source.gfm", "comment.block.gfm"]
expect(tokens[2]).toEqual value: "-->", scopes: ["source.gfm", "comment.block.gfm", "punctuation.definition.comment.gfm"]
it "tokenizes YAML front matter", ->
[firstLineTokens, secondLineTokens, thirdLineTokens] = grammar.tokenizeLines """
---
front: matter
---
"""
expect(firstLineTokens[0]).toEqual value: "---", scopes: ["source.gfm", "front-matter.yaml.gfm", "comment.hr.gfm"]
expect(secondLineTokens[0]).toEqual value: "front: matter", scopes: ["source.gfm", "front-matter.yaml.gfm"]
expect(thirdLineTokens[0]).toEqual value: "---", scopes: ["source.gfm", "front-matter.yaml.gfm", "comment.hr.gfm"]
it "tokenizes linebreaks", ->
{tokens} = grammar.tokenizeLine("line ")
expect(tokens[0]).toEqual value: "line", scopes: ["source.gfm"]
expect(tokens[1]).toEqual value: " ", scopes: ["source.gfm", "linebreak.gfm"]
it "tokenizes tables", ->
[headerTokens, alignTokens, contentTokens] = grammar.tokenizeLines """
| Column 1 | Column 2 |
|:----------|:---------:|
| Content 1 | Content 2 |
"""
# Header line
expect(headerTokens[0]).toEqual value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.outer"]
expect(headerTokens[1]).toEqual value: " Column 1 ", scopes: ["source.gfm", "table.gfm"]
expect(headerTokens[2]).toEqual value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.inner"]
expect(headerTokens[3]).toEqual value: " Column 2 ", scopes: ["source.gfm", "table.gfm"]
expect(headerTokens[4]).toEqual value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.outer"]
# Alignment line
expect(alignTokens[0]).toEqual value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.outer"]
expect(alignTokens[1]).toEqual value: ":", scopes: ["source.gfm", "table.gfm", "border.alignment"]
expect(alignTokens[2]).toEqual value: "----------", scopes: ["source.gfm", "table.gfm", "border.header"]
expect(alignTokens[3]).toEqual value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.inner"]
expect(alignTokens[4]).toEqual value: ":", scopes: ["source.gfm", "table.gfm", "border.alignment"]
expect(alignTokens[5]).toEqual value: "---------", scopes: ["source.gfm", "table.gfm", "border.header"]
expect(alignTokens[6]).toEqual value: ":", scopes: ["source.gfm", "table.gfm", "border.alignment"]
expect(alignTokens[7]).toEqual value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.outer"]
# Content line
expect(contentTokens[0]).toEqual value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.outer"]
expect(contentTokens[1]).toEqual value: " Content 1 ", scopes: ["source.gfm", "table.gfm"]
expect(contentTokens[2]).toEqual value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.inner"]
expect(contentTokens[3]).toEqual value: " Content 2 ", scopes: ["source.gfm", "table.gfm"]
expect(contentTokens[4]).toEqual value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.outer"]
[headerTokens, emptyLineTokens, headingTokens] = grammar.tokenizeLines """
| Column 1 | Column 2\t

# Heading
"""
expect(headerTokens[0]).toEqual value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.outer"]
expect(headerTokens[1]).toEqual value: " Column 1 ", scopes: ["source.gfm", "table.gfm"]
expect(headerTokens[2]).toEqual value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.inner"]
expect(headerTokens[3]).toEqual value: " Column 2", scopes: ["source.gfm", "table.gfm"]
expect(headerTokens[4]).toEqual value: "\t", scopes: ["source.gfm", "table.gfm"]
expect(headingTokens[0]).toEqual value: "#", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "markup.heading.marker.gfm"]
expect(headingTokens[1]).toEqual value: " ", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "markup.heading.space.gfm"]
expect(headingTokens[2]).toEqual value: "Heading", scopes: ["source.gfm", "markup.heading.heading-1.gfm"]
it "tokenizes criticmarkup", ->
[addToken, delToken, hlToken, subToken] = grammar.tokenizeLines """
Add{++ some text++}
Delete{-- some text--}
Highlight {==some text==}{>>with comment<<}
Replace {~~this~>by that~~}
"""
# Addition
expect(addToken[0]).toEqual value: "Add", scopes: ["source.gfm"]
expect(addToken[1]).toEqual value: "{++", scopes: ["source.gfm", "markup.inserted.critic.gfm.addition", "punctuation.definition.inserted.critic.gfm.addition.marker"]
expect(addToken[2]).toEqual value: " some text", scopes: ["source.gfm", "markup.inserted.critic.gfm.addition"]
expect(addToken[3]).toEqual value: "++}", scopes: ["source.gfm", "markup.inserted.critic.gfm.addition", "punctuation.definition.inserted.critic.gfm.addition.marker"]
# Deletion
expect(delToken[0]).toEqual value: "Delete", scopes: ["source.gfm"]
expect(delToken[1]).toEqual value: "{--", scopes: ["source.gfm", "markup.deleted.critic.gfm.deletion", "punctuation.definition.deleted.critic.gfm.deletion.marker"]
expect(delToken[2]).toEqual value: " some text", scopes: ["source.gfm", "markup.deleted.critic.gfm.deletion"]
expect(delToken[3]).toEqual value: "--}", scopes: ["source.gfm", "markup.deleted.critic.gfm.deletion", "punctuation.definition.deleted.critic.gfm.deletion.marker"]
# Comment and highlight
expect(hlToken[0]).toEqual value: "Highlight ", scopes: ["source.gfm"]
expect(hlToken[1]).toEqual value: "{==", scopes: ["source.gfm", "critic.gfm.highlight", "critic.gfm.highlight.marker"]
expect(hlToken[2]).toEqual value: "some text", scopes: ["source.gfm", "critic.gfm.highlight"]
expect(hlToken[3]).toEqual value: "==}", scopes: ["source.gfm", "critic.gfm.highlight", "critic.gfm.highlight.marker"]
expect(hlToken[4]).toEqual value: "{>>", scopes: ["source.gfm", "critic.gfm.comment", "critic.gfm.comment.marker"]
expect(hlToken[5]).toEqual value: "with comment", scopes: ["source.gfm", "critic.gfm.comment"]
expect(hlToken[6]).toEqual value: "<<}", scopes: ["source.gfm", "critic.gfm.comment", "critic.gfm.comment.marker"]
# Replace
expect(subToken[0]).toEqual value: "Replace ", scopes: ["source.gfm"]
expect(subToken[1]).toEqual value: "{~~", scopes: ["source.gfm", "markup.changed.critic.gfm.substitution", "punctuation.definition.changed.critic.gfm.substitution.marker"]
expect(subToken[2]).toEqual value: "this", scopes: ["source.gfm", "markup.changed.critic.gfm.substitution"]
expect(subToken[3]).toEqual value: "~>", scopes: ["source.gfm", "markup.changed.critic.gfm.substitution", "punctuation.definition.changed.critic.gfm.substitution.operator"]
expect(subToken[4]).toEqual value: "by that", scopes: ["source.gfm", "markup.changed.critic.gfm.substitution"]
expect(subToken[5]).toEqual value: "~~}", scopes: ["source.gfm", "markup.changed.critic.gfm.substitution", "punctuation.definition.changed.critic.gfm.substitution.marker"]
View File
@@ -0,0 +1,955 @@
describe("GitHub Flavored Markdown grammar", function() {
let grammar = null;
beforeEach(function() {
waitsForPromise(() => atom.packages.activatePackage("language-gfm"));
runs(() => grammar = atom.grammars.grammarForScopeName("source.gfm"));
});
it("parses the grammar", function() {
expect(grammar).toBeDefined();
expect(grammar.scopeName).toBe("source.gfm");
});
it("tokenizes spaces", function() {
const {tokens} = grammar.tokenizeLine(" ");
expect(tokens[0]).toEqual({value: " ", scopes: ["source.gfm"]});
});
it("tokenizes horizontal rules", function() {
let {tokens} = grammar.tokenizeLine("***");
expect(tokens[0]).toEqual({value: "***", scopes: ["source.gfm", "comment.hr.gfm"]});
({tokens} = grammar.tokenizeLine("---"));
expect(tokens[0]).toEqual({value: "---", scopes: ["source.gfm", "comment.hr.gfm"]});
({tokens} = grammar.tokenizeLine("___"));
expect(tokens[0]).toEqual({value: "___", scopes: ["source.gfm", "comment.hr.gfm"]});
});
it("tokenizes escaped characters", function() {
let {tokens} = grammar.tokenizeLine("\\*");
expect(tokens[0]).toEqual({value: "\\*", scopes: ["source.gfm", "constant.character.escape.gfm"]});
({tokens} = grammar.tokenizeLine("\\\\"));
expect(tokens[0]).toEqual({value: "\\\\", scopes: ["source.gfm", "constant.character.escape.gfm"]});
({tokens} = grammar.tokenizeLine("\\abc"));
expect(tokens[0]).toEqual({value: "\\a", scopes: ["source.gfm", "constant.character.escape.gfm"]});
expect(tokens[1]).toEqual({value: "bc", scopes: ["source.gfm"]});
});
it("tokenizes ***bold italic*** text", function() {
const {tokens} = grammar.tokenizeLine("this is ***bold italic*** text");
expect(tokens[0]).toEqual({value: "this is ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "***", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(tokens[2]).toEqual({value: "bold italic", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(tokens[3]).toEqual({value: "***", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(tokens[4]).toEqual({value: " text", scopes: ["source.gfm"]});
const [firstLineTokens, secondLineTokens] = Array.from(grammar.tokenizeLines("this is ***bold\nitalic***!"));
expect(firstLineTokens[0]).toEqual({value: "this is ", scopes: ["source.gfm"]});
expect(firstLineTokens[1]).toEqual({value: "***", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(firstLineTokens[2]).toEqual({value: "bold", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(secondLineTokens[0]).toEqual({value: "italic", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(secondLineTokens[1]).toEqual({value: "***", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(secondLineTokens[2]).toEqual({value: "!", scopes: ["source.gfm"]});
});
it("tokenizes ___bold italic___ text", function() {
const {tokens} = grammar.tokenizeLine("this is ___bold italic___ text");
expect(tokens[0]).toEqual({value: "this is ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "___", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(tokens[2]).toEqual({value: "bold italic", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(tokens[3]).toEqual({value: "___", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(tokens[4]).toEqual({value: " text", scopes: ["source.gfm"]});
const [firstLineTokens, secondLineTokens] = Array.from(grammar.tokenizeLines("this is ___bold\nitalic___!"));
expect(firstLineTokens[0]).toEqual({value: "this is ", scopes: ["source.gfm"]});
expect(firstLineTokens[1]).toEqual({value: "___", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(firstLineTokens[2]).toEqual({value: "bold", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(secondLineTokens[0]).toEqual({value: "italic", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(secondLineTokens[1]).toEqual({value: "___", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(secondLineTokens[2]).toEqual({value: "!", scopes: ["source.gfm"]});
});
it("tokenizes **bold** text", function() {
let {tokens} = grammar.tokenizeLine("**bold**");
expect(tokens[0]).toEqual({value: "**", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[1]).toEqual({value: "bold", scopes: ["source.gfm", "markup.bold.gfm"]});
expect(tokens[2]).toEqual({value: "**", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]});
const [firstLineTokens, secondLineTokens] = Array.from(grammar.tokenizeLines("this is **not\nbold**!"));
expect(firstLineTokens[0]).toEqual({value: "this is **not", scopes: ["source.gfm"]});
expect(secondLineTokens[0]).toEqual({value: "bold**!", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("not**bold**"));
expect(tokens[0]).toEqual({value: "not", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "**", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[2]).toEqual({value: "bold", scopes: ["source.gfm", "markup.bold.gfm"]});
expect(tokens[3]).toEqual({value: "**", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]});
});
it("tokenizes __bold__ text", function() {
let {tokens} = grammar.tokenizeLine("____");
expect(tokens[0]).toEqual({value: "____", scopes: ["source.gfm", "comment.hr.gfm"]});
({tokens} = grammar.tokenizeLine("__bold__"));
expect(tokens[0]).toEqual({value: "__", scopes: [ 'source.gfm', 'markup.bold.gfm', 'punctuation.definition.entity.gfm' ]});
expect(tokens[1]).toEqual({value: "bold", scopes: ["source.gfm", "markup.bold.gfm"]});
expect(tokens[2]).toEqual({value: "__", scopes: [ 'source.gfm', 'markup.bold.gfm', 'punctuation.definition.entity.gfm' ]});
const [firstLineTokens, secondLineTokens] = Array.from(grammar.tokenizeLines("this is __not\nbold__!"));
expect(firstLineTokens[0]).toEqual({value: "this is __not", scopes: ["source.gfm"]});
expect(secondLineTokens[0]).toEqual({value: "bold__!", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("not__bold__"));
expect(tokens[0]).toEqual({value: "not__bold__", scopes: ["source.gfm"]});
});
it("tokenizes *italic* text", function() {
let {tokens} = grammar.tokenizeLine("**");
expect(tokens[0]).toEqual({value: "**", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("this is *italic* text"));
expect(tokens[0]).toEqual({value: "this is ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "*", scopes: [ "source.gfm", "markup.italic.gfm", "punctuation.definition.entity.gfm" ]});
expect(tokens[2]).toEqual({value: "italic", scopes: ["source.gfm", "markup.italic.gfm"]});
expect(tokens[3]).toEqual({value: "*", scopes: [ "source.gfm", "markup.italic.gfm", "punctuation.definition.entity.gfm" ]});
expect(tokens[4]).toEqual({value: " text", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("is*italic*"));
expect(tokens[0]).toEqual({value: "is", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "*", scopes: [ "source.gfm", "markup.italic.gfm", "punctuation.definition.entity.gfm" ]});
expect(tokens[2]).toEqual({value: "italic", scopes: ["source.gfm", "markup.italic.gfm"]});
expect(tokens[3]).toEqual({value: "*", scopes: [ "source.gfm", "markup.italic.gfm", "punctuation.definition.entity.gfm" ]});
({tokens} = grammar.tokenizeLine("* not italic"));
expect(tokens[0]).toEqual({value: "*", scopes: ["source.gfm", "variable.unordered.list.gfm"]});
expect(tokens[1]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[2]).toEqual({value: "not italic", scopes: ["source.gfm"]});
const [firstLineTokens, secondLineTokens] = Array.from(grammar.tokenizeLines("this is *not\nitalic*!"));
expect(firstLineTokens[0]).toEqual({value: "this is *not", scopes: ["source.gfm"]});
expect(secondLineTokens[0]).toEqual({value: "italic*!", scopes: ["source.gfm"]});
});
it("tokenizes _italic_ text", function() {
let {tokens} = grammar.tokenizeLine("__");
expect(tokens[0]).toEqual({value: "__", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("this is _italic_ text"));
expect(tokens[0]).toEqual({value: "this is ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "_", scopes: [ 'source.gfm', 'markup.italic.gfm', 'punctuation.definition.entity.gfm' ]});
expect(tokens[2]).toEqual({value: "italic", scopes: ["source.gfm", "markup.italic.gfm"]});
expect(tokens[3]).toEqual({value: "_", scopes: [ 'source.gfm', 'markup.italic.gfm', 'punctuation.definition.entity.gfm' ]});
expect(tokens[4]).toEqual({value: " text", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("not_italic_"));
expect(tokens[0]).toEqual({value: "not_italic_", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("not x^{a}_m y^{b}_n italic"));
expect(tokens[0]).toEqual({value: "not x^{a}_m y^{b}_n italic", scopes: ["source.gfm"]});
const [firstLineTokens, secondLineTokens] = Array.from(grammar.tokenizeLines("this is _not\nitalic_!"));
expect(firstLineTokens[0]).toEqual({value: "this is _not", scopes: ["source.gfm"]});
expect(secondLineTokens[0]).toEqual({value: "italic_!", scopes: ["source.gfm"]});
});
it("tokenizes ~~strike~~ text", function() {
let {tokens} = grammar.tokenizeLine("~~strike~~");
expect(tokens[0]).toEqual({value: "~~", scopes: ["source.gfm", "markup.strike.gfm"]});
expect(tokens[1]).toEqual({value: "strike", scopes: ["source.gfm", "markup.strike.gfm"]});
expect(tokens[2]).toEqual({value: "~~", scopes: ["source.gfm", "markup.strike.gfm"]});
const [firstLineTokens, secondLineTokens] = Array.from(grammar.tokenizeLines("this is ~~str\nike~~!"));
expect(firstLineTokens[0]).toEqual({value: "this is ", scopes: ["source.gfm"]});
expect(firstLineTokens[1]).toEqual({value: "~~", scopes: ["source.gfm", "markup.strike.gfm"]});
expect(firstLineTokens[2]).toEqual({value: "str", scopes: ["source.gfm", "markup.strike.gfm"]});
expect(secondLineTokens[0]).toEqual({value: "ike", scopes: ["source.gfm", "markup.strike.gfm"]});
expect(secondLineTokens[1]).toEqual({value: "~~", scopes: ["source.gfm", "markup.strike.gfm"]});
expect(secondLineTokens[2]).toEqual({value: "!", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("not~~strike~~"));
expect(tokens[0]).toEqual({value: "not~~strike~~", scopes: ["source.gfm"]});
});
it("tokenizes headings", function() {
let {tokens} = grammar.tokenizeLine("# Heading 1");
expect(tokens[0]).toEqual({value: "#", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "markup.heading.marker.gfm"]});
expect(tokens[1]).toEqual({value: " ", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "markup.heading.space.gfm"]});
expect(tokens[2]).toEqual({value: "Heading 1", scopes: ["source.gfm", "markup.heading.heading-1.gfm"]});
({tokens} = grammar.tokenizeLine("## Heading 2"));
expect(tokens[0]).toEqual({value: "##", scopes: ["source.gfm", "markup.heading.heading-2.gfm", "markup.heading.marker.gfm"]});
expect(tokens[1]).toEqual({value: " ", scopes: ["source.gfm", "markup.heading.heading-2.gfm", "markup.heading.space.gfm"]});
expect(tokens[2]).toEqual({value: "Heading 2", scopes: ["source.gfm", "markup.heading.heading-2.gfm"]});
({tokens} = grammar.tokenizeLine("### Heading 3"));
expect(tokens[0]).toEqual({value: "###", scopes: ["source.gfm", "markup.heading.heading-3.gfm", "markup.heading.marker.gfm"]});
expect(tokens[1]).toEqual({value: " ", scopes: ["source.gfm", "markup.heading.heading-3.gfm", "markup.heading.space.gfm"]});
expect(tokens[2]).toEqual({value: "Heading 3", scopes: ["source.gfm", "markup.heading.heading-3.gfm"]});
({tokens} = grammar.tokenizeLine("#### Heading 4"));
expect(tokens[0]).toEqual({value: "####", scopes: ["source.gfm", "markup.heading.heading-4.gfm", "markup.heading.marker.gfm"]});
expect(tokens[1]).toEqual({value: " ", scopes: ["source.gfm", "markup.heading.heading-4.gfm", "markup.heading.space.gfm"]});
expect(tokens[2]).toEqual({value: "Heading 4", scopes: ["source.gfm", "markup.heading.heading-4.gfm"]});
({tokens} = grammar.tokenizeLine("##### Heading 5"));
expect(tokens[0]).toEqual({value: "#####", scopes: ["source.gfm", "markup.heading.heading-5.gfm", "markup.heading.marker.gfm"]});
expect(tokens[1]).toEqual({value: " ", scopes: ["source.gfm", "markup.heading.heading-5.gfm", "markup.heading.space.gfm"]});
expect(tokens[2]).toEqual({value: "Heading 5", scopes: ["source.gfm", "markup.heading.heading-5.gfm"]});
({tokens} = grammar.tokenizeLine("###### Heading 6"));
expect(tokens[0]).toEqual({value: "######", scopes: ["source.gfm", "markup.heading.heading-6.gfm", "markup.heading.marker.gfm"]});
expect(tokens[1]).toEqual({value: " ", scopes: ["source.gfm", "markup.heading.heading-6.gfm", "markup.heading.space.gfm"]});
expect(tokens[2]).toEqual({value: "Heading 6", scopes: ["source.gfm", "markup.heading.heading-6.gfm"]});
});
it("tokenizes matches inside of headers", function() {
const {tokens} = grammar.tokenizeLine("# Heading :one:");
expect(tokens[0]).toEqual({value: "#", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "markup.heading.marker.gfm"]});
expect(tokens[1]).toEqual({value: " ", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "markup.heading.space.gfm"]});
expect(tokens[2]).toEqual({value: "Heading ", scopes: ["source.gfm", "markup.heading.heading-1.gfm"]});
expect(tokens[3]).toEqual({value: ":", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "string.emoji.gfm", "string.emoji.start.gfm"]});
expect(tokens[4]).toEqual({value: "one", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "string.emoji.gfm", "string.emoji.word.gfm"]});
expect(tokens[5]).toEqual({value: ":", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "string.emoji.gfm", "string.emoji.end.gfm"]});
});
it("tokenizes an :emoji:", function() {
let {tokens} = grammar.tokenizeLine("this is :no_good:");
expect(tokens[0]).toEqual({value: "this is ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: ":", scopes: ["source.gfm", "string.emoji.gfm", "string.emoji.start.gfm"]});
expect(tokens[2]).toEqual({value: "no_good", scopes: ["source.gfm", "string.emoji.gfm", "string.emoji.word.gfm"]});
expect(tokens[3]).toEqual({value: ":", scopes: ["source.gfm", "string.emoji.gfm", "string.emoji.end.gfm"]});
({tokens} = grammar.tokenizeLine("this is :no good:"));
expect(tokens[0]).toEqual({value: "this is :no good:", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("http://localhost:8080"));
expect(tokens[0]).toEqual({value: "http://localhost:8080", scopes: ["source.gfm"]});
});
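// The fenced-code tests below tokenize one line at a time, passing the returned
// ruleStack back in so the tokenizer stays inside the open block.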
it("tokenizes a ``` code block", function() {
let {tokens, ruleStack} = grammar.tokenizeLine("```");
expect(tokens[0]).toEqual({value: "```", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]});
({tokens, ruleStack} = grammar.tokenizeLine("-> 'hello'", ruleStack));
expect(tokens[0]).toEqual({value: "-> 'hello'", scopes: ["source.gfm", "markup.raw.gfm"]});
({tokens} = grammar.tokenizeLine("```", ruleStack));
expect(tokens[0]).toEqual({value: "```", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]});
});
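// A minimal sketch (not part of this spec) of how the ruleStack threading above
// generalizes to a whole document; grammar.tokenizeLines, used elsewhere in this
// file, does effectively this:
// const tokenizeDocument = function(grammar, text) {
//   let ruleStack = null;
//   return text.split("\n").map(function(line) {
//     const result = grammar.tokenizeLine(line, ruleStack);
//     ({ruleStack} = result);
//     return result.tokens;
//   });
// };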
it("tokenizes a ~~~ code block", function() {
let {tokens, ruleStack} = grammar.tokenizeLine("~~~");
expect(tokens[0]).toEqual({value: "~~~", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]});
({tokens, ruleStack} = grammar.tokenizeLine("-> 'hello'", ruleStack));
expect(tokens[0]).toEqual({value: "-> 'hello'", scopes: ["source.gfm", "markup.raw.gfm"]});
({tokens} = grammar.tokenizeLine("~~~", ruleStack));
expect(tokens[0]).toEqual({value: "~~~", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]});
});
it("doesn't tokenise ~`~ as a code block", function() {
const {tokens} = grammar.tokenizeLine("~`~");
expect(tokens[0]).toEqual({value: '~', scopes: ['source.gfm']});
expect(tokens[1]).toEqual({value: '`', scopes: ['source.gfm', 'markup.raw.gfm']});
expect(tokens[2]).toEqual({value: '~', scopes: ['source.gfm', 'markup.raw.gfm']});
});
it("tokenises code-blocks with borders of differing lengths", function() {
let [firstLineTokens, secondLineTokens, thirdLineTokens] = Array.from(grammar.tokenizeLines("~~~\nfoo bar\n~~~~~~~"));
expect(firstLineTokens[0]).toEqual({value: '~~~', scopes: ['source.gfm', 'markup.raw.gfm', 'support.gfm']});
expect(secondLineTokens[0]).toEqual({value: 'foo bar', scopes: ['source.gfm', 'markup.raw.gfm']});
expect(thirdLineTokens[0]).toEqual({value: '~~~~~~~', scopes: ['source.gfm', 'markup.raw.gfm', 'support.gfm']});
[firstLineTokens, secondLineTokens, thirdLineTokens] = Array.from(grammar.tokenizeLines("~~~~~~~\nfoo bar\n~~~"));
expect(firstLineTokens[0]).toEqual({value: '~~~~~~~', scopes: ['source.gfm', 'markup.raw.gfm', 'support.gfm']});
expect(secondLineTokens[0]).toEqual({value: 'foo bar', scopes: ['source.gfm', 'markup.raw.gfm']});
expect(thirdLineTokens[0]).toEqual({value: '~~~', scopes: ['source.gfm', 'markup.raw.gfm']});
});
it("tokenizes a ``` code block with trailing whitespace", function() {
let {tokens, ruleStack} = grammar.tokenizeLine("```");
expect(tokens[0]).toEqual({value: "```", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]});
({tokens, ruleStack} = grammar.tokenizeLine("-> 'hello'", ruleStack));
expect(tokens[0]).toEqual({value: "-> 'hello'", scopes: ["source.gfm", "markup.raw.gfm"]});
({tokens} = grammar.tokenizeLine("``` ", ruleStack));
expect(tokens[0]).toEqual({value: "``` ", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]});
});
it("tokenizes a ~~~ code block with trailing whitespace", function() {
let {tokens, ruleStack} = grammar.tokenizeLine("~~~");
expect(tokens[0]).toEqual({value: "~~~", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]});
({tokens, ruleStack} = grammar.tokenizeLine("-> 'hello'", ruleStack));
expect(tokens[0]).toEqual({value: "-> 'hello'", scopes: ["source.gfm", "markup.raw.gfm"]});
({tokens} = grammar.tokenizeLine("~~~ ", ruleStack));
expect(tokens[0]).toEqual({value: "~~~ ", scopes: ["source.gfm", "markup.raw.gfm", "support.gfm"]});
});
it("tokenises a ``` code block with an unknown language", function() {
let {tokens, ruleStack} = grammar.tokenizeLine("``` myLanguage");
expect(tokens[0]).toEqual({value: '``` myLanguage', scopes: ['source.gfm', 'markup.code.other.gfm', 'support.gfm']});
({tokens, ruleStack} = grammar.tokenizeLine("-> 'hello'", ruleStack));
expect(tokens[0]).toEqual({value: "-> 'hello'", scopes: ['source.gfm', 'markup.code.other.gfm', 'source.embedded.mylanguage']});
({tokens} = grammar.tokenizeLine("```", ruleStack));
expect(tokens[0]).toEqual({value: '```', scopes: ['source.gfm', 'markup.code.other.gfm', 'support.gfm']});
});
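// Known fence languages map to markup.code.<language>.gfm and embed a
// source.embedded.<scope> content scope, as the expectations below show.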
it("tokenizes a ``` code block with a known language", function() {
let {tokens, ruleStack} = grammar.tokenizeLine("``` bash");
expect(tokens[0]).toEqual({value: "``` bash", scopes: ["source.gfm", "markup.code.shell.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.shell");
({tokens, ruleStack} = grammar.tokenizeLine("```js "));
expect(tokens[0]).toEqual({value: "```js ", scopes: ["source.gfm", "markup.code.js.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.js");
({tokens, ruleStack} = grammar.tokenizeLine("```JS "));
expect(tokens[0]).toEqual({value: "```JS ", scopes: ["source.gfm", "markup.code.js.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.js");
({tokens, ruleStack} = grammar.tokenizeLine("```r "));
expect(tokens[0]).toEqual({value: "```r ", scopes: ["source.gfm", "markup.code.r.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.r");
({tokens, ruleStack} = grammar.tokenizeLine("```properties "));
expect(tokens[0]).toEqual({value: "```properties ", scopes: ["source.gfm", "markup.code.git-config.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.git-config");
});
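// R Markdown chunks open with a ```{r ...} fence; chunk options such as
// eval/cache do not change the embedded R scope.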
it("tokenizes a Rmarkdown ``` code block", function() {
let {tokens, ruleStack} = grammar.tokenizeLine("```{r}");
expect(tokens[0]).toEqual({value: "```{r}", scopes: ["source.gfm", "markup.code.r.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.r");
({tokens, ruleStack} = grammar.tokenizeLine("```{r,eval=TRUE,cache=FALSE}"));
expect(tokens[0]).toEqual({value: "```{r,eval=TRUE,cache=FALSE}", scopes: ["source.gfm", "markup.code.r.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.r");
({tokens, ruleStack} = grammar.tokenizeLine("```{r eval=TRUE,cache=FALSE}"));
expect(tokens[0]).toEqual({value: "```{r eval=TRUE,cache=FALSE}", scopes: ["source.gfm", "markup.code.r.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.r");
});
it("tokenizes a Rmarkdown ``` code block with whitespace", function() {
let {tokens, ruleStack} = grammar.tokenizeLine("```{r }");
expect(tokens[0]).toEqual({value: "```{r }", scopes: ["source.gfm", "markup.code.r.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.r");
({tokens, ruleStack} = grammar.tokenizeLine("```{R } "));
expect(tokens[0]).toEqual({value: "```{R } ", scopes: ["source.gfm", "markup.code.r.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.r");
({tokens, ruleStack} = grammar.tokenizeLine("```{r eval = TRUE, cache = FALSE}"));
expect(tokens[0]).toEqual({value: "```{r eval = TRUE, cache = FALSE}", scopes: ["source.gfm", "markup.code.r.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.r");
});
it("tokenizes a ~~~ code block with a language", function() {
let {tokens, ruleStack} = grammar.tokenizeLine("~~~ bash");
expect(tokens[0]).toEqual({value: "~~~ bash", scopes: ["source.gfm", "markup.code.shell.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.shell");
({tokens, ruleStack} = grammar.tokenizeLine("~~~js "));
expect(tokens[0]).toEqual({value: "~~~js ", scopes: ["source.gfm", "markup.code.js.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.js");
({tokens, ruleStack} = grammar.tokenizeLine("~~~properties "));
expect(tokens[0]).toEqual({value: "~~~properties ", scopes: ["source.gfm", "markup.code.git-config.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.git-config");
});
it("tokenizes a ``` code block with a language and trailing whitespace", function() {
let {tokens, ruleStack} = grammar.tokenizeLine("``` bash");
({tokens} = grammar.tokenizeLine("``` ", ruleStack));
expect(tokens[0]).toEqual({value: "``` ", scopes: ["source.gfm", "markup.code.shell.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.shell");
({tokens, ruleStack} = grammar.tokenizeLine("```js "));
({tokens} = grammar.tokenizeLine("``` ", ruleStack));
expect(tokens[0]).toEqual({value: "``` ", scopes: ["source.gfm", "markup.code.js.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.js");
});
it("tokenizes a ~~~ code block with a language and trailing whitespace", function() {
let {tokens, ruleStack} = grammar.tokenizeLine("~~~ bash");
({tokens} = grammar.tokenizeLine("~~~ ", ruleStack));
expect(tokens[0]).toEqual({value: "~~~ ", scopes: ["source.gfm", "markup.code.shell.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.shell");
({tokens, ruleStack} = grammar.tokenizeLine("~~~js "));
({tokens} = grammar.tokenizeLine("~~~ ", ruleStack));
expect(tokens[0]).toEqual({value: "~~~ ", scopes: ["source.gfm", "markup.code.js.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.js");
({tokens, ruleStack} = grammar.tokenizeLine("~~~ properties "));
({tokens} = grammar.tokenizeLine("~~~ ", ruleStack));
expect(tokens[0]).toEqual({value: "~~~ ", scopes: ["source.gfm", "markup.code.git-config.gfm", "support.gfm"]});
expect(ruleStack[1].contentScopeName).toBe("source.embedded.git-config");
});
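// Inline code spans are delimited by matching backtick runs; a double-backtick
// span can wrap content that itself contains a backtick.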
it("tokenizes inline `code` blocks", function() {
let {tokens} = grammar.tokenizeLine("`this` is `code`");
expect(tokens[0]).toEqual({value: "`", scopes: ["source.gfm", "markup.raw.gfm"]});
expect(tokens[1]).toEqual({value: "this", scopes: ["source.gfm", "markup.raw.gfm"]});
expect(tokens[2]).toEqual({value: "`", scopes: ["source.gfm", "markup.raw.gfm"]});
expect(tokens[3]).toEqual({value: " is ", scopes: ["source.gfm"]});
expect(tokens[4]).toEqual({value: "`", scopes: ["source.gfm", "markup.raw.gfm"]});
expect(tokens[5]).toEqual({value: "code", scopes: ["source.gfm", "markup.raw.gfm"]});
expect(tokens[6]).toEqual({value: "`", scopes: ["source.gfm", "markup.raw.gfm"]});
({tokens} = grammar.tokenizeLine("``"));
expect(tokens[0]).toEqual({value: "`", scopes: ["source.gfm", "markup.raw.gfm"]});
expect(tokens[1]).toEqual({value: "`", scopes: ["source.gfm", "markup.raw.gfm"]});
({tokens} = grammar.tokenizeLine("``a\\`b``"));
expect(tokens[0]).toEqual({value: "``", scopes: ["source.gfm", "markup.raw.gfm"]});
expect(tokens[1]).toEqual({value: "a\\`b", scopes: ["source.gfm", "markup.raw.gfm"]});
expect(tokens[2]).toEqual({value: "``", scopes: ["source.gfm", "markup.raw.gfm"]});
});
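// Link coverage: inline [text](url), reference [text][id], id-less [text][],
// [id]: footer definitions, and image links nested inside links.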
it("tokenizes [links](links)", function() {
const {tokens} = grammar.tokenizeLine("please click [this link](website)");
expect(tokens[0]).toEqual({value: "please click ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[2]).toEqual({value: "this link", scopes: ["source.gfm", "link", "entity.gfm"]});
expect(tokens[3]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[4]).toEqual({value: "(", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[5]).toEqual({value: "website", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]});
expect(tokens[6]).toEqual({value: ")", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
});
it("tokenizes reference [links][links]", function() {
const {tokens} = grammar.tokenizeLine("please click [this link][website]");
expect(tokens[0]).toEqual({value: "please click ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[2]).toEqual({value: "this link", scopes: ["source.gfm", "link", "entity.gfm"]});
expect(tokens[3]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[4]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[5]).toEqual({value: "website", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]});
expect(tokens[6]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
});
it("tokenizes id-less reference [links][]", function() {
const {tokens} = grammar.tokenizeLine("please click [this link][]");
expect(tokens[0]).toEqual({value: "please click ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[2]).toEqual({value: "this link", scopes: ["source.gfm", "link", "entity.gfm"]});
expect(tokens[3]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[4]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[5]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
});
it("tokenizes [link]: footers", function() {
const {tokens} = grammar.tokenizeLine("[aLink]: http://website");
expect(tokens[0]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[1]).toEqual({value: "aLink", scopes: ["source.gfm", "link", "entity.gfm"]});
expect(tokens[2]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[3]).toEqual({value: ":", scopes: ["source.gfm", "link", "punctuation.separator.key-value.gfm"]});
expect(tokens[4]).toEqual({value: " ", scopes: ["source.gfm", "link"]});
expect(tokens[5]).toEqual({value: "http://website", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]});
});
it("tokenizes [link]: <footers>", function() {
const {tokens} = grammar.tokenizeLine("[aLink]: <http://website>");
expect(tokens[0]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[1]).toEqual({value: "aLink", scopes: ["source.gfm", "link", "entity.gfm"]});
expect(tokens[2]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[3]).toEqual({value: ": <", scopes: ["source.gfm", "link"]});
expect(tokens[4]).toEqual({value: "http://website", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]});
expect(tokens[5]).toEqual({value: ">", scopes: ["source.gfm", "link"]});
});
it("tokenizes [![links](links)](links)", function() {
const {tokens} = grammar.tokenizeLine("[![title](image)](link)");
expect(tokens[0]).toEqual({value: "[!", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[1]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[2]).toEqual({value: "title", scopes: ["source.gfm", "link", "entity.gfm"]});
expect(tokens[3]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[4]).toEqual({value: "(", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[5]).toEqual({value: "image", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]});
expect(tokens[6]).toEqual({value: ")", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[7]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[8]).toEqual({value: "(", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[9]).toEqual({value: "link", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]});
expect(tokens[10]).toEqual({value: ")", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
});
it("tokenizes [![links](links)][links]", function() {
const {tokens} = grammar.tokenizeLine("[![title](image)][link]");
expect(tokens[0]).toEqual({value: "[!", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[1]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[2]).toEqual({value: "title", scopes: ["source.gfm", "link", "entity.gfm"]});
expect(tokens[3]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[4]).toEqual({value: "(", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[5]).toEqual({value: "image", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]});
expect(tokens[6]).toEqual({value: ")", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[7]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[8]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[9]).toEqual({value: "link", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]});
expect(tokens[10]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
});
it("tokenizes [![links][links]](links)", function() {
const {tokens} = grammar.tokenizeLine("[![title][image]](link)");
expect(tokens[0]).toEqual({value: "[!", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[1]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[2]).toEqual({value: "title", scopes: ["source.gfm", "link", "entity.gfm"]});
expect(tokens[3]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[4]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[5]).toEqual({value: "image", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]});
expect(tokens[6]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[7]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[8]).toEqual({value: "(", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[9]).toEqual({value: "link", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]});
expect(tokens[10]).toEqual({value: ")", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
});
it("tokenizes [![links][links]][links]", function() {
const {tokens} = grammar.tokenizeLine("[![title][image]][link]");
expect(tokens[0]).toEqual({value: "[!", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[1]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[2]).toEqual({value: "title", scopes: ["source.gfm", "link", "entity.gfm"]});
expect(tokens[3]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[4]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[5]).toEqual({value: "image", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]});
expect(tokens[6]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[7]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
expect(tokens[8]).toEqual({value: "[", scopes: ["source.gfm", "link", "punctuation.definition.begin.gfm"]});
expect(tokens[9]).toEqual({value: "link", scopes: ["source.gfm", "link", "markup.underline.link.gfm"]});
expect(tokens[10]).toEqual({value: "]", scopes: ["source.gfm", "link", "punctuation.definition.end.gfm"]});
});
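// @mentions must be preceded by whitespace or start-of-line; usernames allow
// letters, digits, underscores, and hyphens, and email addresses must not match.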
it("tokenizes mentions", function() {
let {tokens} = grammar.tokenizeLine("sentence with no space before@name ");
expect(tokens[0]).toEqual({value: "sentence with no space before@name ", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("@name '@name' @name's @name. @name, (@name) [@name]"));
expect(tokens[0]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[1]).toEqual({value: "name", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[2]).toEqual({value: " '", scopes: ["source.gfm"]});
expect(tokens[3]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[4]).toEqual({value: "name", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[5]).toEqual({value: "' ", scopes: ["source.gfm"]});
expect(tokens[6]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[7]).toEqual({value: "name", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[8]).toEqual({value: "'s ", scopes: ["source.gfm"]});
expect(tokens[9]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[10]).toEqual({value: "name", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[11]).toEqual({value: ". ", scopes: ["source.gfm"]});
expect(tokens[12]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[13]).toEqual({value: "name", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[14]).toEqual({value: ", (", scopes: ["source.gfm"]});
expect(tokens[15]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[16]).toEqual({value: "name", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[17]).toEqual({value: ") [", scopes: ["source.gfm"]});
expect(tokens[18]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[19]).toEqual({value: "name", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[20]).toEqual({value: "]", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine('"@name"'));
expect(tokens[0]).toEqual({value: '"', scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[2]).toEqual({value: "name", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[3]).toEqual({value: '"', scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("sentence with a space before @name/ and an invalid symbol after"));
expect(tokens[0]).toEqual({value: "sentence with a space before @name/ and an invalid symbol after", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("sentence with a space before @name that continues"));
expect(tokens[0]).toEqual({value: "sentence with a space before ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[2]).toEqual({value: "name", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[3]).toEqual({value: " that continues", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("* @name at the start of an unordered list"));
expect(tokens[0]).toEqual({value: "*", scopes: ["source.gfm", "variable.unordered.list.gfm"]});
expect(tokens[1]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[2]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[3]).toEqual({value: "name", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[4]).toEqual({value: " at the start of an unordered list", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("a username @1337_hubot with numbers, letters and underscores"));
expect(tokens[0]).toEqual({value: "a username ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[2]).toEqual({value: "1337_hubot", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[3]).toEqual({value: " with numbers, letters and underscores", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("a username @1337-hubot with numbers, letters and hyphens"));
expect(tokens[0]).toEqual({value: "a username ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[2]).toEqual({value: "1337-hubot", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[3]).toEqual({value: " with numbers, letters and hyphens", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("@name at the start of a line"));
expect(tokens[0]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[1]).toEqual({value: "name", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[2]).toEqual({value: " at the start of a line", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("any email like you@domain.com shouldn't mistakenly be matched as a mention"));
expect(tokens[0]).toEqual({value: "any email like you@domain.com shouldn't mistakenly be matched as a mention", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("@person's"));
expect(tokens[0]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[1]).toEqual({value: "person", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[2]).toEqual({value: "'s", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("@person;"));
expect(tokens[0]).toEqual({value: "@", scopes: ["source.gfm", "variable.mention.gfm"]});
expect(tokens[1]).toEqual({value: "person", scopes: ["source.gfm", "string.username.gfm"]});
expect(tokens[2]).toEqual({value: ";", scopes: ["source.gfm"]});
});
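// Issue references (#123) follow the same boundary rules as mentions; a letter
// glued to the number (#123i) invalidates the match.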
it("tokenizes issue numbers", function() {
let {tokens} = grammar.tokenizeLine("sentence with no space before#12 ");
expect(tokens[0]).toEqual({value: "sentence with no space before#12 ", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine(" #101 '#101' #101's #101. #101, (#101) [#101]"));
expect(tokens[1]).toEqual({value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]});
expect(tokens[2]).toEqual({value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]});
expect(tokens[3]).toEqual({value: " '", scopes: ["source.gfm"]});
expect(tokens[4]).toEqual({value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]});
expect(tokens[5]).toEqual({value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]});
expect(tokens[6]).toEqual({value: "' ", scopes: ["source.gfm"]});
expect(tokens[7]).toEqual({value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]});
expect(tokens[8]).toEqual({value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]});
expect(tokens[9]).toEqual({value: "'s ", scopes: ["source.gfm"]});
expect(tokens[10]).toEqual({value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]});
expect(tokens[11]).toEqual({value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]});
expect(tokens[12]).toEqual({value: ". ", scopes: ["source.gfm"]});
expect(tokens[13]).toEqual({value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]});
expect(tokens[14]).toEqual({value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]});
expect(tokens[15]).toEqual({value: ", (", scopes: ["source.gfm"]});
expect(tokens[16]).toEqual({value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]});
expect(tokens[17]).toEqual({value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]});
expect(tokens[18]).toEqual({value: ") [", scopes: ["source.gfm"]});
expect(tokens[19]).toEqual({value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]});
expect(tokens[20]).toEqual({value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]});
expect(tokens[21]).toEqual({value: "]", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine('"#101"'));
expect(tokens[0]).toEqual({value: '"', scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]});
expect(tokens[2]).toEqual({value: "101", scopes: ["source.gfm", "string.issue.number.gfm"]});
expect(tokens[3]).toEqual({value: '"', scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("sentence with a space before #123i and a character after"));
expect(tokens[0]).toEqual({value: "sentence with a space before #123i and a character after", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine("sentence with a space before #123 that continues"));
expect(tokens[0]).toEqual({value: "sentence with a space before ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]});
expect(tokens[2]).toEqual({value: "123", scopes: ["source.gfm", "string.issue.number.gfm"]});
expect(tokens[3]).toEqual({value: " that continues", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine(" #123's"));
expect(tokens[1]).toEqual({value: "#", scopes: ["source.gfm", "variable.issue.tag.gfm"]});
expect(tokens[2]).toEqual({value: "123", scopes: ["source.gfm", "string.issue.number.gfm"]});
expect(tokens[3]).toEqual({value: "'s", scopes: ["source.gfm"]});
});
it("tokenizes unordered lists", function() {
let {tokens} = grammar.tokenizeLine("*Item 1");
expect(tokens[0]).not.toEqual({value: "*Item 1", scopes: ["source.gfm", "variable.unordered.list.gfm"]});
({tokens} = grammar.tokenizeLine(" * Item 1"));
expect(tokens[0]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "*", scopes: ["source.gfm", "variable.unordered.list.gfm"]});
expect(tokens[2]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[3]).toEqual({value: "Item 1", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine(" + Item 2"));
expect(tokens[0]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "+", scopes: ["source.gfm", "variable.unordered.list.gfm"]});
expect(tokens[2]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[3]).toEqual({value: "Item 2", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine(" - Item 3"));
expect(tokens[0]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "-", scopes: ["source.gfm", "variable.unordered.list.gfm"]});
expect(tokens[2]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[3]).toEqual({value: "Item 3", scopes: ["source.gfm"]});
});
it("tokenizes ordered lists", function() {
let {tokens} = grammar.tokenizeLine("1.First Item");
expect(tokens[0]).toEqual({value: "1.First Item", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine(" 1. First Item"));
expect(tokens[0]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "1.", scopes: ["source.gfm", "variable.ordered.list.gfm"]});
expect(tokens[2]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[3]).toEqual({value: "First Item", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine(" 10. Tenth Item"));
expect(tokens[0]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "10.", scopes: ["source.gfm", "variable.ordered.list.gfm"]});
expect(tokens[2]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[3]).toEqual({value: "Tenth Item", scopes: ["source.gfm"]});
({tokens} = grammar.tokenizeLine(" 111. Hundred and eleventh item"));
expect(tokens[0]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "111.", scopes: ["source.gfm", "variable.ordered.list.gfm"]});
expect(tokens[2]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[3]).toEqual({value: "Hundred and eleventh item", scopes: ["source.gfm"]});
});
it("tokenizes > quoted text", function() {
const {tokens} = grammar.tokenizeLine("> Quotation :+1:");
expect(tokens[0]).toEqual({value: ">", scopes: ["source.gfm", "comment.quote.gfm", "support.quote.gfm"]});
expect(tokens[1]).toEqual({value: " Quotation :+1:", scopes: ["source.gfm", "comment.quote.gfm"]});
});
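// HTML entities come in named (&trade;), decimal (&#8482;), and hex (&#xb3;)
// forms; the leading & and trailing ; are scoped as punctuation.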
it("tokenizes HTML entities", function() {
const {tokens} = grammar.tokenizeLine("&trade; &#8482; &a1; &#xb3;");
expect(tokens[0]).toEqual({value: "&", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[1]).toEqual({value: "trade", scopes: ["source.gfm", "constant.character.entity.gfm"]});
expect(tokens[2]).toEqual({value: ";", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[3]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[4]).toEqual({value: "&", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[5]).toEqual({value: "#8482", scopes: ["source.gfm", "constant.character.entity.gfm"]});
expect(tokens[6]).toEqual({value: ";", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[7]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[8]).toEqual({value: "&", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[9]).toEqual({value: "a1", scopes: ["source.gfm", "constant.character.entity.gfm"]});
expect(tokens[10]).toEqual({value: ";", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[11]).toEqual({value: " ", scopes: ["source.gfm"]});
expect(tokens[12]).toEqual({value: "&", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[13]).toEqual({value: "#xb3", scopes: ["source.gfm", "constant.character.entity.gfm"]});
expect(tokens[14]).toEqual({value: ";", scopes: ["source.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
});
it("tokenizes HTML entities in *italic* text", function() {
let {tokens} = grammar.tokenizeLine("*&trade; &#8482; &#xb3;*");
expect(tokens[0]).toEqual({value: "*", scopes: [ 'source.gfm', 'markup.italic.gfm', 'punctuation.definition.entity.gfm' ]});
expect(tokens[1]).toEqual({value: "&", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[2]).toEqual({value: "trade", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm"]});
expect(tokens[3]).toEqual({value: ";", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[4]).toEqual({value: " ", scopes: ["source.gfm", "markup.italic.gfm"]});
expect(tokens[5]).toEqual({value: "&", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[6]).toEqual({value: "#8482", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm"]});
expect(tokens[7]).toEqual({value: ";", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[8]).toEqual({value: " ", scopes: ["source.gfm", "markup.italic.gfm"]});
expect(tokens[9]).toEqual({value: "&", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[10]).toEqual({value: "#xb3", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm"]});
expect(tokens[11]).toEqual({value: ";", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[12]).toEqual({value: "*", scopes: [ 'source.gfm', 'markup.italic.gfm', 'punctuation.definition.entity.gfm' ]});
({tokens} = grammar.tokenizeLine("_&trade; &#8482; &#xb3;_"));
expect(tokens[0]).toEqual({value: "_", scopes: [ 'source.gfm', 'markup.italic.gfm', 'punctuation.definition.entity.gfm' ]});
expect(tokens[1]).toEqual({value: "&", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[2]).toEqual({value: "trade", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm"]});
expect(tokens[3]).toEqual({value: ";", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[4]).toEqual({value: " ", scopes: ["source.gfm", "markup.italic.gfm"]});
expect(tokens[5]).toEqual({value: "&", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[6]).toEqual({value: "#8482", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm"]});
expect(tokens[7]).toEqual({value: ";", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[8]).toEqual({value: " ", scopes: ["source.gfm", "markup.italic.gfm"]});
expect(tokens[9]).toEqual({value: "&", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[10]).toEqual({value: "#xb3", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm"]});
expect(tokens[11]).toEqual({value: ";", scopes: ["source.gfm", "markup.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[12]).toEqual({value: "_", scopes: [ 'source.gfm', 'markup.italic.gfm', 'punctuation.definition.entity.gfm' ]});
});
it("tokenizes HTML entities in **bold** text", function() {
let {tokens} = grammar.tokenizeLine("**&trade; &#8482; &#xb3;**");
expect(tokens[0]).toEqual({value: "**", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[1]).toEqual({value: "&", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[2]).toEqual({value: "trade", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm"]});
expect(tokens[3]).toEqual({value: ";", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[4]).toEqual({value: " ", scopes: ["source.gfm", "markup.bold.gfm"]});
expect(tokens[5]).toEqual({value: "&", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[6]).toEqual({value: "#8482", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm"]});
expect(tokens[7]).toEqual({value: ";", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[8]).toEqual({value: " ", scopes: ["source.gfm", "markup.bold.gfm"]});
expect(tokens[9]).toEqual({value: "&", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[10]).toEqual({value: "#xb3", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm"]});
expect(tokens[11]).toEqual({value: ";", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[12]).toEqual({value: "**", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]});
({tokens} = grammar.tokenizeLine("__&trade; &#8482; &#xb3;__"));
expect(tokens[0]).toEqual({value: "__", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[1]).toEqual({value: "&", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[2]).toEqual({value: "trade", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm"]});
expect(tokens[3]).toEqual({value: ";", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[4]).toEqual({value: " ", scopes: ["source.gfm", "markup.bold.gfm"]});
expect(tokens[5]).toEqual({value: "&", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[6]).toEqual({value: "#8482", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm"]});
expect(tokens[7]).toEqual({value: ";", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[8]).toEqual({value: " ", scopes: ["source.gfm", "markup.bold.gfm"]});
expect(tokens[9]).toEqual({value: "&", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[10]).toEqual({value: "#xb3", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm"]});
expect(tokens[11]).toEqual({value: ";", scopes: ["source.gfm", "markup.bold.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[12]).toEqual({value: "__", scopes: ["source.gfm", "markup.bold.gfm", "punctuation.definition.entity.gfm"]});
});
it("tokenizes HTML entities in ***bold italic*** text", function() {
let {tokens} = grammar.tokenizeLine("***&trade; &#8482; &#xb3;***");
expect(tokens[0]).toEqual({value: "***", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(tokens[1]).toEqual({value: "&", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[2]).toEqual({value: "trade", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[3]).toEqual({value: ";", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm"]});
expect(tokens[4]).toEqual({value: " ", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(tokens[5]).toEqual({value: "&", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[6]).toEqual({value: "#8482", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[7]).toEqual({value: ";", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm"]});
expect(tokens[8]).toEqual({value: " ", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(tokens[9]).toEqual({value: "&", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[10]).toEqual({value: "#xb3", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[11]).toEqual({value: ";", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm"]});
expect(tokens[12]).toEqual({value: "***", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
({tokens} = grammar.tokenizeLine("___&trade; &#8482; &#xb3;___"));
expect(tokens[0]).toEqual({value: "___", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(tokens[1]).toEqual({value: "&", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[2]).toEqual({value: "trade", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[3]).toEqual({value: ";", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm"]});
expect(tokens[4]).toEqual({value: " ", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(tokens[5]).toEqual({value: "&", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[6]).toEqual({value: "#8482", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[7]).toEqual({value: ";", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm"]});
expect(tokens[8]).toEqual({value: " ", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
expect(tokens[9]).toEqual({value: "&", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[10]).toEqual({value: "#xb3", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[11]).toEqual({value: ";", scopes: ["source.gfm", "markup.bold.italic.gfm", "constant.character.entity.gfm"]});
expect(tokens[12]).toEqual({value: "___", scopes: ["source.gfm", "markup.bold.italic.gfm"]});
});
it("tokenizes HTML entities in strikethrough text", function() {
const {tokens} = grammar.tokenizeLine("~~&trade; &#8482; &#xb3;~~");
expect(tokens[0]).toEqual({value: "~~", scopes: ["source.gfm", "markup.strike.gfm"]});
expect(tokens[1]).toEqual({value: "&", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[2]).toEqual({value: "trade", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm"]});
expect(tokens[3]).toEqual({value: ";", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[4]).toEqual({value: " ", scopes: ["source.gfm", "markup.strike.gfm"]});
expect(tokens[5]).toEqual({value: "&", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[6]).toEqual({value: "#8482", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm"]});
expect(tokens[7]).toEqual({value: ";", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[8]).toEqual({value: " ", scopes: ["source.gfm", "markup.strike.gfm"]});
expect(tokens[9]).toEqual({value: "&", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[10]).toEqual({value: "#xb3", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm"]});
expect(tokens[11]).toEqual({value: ";", scopes: ["source.gfm", "markup.strike.gfm", "constant.character.entity.gfm", "punctuation.definition.entity.gfm"]});
expect(tokens[12]).toEqual({value: "~~", scopes: ["source.gfm", "markup.strike.gfm"]});
});
it("tokenizes HTML comments", function() {
const {tokens} = grammar.tokenizeLine("<!-- a comment -->");
expect(tokens[0]).toEqual({value: "<!--", scopes: ["source.gfm", "comment.block.gfm", "punctuation.definition.comment.gfm"]});
expect(tokens[1]).toEqual({value: " a comment ", scopes: ["source.gfm", "comment.block.gfm"]});
expect(tokens[2]).toEqual({value: "-->", scopes: ["source.gfm", "comment.block.gfm", "punctuation.definition.comment.gfm"]});
});
it("tokenizes YAML front matter", function() {
const [firstLineTokens, secondLineTokens, thirdLineTokens] = Array.from(grammar.tokenizeLines(`\
---
front: matter
---\
`
));
expect(firstLineTokens[0]).toEqual({value: "---", scopes: ["source.gfm", "front-matter.yaml.gfm", "comment.hr.gfm"]});
expect(secondLineTokens[0]).toEqual({value: "front: matter", scopes: ["source.gfm", "front-matter.yaml.gfm"]});
expect(thirdLineTokens[0]).toEqual({value: "---", scopes: ["source.gfm", "front-matter.yaml.gfm", "comment.hr.gfm"]});
});
it("tokenizes linebreaks", function() {
const {tokens} = grammar.tokenizeLine("line ");
expect(tokens[0]).toEqual({value: "line", scopes: ["source.gfm"]});
expect(tokens[1]).toEqual({value: "  ", scopes: ["source.gfm", "linebreak.gfm"]});
});
it("tokenizes tables", function() {
let emptyLineTokens, headingTokens;
let [headerTokens, alignTokens, contentTokens] = Array.from(grammar.tokenizeLines(`\
| Column 1 | Column 2 |
|:----------|:---------:|
| Content 1 | Content 2 |\
`
));
// Header line
expect(headerTokens[0]).toEqual({value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.outer"]});
expect(headerTokens[1]).toEqual({value: " Column 1 ", scopes: ["source.gfm", "table.gfm"]});
expect(headerTokens[2]).toEqual({value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.inner"]});
expect(headerTokens[3]).toEqual({value: " Column 2 ", scopes: ["source.gfm", "table.gfm"]});
expect(headerTokens[4]).toEqual({value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.outer"]});
// Alignment line
expect(alignTokens[0]).toEqual({value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.outer"]});
expect(alignTokens[1]).toEqual({value: ":", scopes: ["source.gfm", "table.gfm", "border.alignment"]});
expect(alignTokens[2]).toEqual({value: "----------", scopes: ["source.gfm", "table.gfm", "border.header"]});
expect(alignTokens[3]).toEqual({value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.inner"]});
expect(alignTokens[4]).toEqual({value: ":", scopes: ["source.gfm", "table.gfm", "border.alignment"]});
expect(alignTokens[5]).toEqual({value: "---------", scopes: ["source.gfm", "table.gfm", "border.header"]});
expect(alignTokens[6]).toEqual({value: ":", scopes: ["source.gfm", "table.gfm", "border.alignment"]});
expect(alignTokens[7]).toEqual({value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.outer"]});
// Content line
expect(contentTokens[0]).toEqual({value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.outer"]});
expect(contentTokens[1]).toEqual({value: " Content 1 ", scopes: ["source.gfm", "table.gfm"]});
expect(contentTokens[2]).toEqual({value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.inner"]});
expect(contentTokens[3]).toEqual({value: " Content 2 ", scopes: ["source.gfm", "table.gfm"]});
expect(contentTokens[4]).toEqual({value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.outer"]});
[headerTokens, emptyLineTokens, headingTokens] = Array.from(grammar.tokenizeLines(`\
| Column 1 | Column 2\t

# Heading\
`
));
expect(headerTokens[0]).toEqual({value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.outer"]});
expect(headerTokens[1]).toEqual({value: " Column 1 ", scopes: ["source.gfm", "table.gfm"]});
expect(headerTokens[2]).toEqual({value: "|", scopes: ["source.gfm", "table.gfm", "border.pipe.inner"]});
expect(headerTokens[3]).toEqual({value: " Column 2", scopes: ["source.gfm", "table.gfm"]});
expect(headerTokens[4]).toEqual({value: "\t", scopes: ["source.gfm", "table.gfm"]});
expect(headingTokens[0]).toEqual({value: "#", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "markup.heading.marker.gfm"]});
expect(headingTokens[1]).toEqual({value: " ", scopes: ["source.gfm", "markup.heading.heading-1.gfm", "markup.heading.space.gfm"]});
expect(headingTokens[2]).toEqual({value: "Heading", scopes: ["source.gfm", "markup.heading.heading-1.gfm"]});
});
it("tokenizes criticmarkup", function() {
const [addToken, delToken, hlToken, subToken] = Array.from(grammar.tokenizeLines(`\
Add{++ some text++}
Delete{-- some text--}
Highlight {==some text==}{>>with comment<<}
Replace {~~this~>by that~~}\
`
));
// Addition
expect(addToken[0]).toEqual({value: "Add", scopes: ["source.gfm"]});
expect(addToken[1]).toEqual({value: "{++", scopes: ["source.gfm", "markup.inserted.critic.gfm.addition", "punctuation.definition.inserted.critic.gfm.addition.marker"]});
expect(addToken[2]).toEqual({value: " some text", scopes: ["source.gfm", "markup.inserted.critic.gfm.addition"]});
expect(addToken[3]).toEqual({value: "++}", scopes: ["source.gfm", "markup.inserted.critic.gfm.addition", "punctuation.definition.inserted.critic.gfm.addition.marker"]});
// Deletion
expect(delToken[0]).toEqual({value: "Delete", scopes: ["source.gfm"]});
expect(delToken[1]).toEqual({value: "{--", scopes: ["source.gfm", "markup.deleted.critic.gfm.deletion", "punctuation.definition.deleted.critic.gfm.deletion.marker"]});
expect(delToken[2]).toEqual({value: " some text", scopes: ["source.gfm", "markup.deleted.critic.gfm.deletion"]});
expect(delToken[3]).toEqual({value: "--}", scopes: ["source.gfm", "markup.deleted.critic.gfm.deletion", "punctuation.definition.deleted.critic.gfm.deletion.marker"]});
// Comment and highlight
expect(hlToken[0]).toEqual({value: "Highlight ", scopes: ["source.gfm"]});
expect(hlToken[1]).toEqual({value: "{==", scopes: ["source.gfm", "critic.gfm.highlight", "critic.gfm.highlight.marker"]});
expect(hlToken[2]).toEqual({value: "some text", scopes: ["source.gfm", "critic.gfm.highlight"]});
expect(hlToken[3]).toEqual({value: "==}", scopes: ["source.gfm", "critic.gfm.highlight", "critic.gfm.highlight.marker"]});
expect(hlToken[4]).toEqual({value: "{>>", scopes: ["source.gfm", "critic.gfm.comment", "critic.gfm.comment.marker"]});
expect(hlToken[5]).toEqual({value: "with comment", scopes: ["source.gfm", "critic.gfm.comment"]});
expect(hlToken[6]).toEqual({value: "<<}", scopes: ["source.gfm", "critic.gfm.comment", "critic.gfm.comment.marker"]});
// Replace
expect(subToken[0]).toEqual({value: "Replace ", scopes: ["source.gfm"]});
expect(subToken[1]).toEqual({value: "{~~", scopes: ["source.gfm", "markup.changed.critic.gfm.substitution", "punctuation.definition.changed.critic.gfm.substitution.marker"]});
expect(subToken[2]).toEqual({value: "this", scopes: ["source.gfm", "markup.changed.critic.gfm.substitution"]});
expect(subToken[3]).toEqual({value: "~>", scopes: ["source.gfm", "markup.changed.critic.gfm.substitution", "punctuation.definition.changed.critic.gfm.substitution.operator"]});
expect(subToken[4]).toEqual({value: "by that", scopes: ["source.gfm", "markup.changed.critic.gfm.substitution"]});
expect(subToken[5]).toEqual({value: "~~}", scopes: ["source.gfm", "markup.changed.critic.gfm.substitution", "punctuation.definition.changed.critic.gfm.substitution.marker"]});
});
});

View File

@ -1,196 +0,0 @@
describe "Git grammars", ->
grammar = null
beforeEach ->
waitsForPromise ->
atom.packages.activatePackage("language-git")
describe "Git configs", ->
beforeEach ->
grammar = atom.grammars.grammarForScopeName("source.git-config")
it "parses the Git config grammar", ->
expect(grammar).toBeTruthy()
expect(grammar.scopeName).toBe "source.git-config"
describe "Git commit messages", ->
scopeNormal = ['text.git-commit', 'meta.scope.message.git-commit']
scopeLeadingLowercase =
['text.git-commit', 'meta.scope.message.git-commit', 'invalid.illegal.first-char-should-be-uppercase.git-commit']
scopeTrailingPeriod =
['text.git-commit', 'meta.scope.message.git-commit', 'invalid.illegal.subject-no-trailing-period.git-commit']
scopeLineOver50 = ['text.git-commit', 'meta.scope.message.git-commit', 'invalid.deprecated.line-too-long.git-commit']
scopeLineOver72 = ['text.git-commit', 'meta.scope.message.git-commit', 'invalid.illegal.line-too-long.git-commit']
beforeEach ->
grammar = atom.grammars.grammarForScopeName("text.git-commit")
it "parses the Git commit message grammar", ->
expect(grammar).toBeTruthy()
expect(grammar.scopeName).toBe "text.git-commit"
it "highlights subject lines of less than 50 chars correctly", ->
{tokens} = grammar.tokenizeLine("123456789012345678901234567890", null, true)
expect(tokens[0]).toEqual value: '123456789012345678901234567890', scopes: scopeNormal
{tokens} = grammar.tokenizeLine("a23456789012345678901234567890", null, true)
expect(tokens[0]).toEqual value: 'a', scopes: scopeLeadingLowercase
expect(tokens[1]).toEqual value: '23456789012345678901234567890', scopes: scopeNormal
{tokens} = grammar.tokenizeLine("12345678901234567890123456789.", null, true)
expect(tokens[0]).toEqual value: '12345678901234567890123456789', scopes: scopeNormal
expect(tokens[1]).toEqual value: '.', scopes: scopeTrailingPeriod
{tokens} = grammar.tokenizeLine("b2345678901234567890123456789.", null, true)
expect(tokens[0]).toEqual value: 'b', scopes: scopeLeadingLowercase
expect(tokens[1]).toEqual value: '2345678901234567890123456789', scopes: scopeNormal
expect(tokens[2]).toEqual value: '.', scopes: scopeTrailingPeriod
it "highlights subject lines of 50 chars correctly", ->
{tokens} = grammar.tokenizeLine("12345678901234567890123456789012345678901234567890", null, true)
expect(tokens[0]).toEqual value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal
{tokens} = grammar.tokenizeLine("c2345678901234567890123456789012345678901234567890", null, true)
expect(tokens[0]).toEqual value: 'c', scopes: scopeLeadingLowercase
expect(tokens[1]).toEqual value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal
{tokens} = grammar.tokenizeLine("1234567890123456789012345678901234567890123456789.", null, true)
expect(tokens[0]).toEqual value: '1234567890123456789012345678901234567890123456789', scopes: scopeNormal
expect(tokens[1]).toEqual value: '.', scopes: scopeTrailingPeriod
{tokens} = grammar.tokenizeLine("d234567890123456789012345678901234567890123456789.", null, true)
expect(tokens[0]).toEqual value: 'd', scopes: scopeLeadingLowercase
expect(tokens[1]).toEqual value: '234567890123456789012345678901234567890123456789', scopes: scopeNormal
expect(tokens[2]).toEqual value: '.', scopes: scopeTrailingPeriod
it "highlights subject lines of 51 chars correctly", ->
{tokens} = grammar.tokenizeLine("123456789012345678901234567890123456789012345678901", null, true)
expect(tokens[0]).toEqual value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[1]).toEqual value: '1', scopes: scopeLineOver50
{tokens} = grammar.tokenizeLine("e23456789012345678901234567890123456789012345678901", null, true)
expect(tokens[0]).toEqual value: 'e', scopes: scopeLeadingLowercase
expect(tokens[1]).toEqual value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[2]).toEqual value: '1', scopes: scopeLineOver50
{tokens} = grammar.tokenizeLine("12345678901234567890123456789012345678901234567890.", null, true)
expect(tokens[0]).toEqual value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[1]).toEqual value: '.', scopes: scopeTrailingPeriod
{tokens} = grammar.tokenizeLine("f2345678901234567890123456789012345678901234567890.", null, true)
expect(tokens[0]).toEqual value: 'f', scopes: scopeLeadingLowercase
expect(tokens[1]).toEqual value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[2]).toEqual value: '.', scopes: scopeTrailingPeriod
it "highlights subject lines of 72 chars correctly", ->
{tokens} = grammar.tokenizeLine("123456789012345678901234567890123456789012345678901234567890123456789012", null, true)
expect(tokens[0]).toEqual value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[1]).toEqual value: '123456789012345678901', scopes: scopeLineOver50
expect(tokens[2]).toEqual value: '2', scopes: scopeLineOver50
{tokens} = grammar.tokenizeLine("g23456789012345678901234567890123456789012345678901234567890123456789012", null, true)
expect(tokens[0]).toEqual value: 'g', scopes: scopeLeadingLowercase
expect(tokens[1]).toEqual value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[2]).toEqual value: '123456789012345678901', scopes: scopeLineOver50
expect(tokens[3]).toEqual value: '2', scopes: scopeLineOver50
{tokens} = grammar.tokenizeLine("12345678901234567890123456789012345678901234567890123456789012345678901.", null, true)
expect(tokens[0]).toEqual value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[1]).toEqual value: '123456789012345678901', scopes: scopeLineOver50
expect(tokens[2]).toEqual value: '.', scopes: scopeTrailingPeriod
{tokens} = grammar.tokenizeLine("h2345678901234567890123456789012345678901234567890123456789012345678901.", null, true)
expect(tokens[0]).toEqual value: 'h', scopes: scopeLeadingLowercase
expect(tokens[1]).toEqual value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[2]).toEqual value: '123456789012345678901', scopes: scopeLineOver50
expect(tokens[3]).toEqual value: '.', scopes: scopeTrailingPeriod
it "highlights subject lines of 73 chars correctly", ->
{tokens} = grammar.tokenizeLine("1234567890123456789012345678901234567890123456789012345678901234567890123", null, true)
expect(tokens[0]).toEqual value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[1]).toEqual value: '1234567890123456789012', scopes: scopeLineOver50
expect(tokens[2]).toEqual value: '3', scopes: scopeLineOver72
{tokens} = grammar.tokenizeLine("i234567890123456789012345678901234567890123456789012345678901234567890123", null, true)
expect(tokens[0]).toEqual value: 'i', scopes: scopeLeadingLowercase
expect(tokens[1]).toEqual value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[2]).toEqual value: '1234567890123456789012', scopes: scopeLineOver50
expect(tokens[3]).toEqual value: '3', scopes: scopeLineOver72
{tokens} = grammar.tokenizeLine("123456789012345678901234567890123456789012345678901234567890123456789012.", null, true)
expect(tokens[0]).toEqual value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[1]).toEqual value: '1234567890123456789012', scopes: scopeLineOver50
expect(tokens[2]).toEqual value: '.', scopes: scopeTrailingPeriod
{tokens} = grammar.tokenizeLine("j23456789012345678901234567890123456789012345678901234567890123456789012.", null, true)
expect(tokens[0]).toEqual value: 'j', scopes: scopeLeadingLowercase
expect(tokens[1]).toEqual value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[2]).toEqual value: '1234567890123456789012', scopes: scopeLineOver50
expect(tokens[3]).toEqual value: '.', scopes: scopeTrailingPeriod
it "highlights subject lines of over 73 chars correctly", ->
{tokens} = grammar.tokenizeLine("123456789012345678901234567890123456789012345678901234567890123456789012345678", null, true)
expect(tokens[0]).toEqual value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[1]).toEqual value: '1234567890123456789012', scopes: scopeLineOver50
expect(tokens[2]).toEqual value: '345678', scopes: scopeLineOver72
{tokens} = grammar.tokenizeLine("k23456789012345678901234567890123456789012345678901234567890123456789012345678", null, true)
expect(tokens[0]).toEqual value: 'k', scopes: scopeLeadingLowercase
expect(tokens[1]).toEqual value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[2]).toEqual value: '1234567890123456789012', scopes: scopeLineOver50
expect(tokens[3]).toEqual value: '345678', scopes: scopeLineOver72
{tokens} = grammar.tokenizeLine("123456789012345678901234567890123456789012345678901234567890123456789012345678.", null, true)
expect(tokens[0]).toEqual value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[1]).toEqual value: '1234567890123456789012', scopes: scopeLineOver50
expect(tokens[2]).toEqual value: '345678', scopes: scopeLineOver72
expect(tokens[3]).toEqual value: '.', scopes: scopeTrailingPeriod
{tokens} = grammar.tokenizeLine("m23456789012345678901234567890123456789012345678901234567890123456789012345678.", null, true)
expect(tokens[0]).toEqual value: 'm', scopes: scopeLeadingLowercase
expect(tokens[1]).toEqual value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal
expect(tokens[2]).toEqual value: '1234567890123456789012', scopes: scopeLineOver50
expect(tokens[3]).toEqual value: '345678', scopes: scopeLineOver72
expect(tokens[4]).toEqual value: '.', scopes: scopeTrailingPeriod
describe "Git rebases", ->
beforeEach ->
grammar = atom.grammars.grammarForScopeName("text.git-rebase")
it "parses the Git rebase message grammar", ->
expect(grammar).toBeTruthy()
expect(grammar.scopeName).toBe "text.git-rebase"
for cmd in ["pick", "p", "reword", "r", "edit", "e", "squash", "s", "fixup", "f", "drop", "d"]
it "parses the #{cmd} command", ->
{tokens} = grammar.tokenizeLine "#{cmd} c0ffeee This is commit message"
expect(tokens[0]).toEqual value: cmd, scopes: ["text.git-rebase", "meta.commit-command.git-rebase", "support.function.git-rebase"]
expect(tokens[1]).toEqual value: " ", scopes: ["text.git-rebase", "meta.commit-command.git-rebase"]
expect(tokens[2]).toEqual value: "c0ffeee", scopes: ["text.git-rebase", "meta.commit-command.git-rebase", "constant.sha.git-rebase"]
expect(tokens[3]).toEqual value: " ", scopes: ["text.git-rebase", "meta.commit-command.git-rebase"]
expect(tokens[4]).toEqual value: "This is commit message", scopes: ["text.git-rebase", "meta.commit-command.git-rebase", "meta.commit-message.git-rebase"]
it "parses the exec command", ->
{tokens} = grammar.tokenizeLine "exec"
expect(tokens[0]).toEqual value: "exec", scopes: ["text.git-rebase", "meta.exec-command.git-rebase", "support.function.git-rebase"]
{tokens} = grammar.tokenizeLine "x"
expect(tokens[0]).toEqual value: "x", scopes: ["text.git-rebase", "meta.exec-command.git-rebase", "support.function.git-rebase"]
it "includes language-shellscript highlighting when using the exec command", ->
waitsForPromise ->
atom.packages.activatePackage("language-shellscript")
runs ->
{tokens} = grammar.tokenizeLine "exec echo 'Hello World'"
expect(tokens[0]).toEqual value: "exec", scopes: ["text.git-rebase", "meta.exec-command.git-rebase", "support.function.git-rebase"]
expect(tokens[1]).toEqual value: " ", scopes: ["text.git-rebase", "meta.exec-command.git-rebase"]
expect(tokens[2]).toEqual value: "echo", scopes: ["text.git-rebase", "meta.exec-command.git-rebase", "support.function.builtin.shell"]

View File

@ -0,0 +1,208 @@
describe("Git grammars", function() {
let grammar = null;
beforeEach(() => waitsForPromise(() => atom.packages.activatePackage("language-git")));
describe("Git configs", function() {
beforeEach(() => grammar = atom.grammars.grammarForScopeName("source.git-config"));
it("parses the Git config grammar", function() {
expect(grammar).toBeTruthy();
expect(grammar.scopeName).toBe("source.git-config");
});
});
describe("Git commit messages", function() {
const scopeNormal = ['text.git-commit', 'meta.scope.message.git-commit'];
const scopeLeadingLowercase =
['text.git-commit', 'meta.scope.message.git-commit', 'invalid.illegal.first-char-should-be-uppercase.git-commit'];
const scopeTrailingPeriod =
['text.git-commit', 'meta.scope.message.git-commit', 'invalid.illegal.subject-no-trailing-period.git-commit'];
const scopeLineOver50 = ['text.git-commit', 'meta.scope.message.git-commit', 'invalid.deprecated.line-too-long.git-commit'];
const scopeLineOver72 = ['text.git-commit', 'meta.scope.message.git-commit', 'invalid.illegal.line-too-long.git-commit'];
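// These scopes encode the common Git convention: subject text past column 50 is flagged as deprecated, and anything past column 72 as illegal.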
beforeEach(() => grammar = atom.grammars.grammarForScopeName("text.git-commit"));
it("parses the Git commit message grammar", function() {
expect(grammar).toBeTruthy();
expect(grammar.scopeName).toBe("text.git-commit");
});
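// The third argument to tokenizeLine (firstLine = true) tells the grammar the line is the first line of the buffer, which is where the subject-line rules apply.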
it("highlights subject lines of less than 50 chars correctly", function() {
let {tokens} = grammar.tokenizeLine("123456789012345678901234567890", null, true);
expect(tokens[0]).toEqual({value: '123456789012345678901234567890', scopes: scopeNormal});
({tokens} = grammar.tokenizeLine("a23456789012345678901234567890", null, true));
expect(tokens[0]).toEqual({value: 'a', scopes: scopeLeadingLowercase});
expect(tokens[1]).toEqual({value: '23456789012345678901234567890', scopes: scopeNormal});
({tokens} = grammar.tokenizeLine("12345678901234567890123456789.", null, true));
expect(tokens[0]).toEqual({value: '12345678901234567890123456789', scopes: scopeNormal});
expect(tokens[1]).toEqual({value: '.', scopes: scopeTrailingPeriod});
({tokens} = grammar.tokenizeLine("b2345678901234567890123456789.", null, true));
expect(tokens[0]).toEqual({value: 'b', scopes: scopeLeadingLowercase});
expect(tokens[1]).toEqual({value: '2345678901234567890123456789', scopes: scopeNormal});
expect(tokens[2]).toEqual({value: '.', scopes: scopeTrailingPeriod});
});
it("highlights subject lines of 50 chars correctly", function() {
let {tokens} = grammar.tokenizeLine("12345678901234567890123456789012345678901234567890", null, true);
expect(tokens[0]).toEqual({value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal});
({tokens} = grammar.tokenizeLine("c2345678901234567890123456789012345678901234567890", null, true));
expect(tokens[0]).toEqual({value: 'c', scopes: scopeLeadingLowercase});
expect(tokens[1]).toEqual({value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal});
({tokens} = grammar.tokenizeLine("1234567890123456789012345678901234567890123456789.", null, true));
expect(tokens[0]).toEqual({value: '1234567890123456789012345678901234567890123456789', scopes: scopeNormal});
expect(tokens[1]).toEqual({value: '.', scopes: scopeTrailingPeriod});
({tokens} = grammar.tokenizeLine("d234567890123456789012345678901234567890123456789.", null, true));
expect(tokens[0]).toEqual({value: 'd', scopes: scopeLeadingLowercase});
expect(tokens[1]).toEqual({value: '234567890123456789012345678901234567890123456789', scopes: scopeNormal});
expect(tokens[2]).toEqual({value: '.', scopes: scopeTrailingPeriod});
});
it("highlights subject lines of 51 chars correctly", function() {
let {tokens} = grammar.tokenizeLine("123456789012345678901234567890123456789012345678901", null, true);
expect(tokens[0]).toEqual({value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[1]).toEqual({value: '1', scopes: scopeLineOver50});
({tokens} = grammar.tokenizeLine("e23456789012345678901234567890123456789012345678901", null, true));
expect(tokens[0]).toEqual({value: 'e', scopes: scopeLeadingLowercase});
expect(tokens[1]).toEqual({value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[2]).toEqual({value: '1', scopes: scopeLineOver50});
({tokens} = grammar.tokenizeLine("12345678901234567890123456789012345678901234567890.", null, true));
expect(tokens[0]).toEqual({value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[1]).toEqual({value: '.', scopes: scopeTrailingPeriod});
({tokens} = grammar.tokenizeLine("f2345678901234567890123456789012345678901234567890.", null, true));
expect(tokens[0]).toEqual({value: 'f', scopes: scopeLeadingLowercase});
expect(tokens[1]).toEqual({value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[2]).toEqual({value: '.', scopes: scopeTrailingPeriod});
});
it("highlights subject lines of 72 chars correctly", function() {
let {tokens} = grammar.tokenizeLine("123456789012345678901234567890123456789012345678901234567890123456789012", null, true);
expect(tokens[0]).toEqual({value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[1]).toEqual({value: '123456789012345678901', scopes: scopeLineOver50});
expect(tokens[2]).toEqual({value: '2', scopes: scopeLineOver50});
({tokens} = grammar.tokenizeLine("g23456789012345678901234567890123456789012345678901234567890123456789012", null, true));
expect(tokens[0]).toEqual({value: 'g', scopes: scopeLeadingLowercase});
expect(tokens[1]).toEqual({value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[2]).toEqual({value: '123456789012345678901', scopes: scopeLineOver50});
expect(tokens[3]).toEqual({value: '2', scopes: scopeLineOver50});
({tokens} = grammar.tokenizeLine("12345678901234567890123456789012345678901234567890123456789012345678901.", null, true));
expect(tokens[0]).toEqual({value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[1]).toEqual({value: '123456789012345678901', scopes: scopeLineOver50});
expect(tokens[2]).toEqual({value: '.', scopes: scopeTrailingPeriod});
({tokens} = grammar.tokenizeLine("h2345678901234567890123456789012345678901234567890123456789012345678901.", null, true));
expect(tokens[0]).toEqual({value: 'h', scopes: scopeLeadingLowercase});
expect(tokens[1]).toEqual({value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[2]).toEqual({value: '123456789012345678901', scopes: scopeLineOver50});
expect(tokens[3]).toEqual({value: '.', scopes: scopeTrailingPeriod});
});
it("highlights subject lines of 73 chars correctly", function() {
let {tokens} = grammar.tokenizeLine("1234567890123456789012345678901234567890123456789012345678901234567890123", null, true);
expect(tokens[0]).toEqual({value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[1]).toEqual({value: '1234567890123456789012', scopes: scopeLineOver50});
expect(tokens[2]).toEqual({value: '3', scopes: scopeLineOver72});
({tokens} = grammar.tokenizeLine("i234567890123456789012345678901234567890123456789012345678901234567890123", null, true));
expect(tokens[0]).toEqual({value: 'i', scopes: scopeLeadingLowercase});
expect(tokens[1]).toEqual({value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[2]).toEqual({value: '1234567890123456789012', scopes: scopeLineOver50});
expect(tokens[3]).toEqual({value: '3', scopes: scopeLineOver72});
({tokens} = grammar.tokenizeLine("123456789012345678901234567890123456789012345678901234567890123456789012.", null, true));
expect(tokens[0]).toEqual({value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[1]).toEqual({value: '1234567890123456789012', scopes: scopeLineOver50});
expect(tokens[2]).toEqual({value: '.', scopes: scopeTrailingPeriod});
({tokens} = grammar.tokenizeLine("j23456789012345678901234567890123456789012345678901234567890123456789012.", null, true));
expect(tokens[0]).toEqual({value: 'j', scopes: scopeLeadingLowercase});
expect(tokens[1]).toEqual({value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[2]).toEqual({value: '1234567890123456789012', scopes: scopeLineOver50});
expect(tokens[3]).toEqual({value: '.', scopes: scopeTrailingPeriod});
});
it("highlights subject lines of over 73 chars correctly", function() {
let {tokens} = grammar.tokenizeLine("123456789012345678901234567890123456789012345678901234567890123456789012345678", null, true);
expect(tokens[0]).toEqual({value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[1]).toEqual({value: '1234567890123456789012', scopes: scopeLineOver50});
expect(tokens[2]).toEqual({value: '345678', scopes: scopeLineOver72});
({tokens} = grammar.tokenizeLine("k23456789012345678901234567890123456789012345678901234567890123456789012345678", null, true));
expect(tokens[0]).toEqual({value: 'k', scopes: scopeLeadingLowercase});
expect(tokens[1]).toEqual({value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[2]).toEqual({value: '1234567890123456789012', scopes: scopeLineOver50});
expect(tokens[3]).toEqual({value: '345678', scopes: scopeLineOver72});
({tokens} = grammar.tokenizeLine("123456789012345678901234567890123456789012345678901234567890123456789012345678.", null, true));
expect(tokens[0]).toEqual({value: '12345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[1]).toEqual({value: '1234567890123456789012', scopes: scopeLineOver50});
expect(tokens[2]).toEqual({value: '345678', scopes: scopeLineOver72});
expect(tokens[3]).toEqual({value: '.', scopes: scopeTrailingPeriod});
({tokens} = grammar.tokenizeLine("m23456789012345678901234567890123456789012345678901234567890123456789012345678.", null, true));
expect(tokens[0]).toEqual({value: 'm', scopes: scopeLeadingLowercase});
expect(tokens[1]).toEqual({value: '2345678901234567890123456789012345678901234567890', scopes: scopeNormal});
expect(tokens[2]).toEqual({value: '1234567890123456789012', scopes: scopeLineOver50});
expect(tokens[3]).toEqual({value: '345678', scopes: scopeLineOver72});
expect(tokens[4]).toEqual({value: '.', scopes: scopeTrailingPeriod});
});
});
describe("Git rebases", function() {
beforeEach(() => grammar = atom.grammars.grammarForScopeName("text.git-rebase"));
it("parses the Git rebase message grammar", function() {
expect(grammar).toBeTruthy();
expect(grammar.scopeName).toBe("text.git-rebase");
});
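// const is load-bearing here: each deferred it() callback must capture its own cmd, which a function-scoped var would not provide.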
for (const cmd of ["pick", "p", "reword", "r", "edit", "e", "squash", "s", "fixup", "f", "drop", "d"]) {
it(`parses the ${cmd} command`, function() {
const {tokens} = grammar.tokenizeLine(`${cmd} c0ffeee This is commit message`);
expect(tokens[0]).toEqual({value: cmd, scopes: ["text.git-rebase", "meta.commit-command.git-rebase", "support.function.git-rebase"]});
expect(tokens[1]).toEqual({value: " ", scopes: ["text.git-rebase", "meta.commit-command.git-rebase"]});
expect(tokens[2]).toEqual({value: "c0ffeee", scopes: ["text.git-rebase", "meta.commit-command.git-rebase", "constant.sha.git-rebase"]});
expect(tokens[3]).toEqual({value: " ", scopes: ["text.git-rebase", "meta.commit-command.git-rebase"]});
expect(tokens[4]).toEqual({value: "This is commit message", scopes: ["text.git-rebase", "meta.commit-command.git-rebase", "meta.commit-message.git-rebase"]});
});
}
it("parses the exec command", function() {
let {tokens} = grammar.tokenizeLine("exec");
expect(tokens[0]).toEqual({value: "exec", scopes: ["text.git-rebase", "meta.exec-command.git-rebase", "support.function.git-rebase"]});
({tokens} = grammar.tokenizeLine("x"));
expect(tokens[0]).toEqual({value: "x", scopes: ["text.git-rebase", "meta.exec-command.git-rebase", "support.function.git-rebase"]});
});
it("includes language-shellscript highlighting when using the exec command", function() {
waitsForPromise(() => atom.packages.activatePackage("language-shellscript"));
return runs(function() {
const {tokens} = grammar.tokenizeLine("exec echo 'Hello World'");
expect(tokens[0]).toEqual({value: "exec", scopes: ["text.git-rebase", "meta.exec-command.git-rebase", "support.function.git-rebase"]});
expect(tokens[1]).toEqual({value: " ", scopes: ["text.git-rebase", "meta.exec-command.git-rebase"]});
expect(tokens[2]).toEqual({value: "echo", scopes: ["text.git-rebase", "meta.exec-command.git-rebase", "support.function.builtin.shell"]});
});
});
});
});

View File

@ -1,977 +0,0 @@
describe 'Go grammar', ->
grammar = null
beforeEach ->
atom.config.set 'core.useTreeSitterParsers', false
waitsForPromise ->
atom.packages.activatePackage('language-go')
runs ->
grammar = atom.grammars.grammarForScopeName('source.go')
it 'parses the grammar', ->
expect(grammar).toBeTruthy()
expect(grammar.scopeName).toBe 'source.go'
it 'tokenizes comments', ->
{tokens} = grammar.tokenizeLine('// I am a comment')
expect(tokens[0].value).toEqual '//'
expect(tokens[0].scopes).toEqual ['source.go', 'comment.line.double-slash.go', 'punctuation.definition.comment.go']
expect(tokens[1].value).toEqual ' I am a comment'
expect(tokens[1].scopes).toEqual ['source.go', 'comment.line.double-slash.go']
tokens = grammar.tokenizeLines('/*\nI am a comment\n*/')
expect(tokens[0][0].value).toEqual '/*'
expect(tokens[0][0].scopes).toEqual ['source.go', 'comment.block.go', 'punctuation.definition.comment.go']
expect(tokens[1][0].value).toEqual 'I am a comment'
expect(tokens[1][0].scopes).toEqual ['source.go', 'comment.block.go']
expect(tokens[2][0].value).toEqual '*/'
expect(tokens[2][0].scopes).toEqual ['source.go', 'comment.block.go', 'punctuation.definition.comment.go']
it 'tokenizes comments in imports', ->
lines = grammar.tokenizeLines '''
import (
//"fmt"
"os" // comment
// comment!
)
'''
expect(lines[1][1]).toEqual value: '//', scopes: ['source.go', 'comment.line.double-slash.go', 'punctuation.definition.comment.go']
expect(lines[2][5]).toEqual value: '//', scopes: ['source.go', 'comment.line.double-slash.go', 'punctuation.definition.comment.go']
expect(lines[3][1]).toEqual value: '//', scopes: ['source.go', 'comment.line.double-slash.go', 'punctuation.definition.comment.go']
it 'tokenizes strings', ->
delims =
'string.quoted.double.go': '"'
'string.quoted.raw.go': '`'
for scope, delim of delims
{tokens} = grammar.tokenizeLine(delim + 'I am a string' + delim)
expect(tokens[0].value).toEqual delim
expect(tokens[0].scopes).toEqual ['source.go', scope, 'punctuation.definition.string.begin.go']
expect(tokens[1].value).toEqual 'I am a string'
expect(tokens[1].scopes).toEqual ['source.go', scope]
expect(tokens[2].value).toEqual delim
expect(tokens[2].scopes).toEqual ['source.go', scope, 'punctuation.definition.string.end.go']
it 'tokenizes placeholders in strings', ->
# Taken from go/src/pkg/fmt/fmt_test.go
verbs = [
'%# x', '%-5s', '%5s', '%05s', '%.5s', '%10.1q', '%10v', '%-10v', '%.0d'
'%.d', '%+07.2f', '%0100d', '%0.100f', '%#064x', '%+.3F', '%-#20.8x',
'%[1]d', '%[2]*[1]d', '%[3]*.[2]*[1]f', '%[3]*.[2]f', '%3.[2]d', '%.[2]d'
'%-+[1]x', '%d', '%-d', '%+d', '%#d', '% d', '%0d', '%1.2d', '%-1.2d'
'%+1.2d', '%-+1.2d', '%*d', '%.*d', '%*.*d', '%0*d', '%-*d'
]
for verb in verbs
{tokens} = grammar.tokenizeLine('"' + verb + '"')
expect(tokens[0].value).toEqual '"'
expect(tokens[0].scopes).toEqual ['source.go', 'string.quoted.double.go', 'punctuation.definition.string.begin.go']
expect(tokens[1].value).toEqual verb
expect(tokens[1].scopes).toEqual ['source.go', 'string.quoted.double.go', 'constant.other.placeholder.go']
expect(tokens[2].value).toEqual '"'
expect(tokens[2].scopes).toEqual ['source.go', 'string.quoted.double.go', 'punctuation.definition.string.end.go']
it 'tokenizes character escapes in strings', ->
escapes = [
'\\a', '\\b', '\\f', '\\n', '\\r', '\\t', '\\v', '\\\\'
'\\000', '\\007', '\\377', '\\x07', '\\xff', '\\u12e4', '\\U00101234'
]
for escape in escapes
{tokens} = grammar.tokenizeLine('"' + escape + '"')
expect(tokens[1].value).toEqual escape
expect(tokens[1].scopes).toEqual ['source.go', 'string.quoted.double.go', 'constant.character.escape.go']
{tokens} = grammar.tokenizeLine('"\\""')
expect(tokens[1].value).toEqual '\\"'
expect(tokens[1].scopes).toEqual ['source.go', 'string.quoted.double.go', 'constant.character.escape.go']
it 'tokenizes placeholders in raw strings', ->
# Taken from go/src/pkg/fmt/fmt_test.go
verbs = [
'%# x', '%-5s', '%5s', '%05s', '%.5s', '%10.1q', '%10v', '%-10v', '%.0d'
'%.d', '%+07.2f', '%0100d', '%0.100f', '%#064x', '%+.3F', '%-#20.8x',
'%[1]d', '%[2]*[1]d', '%[3]*.[2]*[1]f', '%[3]*.[2]f', '%3.[2]d', '%.[2]d'
'%-+[1]x', '%d', '%-d', '%+d', '%#d', '% d', '%0d', '%1.2d', '%-1.2d'
'%+1.2d', '%-+1.2d', '%*d', '%.*d', '%*.*d', '%0*d', '%-*d'
]
for verb in verbs
{tokens} = grammar.tokenizeLine('`' + verb + '`')
expect(tokens[0].value).toEqual '`'
expect(tokens[0].scopes).toEqual ['source.go', 'string.quoted.raw.go', 'punctuation.definition.string.begin.go']
expect(tokens[1].value).toEqual verb
expect(tokens[1].scopes).toEqual ['source.go', 'string.quoted.raw.go', 'constant.other.placeholder.go']
expect(tokens[2].value).toEqual '`'
expect(tokens[2].scopes).toEqual ['source.go', 'string.quoted.raw.go', 'punctuation.definition.string.end.go']
it 'tokenizes runes', ->
runes = [
'u', 'X', '$', ':', '(', '.', '2', '=', '!', '@',
'\\a', '\\b', '\\f', '\\n', '\\r', '\\t', '\\v', '\\\\', "\\'", '\\"',
'\\000', '\\007', '\\377', '\\x07', '\\xff', '\\u12e4', '\\U00101234'
]
for rune in runes
{tokens} = grammar.tokenizeLine("'#{rune}'")
expect(tokens[0]).toEqual value: "'", scopes: ['source.go', 'string.quoted.rune.go', 'punctuation.definition.string.begin.go']
expect(tokens[1]).toEqual value: rune, scopes: ['source.go', 'string.quoted.rune.go', 'constant.other.rune.go']
expect(tokens[2]).toEqual value: "'", scopes: ['source.go', 'string.quoted.rune.go', 'punctuation.definition.string.end.go']
it 'tokenizes invalid runes and single quoted strings', ->
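# Go has no single-quoted strings: anything between single quotes that is not one character or a valid escape is an invalid rune literal.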
{tokens} = grammar.tokenizeLine("'\\c'")
expect(tokens[0]).toEqual value: "'", scopes: ['source.go', 'string.quoted.rune.go', 'punctuation.definition.string.begin.go']
expect(tokens[1]).toEqual value: '\\c', scopes: ['source.go', 'string.quoted.rune.go', 'invalid.illegal.unknown-rune.go']
expect(tokens[2]).toEqual value: "'", scopes: ['source.go', 'string.quoted.rune.go', 'punctuation.definition.string.end.go']
{tokens} = grammar.tokenizeLine("'ab'")
expect(tokens[0]).toEqual value: "'", scopes: ['source.go', 'string.quoted.rune.go', 'punctuation.definition.string.begin.go']
expect(tokens[1]).toEqual value: 'ab', scopes: ['source.go', 'string.quoted.rune.go', 'invalid.illegal.unknown-rune.go']
expect(tokens[2]).toEqual value: "'", scopes: ['source.go', 'string.quoted.rune.go', 'punctuation.definition.string.end.go']
{tokens} = grammar.tokenizeLine("'some single quote string'")
expect(tokens[0]).toEqual value: "'", scopes: ['source.go', 'string.quoted.rune.go', 'punctuation.definition.string.begin.go']
expect(tokens[1]).toEqual value: 'some single quote string', scopes: ['source.go', 'string.quoted.rune.go', 'invalid.illegal.unknown-rune.go']
expect(tokens[2]).toEqual value: "'", scopes: ['source.go', 'string.quoted.rune.go', 'punctuation.definition.string.end.go']
it 'tokenizes invalid whitespace around chan annotations', ->
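# The direction arrow must hug chan: "chan<-" and "<-chan" are valid, but the intervening space is flagged as illegal.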
invalid_send =
'chan <- sendonly': ' '
invalid_receive =
'<- chan recvonly': ' '
for expr, invalid of invalid_send
{tokens} = grammar.tokenizeLine(expr)
expect(tokens[1].value).toEqual invalid
expect(tokens[1].scopes).toEqual ['source.go', 'invalid.illegal.send-channel.go']
for expr, invalid of invalid_receive
{tokens} = grammar.tokenizeLine(expr)
expect(tokens[1].value).toEqual invalid
expect(tokens[1].scopes).toEqual ['source.go', 'invalid.illegal.receive-channel.go']
it 'tokenizes keywords', ->
keywordLists =
'keyword.control.go': ['break', 'case', 'continue', 'default', 'defer', 'else', 'fallthrough', 'for', 'go', 'goto', 'if', 'range', 'return', 'select', 'switch']
'keyword.channel.go': ['chan']
'keyword.const.go': ['const']
'keyword.function.go': ['func']
'keyword.interface.go': ['interface']
'keyword.import.go': ['import']
'keyword.map.go': ['map']
'keyword.package.go': ['package']
'keyword.struct.go': ['struct']
'keyword.type.go': ['type']
'keyword.var.go': ['var']
for scope, list of keywordLists
for keyword in list
{tokens} = grammar.tokenizeLine keyword
expect(tokens[0].value).toEqual keyword
expect(tokens[0].scopes).toEqual ['source.go', scope]
it 'tokenizes storage types', ->
storageTypes =
'storage.type.boolean.go': ['bool']
'storage.type.byte.go': ['byte']
'storage.type.error.go': ['error']
'storage.type.numeric.go': ['int', 'int8', 'int16', 'int32', 'int64', 'uint', 'uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64', 'complex64', 'complex128']
'storage.type.rune.go': ['rune']
'storage.type.string.go': ['string']
'storage.type.uintptr.go': ['uintptr']
for scope, types of storageTypes
for type in types
{tokens} = grammar.tokenizeLine type
expect(tokens[0].value).toEqual type
expect(tokens[0].scopes).toEqual ['source.go', scope]
it 'tokenizes func regardless of the context', ->
funcKeyword = ['func f()', 'func (x) f()', 'func(x) f()', 'func']
for line in funcKeyword
{tokens} = grammar.tokenizeLine line
expect(tokens[0].value).toEqual 'func'
expect(tokens[0].scopes).toEqual ['source.go', 'keyword.function.go']
funcType = [
{
'line': 'var f1 func('
'tokenPos': 4
}
{
'line': 'f2 :=func()'
'tokenPos': 3
}
{
'line': '\tfunc('
'tokenPos': 1
}
{
'line': 'type HandlerFunc func('
'tokenPos': 4
}
]
for t in funcType
{tokens} = grammar.tokenizeLine t.line
relevantToken = tokens[t.tokenPos]
expect(relevantToken.value).toEqual 'func'
expect(relevantToken.scopes).toEqual ['source.go', 'keyword.function.go']
next = tokens[t.tokenPos + 1]
expect(next.value).toEqual '('
expect(next.scopes).toEqual ['source.go', 'punctuation.definition.begin.bracket.round.go']
it 'only tokenizes func when it is an exact match', ->
tests = ['myfunc', 'funcMap']
for test in tests
{tokens} = grammar.tokenizeLine test
expect(tokens[0].value).not.toEqual 'func'
expect(tokens[0].scopes).not.toEqual ['source.go', 'keyword.function.go']
it 'tokenizes func names in their declarations', ->
tests = [
{
'line': 'func f()'
'tokenPos': 2
}
{
'line': 'func (T) f()'
'tokenPos': 6
}
{
'line': 'func (t T) f()'
'tokenPos': 6
}
{
'line': 'func (t *T) f()'
'tokenPos': 8
}
]
for t in tests
{tokens} = grammar.tokenizeLine t.line
expect(tokens[0].value).toEqual 'func'
expect(tokens[0].scopes).toEqual ['source.go', 'keyword.function.go']
relevantToken = tokens[t.tokenPos]
expect(relevantToken).toBeDefined()
expect(relevantToken.value).toEqual 'f'
expect(relevantToken.scopes).toEqual ['source.go', 'entity.name.function.go']
next = tokens[t.tokenPos + 1]
expect(next.value).toEqual '('
expect(next.scopes).toEqual ['source.go', 'punctuation.definition.begin.bracket.round.go']
it 'tokenizes operators in method declarations', ->
tests = [
{
'line': 'func (t *T) f()'
'tokenPos': 4
}
]
for t in tests
{tokens} = grammar.tokenizeLine t.line
expect(tokens[0].value).toEqual 'func'
expect(tokens[0].scopes).toEqual ['source.go', 'keyword.function.go']
relevantToken = tokens[t.tokenPos]
expect(relevantToken.value).toEqual '*'
expect(relevantToken.scopes).toEqual ['source.go', 'keyword.operator.address.go']
it 'tokenizes numerics', ->
numbers =
'constant.numeric.integer.go': ['42', '0600', '0xBadFace', '170141183460469231731687303715884105727', '1E6', '0i', '011i', '1E6i']
'constant.numeric.floating-point.go': [
'0.', '72.40', '072.40', '2.71828', '1.e+0', '6.67428e-11', '.25', '.12345E+5',
'0.i', '2.71828i', '1.e+0i', '6.67428e-11i', '.25i', '.12345E+5i'
]
for scope, nums of numbers
for num in nums
{tokens} = grammar.tokenizeLine num
expect(tokens[0].value).toEqual num
expect(tokens[0].scopes).toEqual ['source.go', scope]
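# 8 and 9 are not octal digits, so these zero-prefixed literals are scoped as illegal.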
invalidOctals = ['08', '039', '0995']
for num in invalidOctals
{tokens} = grammar.tokenizeLine num
expect(tokens[0].value).toEqual num
expect(tokens[0].scopes).toEqual ['source.go', 'invalid.illegal.numeric.go']
it 'tokenizes language constants', ->
constants = ['true', 'false', 'nil', 'iota']
for constant in constants
{tokens} = grammar.tokenizeLine constant
expect(tokens[0].value).toEqual constant
expect(tokens[0].scopes).toEqual ['source.go', 'constant.language.go']
it 'tokenizes built-in functions', ->
funcs = [
'append(x)', 'cap(x)', 'close(x)', 'complex(x)', 'copy(x)', 'delete(x)', 'imag(x)', 'len(x)', 'make(x)', 'new(x)',
'panic(x)', 'print(x)', 'println(x)', 'real(x)', 'recover(x)'
]
funcVals = ['append', 'cap', 'close', 'complex', 'copy', 'delete', 'imag', 'len', 'make', 'new', 'panic', 'print', 'println', 'real', 'recover']
for func in funcs
funcVal = funcVals[funcs.indexOf(func)]
{tokens} = grammar.tokenizeLine func
expect(tokens[0].value).toEqual funcVal
expect(tokens[0].scopes).toEqual ['source.go', 'support.function.builtin.go']
it 'tokenizes operators', ->
binaryOpers =
'keyword.operator.arithmetic.go': ['+', '-', '*', '/', '%']
'keyword.operator.arithmetic.bitwise.go': ['&', '|', '^', '&^', '<<', '>>']
'keyword.operator.assignment.go': ['=', '+=', '-=', '|=', '^=', '*=', '/=', ':=', '%=', '<<=', '>>=', '&=', '&^=']
'keyword.operator.channel.go': ['<-']
'keyword.operator.comparison.go': ['==', '!=', '<', '<=', '>', '>=']
'keyword.operator.decrement.go': ['--']
'keyword.operator.ellipsis.go': ['...']
'keyword.operator.increment.go': ['++']
'keyword.operator.logical.go': ['&&', '||']
unaryOpers =
'keyword.operator.address.go': ['*var', '&var']
'keyword.operator.arithmetic.go': ['+var', '-var']
'keyword.operator.arithmetic.bitwise.go': ['^var']
'keyword.operator.logical.go': ['!var']
for scope, ops of binaryOpers
for op in ops
{tokens} = grammar.tokenizeLine op
expect(tokens[0].value).toEqual op
expect(tokens[0].scopes).toEqual ['source.go', scope]
for scope, ops of unaryOpers
for op in ops
{tokens} = grammar.tokenizeLine op
expect(tokens[0].value).toEqual op[0]
expect(tokens[0].scopes).toEqual ['source.go', scope]
it 'does not treat values/variables attached to comparison operators as extensions of the operator', ->
{tokens} = grammar.tokenizeLine '2<3.0 && 12>bar'
expect(tokens[0]).toEqual value: '2', scopes: ['source.go', 'constant.numeric.integer.go']
expect(tokens[1]).toEqual value: '<', scopes: ['source.go', 'keyword.operator.comparison.go']
expect(tokens[2]).toEqual value: '3.0', scopes: ['source.go', 'constant.numeric.floating-point.go']
expect(tokens[6]).toEqual value: '12', scopes: ['source.go', 'constant.numeric.integer.go']
expect(tokens[7]).toEqual value: '>', scopes: ['source.go', 'keyword.operator.comparison.go']
expect(tokens[8]).toEqual value: 'bar', scopes: ['source.go']
it 'tokenizes punctuation brackets', ->
{tokens} = grammar.tokenizeLine '{([])}'
expect(tokens[0]).toEqual value: '{', scopes: ['source.go', 'punctuation.definition.begin.bracket.curly.go']
expect(tokens[1]).toEqual value: '(', scopes: ['source.go', 'punctuation.definition.begin.bracket.round.go']
expect(tokens[2]).toEqual value: '[', scopes: ['source.go', 'punctuation.definition.bracket.square.go']
expect(tokens[3]).toEqual value: ']', scopes: ['source.go', 'punctuation.definition.bracket.square.go']
expect(tokens[4]).toEqual value: ')', scopes: ['source.go', 'punctuation.definition.end.bracket.round.go']
expect(tokens[5]).toEqual value: '}', scopes: ['source.go', 'punctuation.definition.end.bracket.curly.go']
it 'tokenizes punctuation delimiters', ->
delims =
'punctuation.other.comma.go': ','
'punctuation.other.period.go': '.'
'punctuation.other.colon.go': ':'
for scope, delim of delims
{tokens} = grammar.tokenizeLine delim
expect(tokens[0].value).toEqual delim
expect(tokens[0].scopes).toEqual ['source.go', scope]
it 'tokenizes func names in calls to them', ->
tests = [
{
'line': 'a.b()'
'name': 'b'
'tokenPos': 2
'isFunc': true
}
{
'line': 'pkg.Func1('
'name': 'Func1'
'tokenPos': 2
'isFunc': true
}
{
'line': 'pkg.Func1().Func2('
'name': 'Func2'
'tokenPos': 6
'isFunc': true
}
{
'line': 'pkg.var'
'name': 'var'
'tokenPos': 2
'isFunc': false
}
{
'line': 'doWork(ch)'
'name': 'doWork'
'tokenPos': 0
'isFunc': true
}
{
'line': 'f1()'
'name': 'f1'
'tokenPos': 0
'isFunc': true
}
]
want = ['source.go', 'support.function.go']
for t in tests
{tokens} = grammar.tokenizeLine t.line
relevantToken = tokens[t.tokenPos]
if t.isFunc
expect(relevantToken).not.toBeNull()
expect(relevantToken.value).toEqual t.name
expect(relevantToken.scopes).toEqual want
next = tokens[t.tokenPos + 1]
expect(next.value).toEqual '('
expect(next.scopes).toEqual ['source.go', 'punctuation.definition.begin.bracket.round.go']
else
expect(relevantToken.scopes).not.toEqual want
it 'tokenizes package names', ->
tests = ['package main', 'package mypackage']
for test in tests
{tokens} = grammar.tokenizeLine test
expect(tokens[0].scopes).toEqual ['source.go', 'keyword.package.go']
expect(tokens[2].scopes).toEqual ['source.go', 'entity.name.package.go']
it 'tokenizes invalid package names as such', ->
{tokens} = grammar.tokenizeLine 'package 0mypackage'
expect(tokens[0]).toEqual value: 'package', scopes: ['source.go', 'keyword.package.go']
expect(tokens[2]).toEqual value: '0mypackage', scopes: ['source.go', 'invalid.illegal.identifier.go']
it 'does not treat words that have a trailing package as a package name', ->
{tokens} = grammar.tokenizeLine 'func myFunc(Varpackage string)'
expect(tokens[4]).toEqual value: 'Varpackage ', scopes: ['source.go']
expect(tokens[5]).toEqual value: 'string', scopes: ['source.go', 'storage.type.string.go']
it 'tokenizes type names', ->
tests = ['type mystring string', 'type mytype interface{']
for test in tests
{tokens} = grammar.tokenizeLine test
expect(tokens[0].scopes).toEqual ['source.go', 'keyword.type.go']
expect(tokens[2].scopes).toEqual ['source.go', 'entity.name.type.go']
it 'tokenizes invalid type names as such', ->
{tokens} = grammar.tokenizeLine 'type 0mystring string'
expect(tokens[0]).toEqual value: 'type', scopes: ['source.go', 'keyword.type.go']
expect(tokens[2]).toEqual value: '0mystring', scopes: ['source.go', 'invalid.illegal.identifier.go']
it 'does not treat words that have a trailing type as a type name', ->
{tokens} = grammar.tokenizeLine 'func myFunc(Vartype string)'
expect(tokens[4]).toEqual value: 'Vartype ', scopes: ['source.go']
expect(tokens[5]).toEqual value: 'string', scopes: ['source.go', 'storage.type.string.go']
describe 'in variable declarations', ->
testVar = (token) ->
expect(token.value).toBe 'var'
expect(token.scopes).toEqual ['source.go', 'keyword.var.go']
testVarAssignment = (token, name) ->
expect(token.value).toBe name
expect(token.scopes).toEqual ['source.go', 'variable.other.assignment.go']
testVarDeclaration = (token, name) ->
expect(token.value).toBe name
expect(token.scopes).toEqual ['source.go', 'variable.other.declaration.go']
testOp = (token, op) ->
expect(token.value).toBe op
expect(token.scopes).toEqual ['source.go', 'keyword.operator.go']
testOpAddress = (token, op) ->
expect(token.value).toBe op
expect(token.scopes).toEqual ['source.go', 'keyword.operator.address.go']
testOpAssignment = (token, op) ->
expect(token.value).toBe op
expect(token.scopes).toEqual ['source.go', 'keyword.operator.assignment.go']
testOpBracket = (token, op, type) ->
expect(token.value).toBe op
expect(token.scopes).toEqual ['source.go', "punctuation.definition.variables.#{type}.bracket.round.go"]
testOpPunctuation = (token, op) ->
expect(token.value).toBe op
expect(token.scopes).toEqual ['source.go', 'punctuation.other.comma.go']
testOpTermination = (token, op) ->
expect(token.value).toBe op
expect(token.scopes).toEqual ['source.go', 'punctuation.terminator.go']
testNumType = (token, name) ->
expect(token.value).toBe name
expect(token.scopes).toEqual ['source.go', 'storage.type.numeric.go']
testStringType = (token, name) ->
expect(token.value).toBe name
expect(token.scopes).toEqual ['source.go', 'storage.type.string.go']
testNum = (token, value) ->
expect(token.value).toBe value
expect(token.scopes).toEqual ['source.go', 'constant.numeric.integer.go']
testString = (token, value) ->
expect(token.value).toBe value
expect(token.scopes).toEqual ['source.go', 'string.quoted.double.go']
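# For example, `var i int` should satisfy testVar tokens[0],
# testVarDeclaration tokens[2], 'i', and testNumType tokens[4], 'int'
# (see the first declaration spec below).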
describe 'in var statements', ->
it 'tokenizes a single variable assignment', ->
{tokens} = grammar.tokenizeLine 'i = 7'
testVarAssignment tokens[0], 'i'
testOpAssignment tokens[2], '='
testNum tokens[4], '7'
it 'tokenizes a single qualified variable assignment', ->
{tokens} = grammar.tokenizeLine 'a.b.cde = 7'
expect(tokens[0]).toEqual value: 'a', scopes: ['source.go', 'variable.other.assignment.go']
expect(tokens[1]).toEqual value: '.', scopes: ['source.go', 'variable.other.assignment.go', 'punctuation.other.period.go']
expect(tokens[2]).toEqual value: 'b', scopes: ['source.go', 'variable.other.assignment.go']
expect(tokens[3]).toEqual value: '.', scopes: ['source.go', 'variable.other.assignment.go', 'punctuation.other.period.go']
expect(tokens[4]).toEqual value: 'cde', scopes: ['source.go', 'variable.other.assignment.go']
testOpAssignment tokens[6], '='
testNum tokens[8], '7'
it 'tokenizes multiple variable assignments', ->
{tokens} = grammar.tokenizeLine 'i, j = 7, 8'
testVarAssignment tokens[0], 'i'
testOpPunctuation tokens[1], ','
testVarAssignment tokens[3], 'j'
testOpAssignment tokens[5], '='
testNum tokens[7], '7'
testNum tokens[10], '8'
it 'tokenizes multiple qualified variable assignments', ->
{tokens} = grammar.tokenizeLine 'a.b, c.d.efg = 7, 8'
expect(tokens[0]).toEqual value: 'a', scopes: ['source.go', 'variable.other.assignment.go']
expect(tokens[1]).toEqual value: '.', scopes: ['source.go', 'variable.other.assignment.go', 'punctuation.other.period.go']
expect(tokens[2]).toEqual value: 'b', scopes: ['source.go', 'variable.other.assignment.go']
testOpPunctuation tokens[3], ','
expect(tokens[5]).toEqual value: 'c', scopes: ['source.go', 'variable.other.assignment.go']
expect(tokens[6]).toEqual value: '.', scopes: ['source.go', 'variable.other.assignment.go', 'punctuation.other.period.go']
expect(tokens[7]).toEqual value: 'd', scopes: ['source.go', 'variable.other.assignment.go']
expect(tokens[8]).toEqual value: '.', scopes: ['source.go', 'variable.other.assignment.go', 'punctuation.other.period.go']
expect(tokens[9]).toEqual value: 'efg', scopes: ['source.go', 'variable.other.assignment.go']
testOpAssignment tokens[11], '='
testNum tokens[13], '7'
testNum tokens[16], '8'
it 'tokenizes a single name and a type', ->
{tokens} = grammar.tokenizeLine 'var i int'
testVar tokens[0]
testVarDeclaration tokens[2], 'i'
testNumType tokens[4], 'int'
it 'tokenizes a name and a qualified type', ->
{tokens} = grammar.tokenizeLine 'var a b.c'
testVar tokens[0]
expect(tokens[2]).toEqual value: 'a', scopes: ['source.go', 'variable.other.declaration.go']
expect(tokens[3]).toEqual value: ' b', scopes: ['source.go']
expect(tokens[4]).toEqual value: '.', scopes: ['source.go', 'punctuation.other.period.go']
expect(tokens[5]).toEqual value: 'c', scopes: ['source.go']
it 'tokenizes a single name and an array type', ->
{tokens} = grammar.tokenizeLine 'var s []string'
testVar tokens[0]
testVarDeclaration tokens[2], 's'
testStringType tokens[6], 'string'
it 'tokenizes a single name and an array type with predetermined length', ->
{tokens} = grammar.tokenizeLine 'var s [4]string'
testVar tokens[0]
testVarDeclaration tokens[2], 's'
expect(tokens[4]).toEqual value: '[', scopes: ['source.go', 'punctuation.definition.bracket.square.go']
expect(tokens[5]).toEqual value: '4', scopes: ['source.go', 'constant.numeric.integer.go']
expect(tokens[6]).toEqual value: ']', scopes: ['source.go', 'punctuation.definition.bracket.square.go']
testStringType tokens[7], 'string'
it 'tokenizes a single name and an array type with variadic length', ->
{tokens} = grammar.tokenizeLine 'var s [...]string'
testVar tokens[0]
testVarDeclaration tokens[2], 's'
expect(tokens[4]).toEqual value: '[', scopes: ['source.go', 'punctuation.definition.bracket.square.go']
expect(tokens[5]).toEqual value: '...', scopes: ['source.go', 'keyword.operator.ellipsis.go']
expect(tokens[6]).toEqual value: ']', scopes: ['source.go', 'punctuation.definition.bracket.square.go']
testStringType tokens[7], 'string'
it 'tokenizes a single name and multi-dimensional types with an address', ->
{tokens} = grammar.tokenizeLine 'var e [][]*string'
testVar tokens[0]
testVarDeclaration tokens[2], 'e'
expect(tokens[4]).toEqual value: '[', scopes: ['source.go', 'punctuation.definition.bracket.square.go']
expect(tokens[5]).toEqual value: ']', scopes: ['source.go', 'punctuation.definition.bracket.square.go']
expect(tokens[6]).toEqual value: '[', scopes: ['source.go', 'punctuation.definition.bracket.square.go']
expect(tokens[7]).toEqual value: ']', scopes: ['source.go', 'punctuation.definition.bracket.square.go']
testOpAddress tokens[8], '*'
testStringType tokens[9], 'string'
it 'tokenizes a single name and a channel', ->
{tokens} = grammar.tokenizeLine 'var x <-chan bool'
testVar tokens[0]
testVarDeclaration tokens[2], 'x'
expect(tokens[4]).toEqual value: '<-', scopes: ['source.go', 'keyword.operator.channel.go']
expect(tokens[5]).toEqual value: 'chan', scopes: ['source.go', 'keyword.channel.go']
expect(tokens[7]).toEqual value: 'bool', scopes: ['source.go', 'storage.type.boolean.go']
it 'tokenizes a single name and its initialization', ->
{tokens} = grammar.tokenizeLine ' var k = 0'
testVar tokens[1]
testVarAssignment tokens[3], 'k'
testOpAssignment tokens[5], '='
testNum tokens[7], '0'
it 'tokenizes a single name, a type, and an initialization', ->
{tokens} = grammar.tokenizeLine 'var z blub = 7'
testVar tokens[0]
testVarAssignment tokens[2], 'z'
expect(tokens[3]).toEqual value: ' blub ', scopes: ['source.go']
testOpAssignment tokens[4], '='
testNum tokens[6], '7'
it 'tokenizes a single name, a qualified type, and an initialization', ->
{tokens} = grammar.tokenizeLine 'var a b.c = 5'
testVar tokens[0]
expect(tokens[2]).toEqual value: 'a', scopes: ['source.go', 'variable.other.assignment.go']
expect(tokens[3]).toEqual value: ' b', scopes: ['source.go']
expect(tokens[4]).toEqual value: '.', scopes: ['source.go', 'punctuation.other.period.go']
expect(tokens[5]).toEqual value: 'c ', scopes: ['source.go']
testOpAssignment tokens[6], '='
testNum tokens[8], '5'
it 'does not tokenize more than necessary', ->
# Ensure the variable match does not consume the rest of the line with a
# greedy (=.+) style pattern. Because a match rule can only tokenize a
# single line, doing so would break multiline constructs (such as the raw
# string below) that follow the assignment.
lines = grammar.tokenizeLines '''
var multiline string = `wow!
this should work!`
'''
testVar lines[0][0]
testVarAssignment lines[0][2], 'multiline'
testStringType lines[0][4], 'string'
testOpAssignment lines[0][6], '='
expect(lines[0][8]).toEqual value: '`', scopes: ['source.go', 'string.quoted.raw.go', 'punctuation.definition.string.begin.go']
expect(lines[1][1]).toEqual value: '`', scopes: ['source.go', 'string.quoted.raw.go', 'punctuation.definition.string.end.go']
it 'tokenizes multiple names and a type', ->
{tokens} = grammar.tokenizeLine 'var U, V, W float64'
testVar tokens[0]
testVarDeclaration tokens[2], 'U'
testOpPunctuation tokens[3], ','
testVarDeclaration tokens[5], 'V'
testOpPunctuation tokens[6], ','
testVarDeclaration tokens[8], 'W'
it 'tokenizes multiple names and a qualified type', ->
{tokens} = grammar.tokenizeLine 'var a, b c.d'
testVar tokens[0]
expect(tokens[2]).toEqual value: 'a', scopes: ['source.go', 'variable.other.declaration.go']
testOpPunctuation tokens[3], ','
expect(tokens[5]).toEqual value: 'b', scopes: ['source.go', 'variable.other.declaration.go']
expect(tokens[6]).toEqual value: ' c', scopes: ['source.go']
expect(tokens[7]).toEqual value: '.', scopes: ['source.go', 'punctuation.other.period.go']
expect(tokens[8]).toEqual value: 'd', scopes: ['source.go']
it 'tokenizes multiple names and initialization expressions', ->
{tokens} = grammar.tokenizeLine 'var x, y, z = 1, 2, 3'
testVar tokens[0]
testVarAssignment tokens[2], 'x'
testOpPunctuation tokens[3], ','
testVarAssignment tokens[5], 'y'
testOpPunctuation tokens[6], ','
testVarAssignment tokens[8], 'z'
testOpAssignment tokens[10], '='
testNum tokens[12], '1'
testOpPunctuation tokens[13], ','
testNum tokens[15], '2'
testOpPunctuation tokens[16], ','
testNum tokens[18], '3'
it 'tokenizes multiple names, a type, and initialization expressions', ->
{tokens} = grammar.tokenizeLine 'var x, y float32 = float, thirtytwo'
testVar tokens[0]
testVarAssignment tokens[2], 'x'
testOpPunctuation tokens[3], ','
testVarAssignment tokens[5], 'y'
testNumType tokens[7], 'float32'
testOpAssignment tokens[9], '='
testOpPunctuation tokens[11], ','
it 'tokenizes multiple names, a qualified type, and initialization expressions', ->
{tokens} = grammar.tokenizeLine 'var a, b c.d = 1, 2'
testVar tokens[0]
expect(tokens[2]).toEqual value: 'a', scopes: ['source.go', 'variable.other.assignment.go']
testOpPunctuation tokens[3], ','
expect(tokens[5]).toEqual value: 'b', scopes: ['source.go', 'variable.other.assignment.go']
expect(tokens[6]).toEqual value: ' c', scopes: ['source.go']
expect(tokens[7]).toEqual value: '.', scopes: ['source.go', 'punctuation.other.period.go']
expect(tokens[8]).toEqual value: 'd ', scopes: ['source.go']
testOpAssignment tokens[9], '='
testNum tokens[11], '1'
testOpPunctuation tokens[12], ','
testNum tokens[14], '2'
it 'tokenizes multiple names and a function call', ->
{tokens} = grammar.tokenizeLine 'var re, im = complexSqrt(-1)'
testVar tokens[0]
testVarAssignment tokens[2], 're'
testVarAssignment tokens[5], 'im'
testOpAssignment tokens[7], '='
it 'tokenizes with a placeholder', ->
{tokens} = grammar.tokenizeLine 'var _, found = entries[name]'
testVar tokens[0]
testVarAssignment tokens[2], '_'
testVarAssignment tokens[5], 'found'
testOpAssignment tokens[7], '='
it 'does not treat words that have a trailing var as a variable declaration', ->
{tokens} = grammar.tokenizeLine 'func test(envvar string)'
expect(tokens[4]).toEqual value: 'envvar ', scopes: ['source.go']
expect(tokens[5]).toEqual value: 'string', scopes: ['source.go', 'storage.type.string.go']
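# var (...) blocks group several declarations; the parentheses are scoped
# as punctuation.definition.variables.{begin,end}.bracket.round.go.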
describe 'in var statement blocks', ->
it 'tokenizes single names with a type', ->
lines = grammar.tokenizeLines '''
var (
foo *bar
)
'''
testVar lines[0][0]
testOpBracket lines[0][2], '(', 'begin'
testVarDeclaration lines[1][1], 'foo'
testOpAddress lines[1][3], '*'
testOpBracket lines[2][0], ')', 'end'
it 'tokenizes single names with an initializer', ->
lines = grammar.tokenizeLines '''
var (
foo = 42
)
'''
testVar lines[0][0]
testOpBracket lines[0][2], '(', 'begin'
testVarAssignment lines[1][1], 'foo'
testOpAssignment lines[1][3], '='
testNum lines[1][5], '42'
testOpBracket lines[2][0], ')', 'end'
it 'tokenizes multiple names', ->
lines = grammar.tokenizeLines '''
var (
foo, bar = baz, quux
)
'''
testVar lines[0][0]
testOpBracket lines[0][2], '(', 'begin'
testVarAssignment lines[1][1], 'foo'
testOpPunctuation lines[1][2], ','
testVarAssignment lines[1][4], 'bar'
testOpAssignment lines[1][6], '='
testOpPunctuation lines[1][8], ','
testOpBracket lines[2][0], ')', 'end'
it 'tokenizes non-variable declarations', ->
lines = grammar.tokenizeLines '''
var (
// I am a comment
foo *bar
userRegister = &routers.Handler{
Handler: func(c echo.Context) error {
if err := userService.Register(&user); err != nil {
return err
}
return nil
},
}
)
'''
testVar lines[0][0]
testOpBracket lines[0][2], '(', 'begin'
expect(lines[1][1]).toEqual value: '//', scopes: ['source.go', 'comment.line.double-slash.go', 'punctuation.definition.comment.go']
expect(lines[1][2]).toEqual value: ' I am a comment', scopes: ['source.go', 'comment.line.double-slash.go']
testVarDeclaration lines[2][1], 'foo'
testOpAddress lines[2][3], '*'
testVarAssignment lines[3][1], 'userRegister'
expect(lines[4][3]).toEqual value: 'func', scopes: ['source.go', 'keyword.function.go']
expect(lines[5][1]).toEqual value: 'if', scopes: ['source.go', 'keyword.control.go']
expect(lines[8][3]).toEqual value: 'nil', scopes: ['source.go', 'constant.language.go']
testOpBracket lines[11][0], ')', 'end'
it 'tokenizes all parts of variable initializations correctly', ->
lines = grammar.tokenizeLines '''
var (
m = map[string]int{
"key": 10,
}
)
'''
testVar lines[0][0]
testOpBracket lines[0][2], '(', 'begin'
testVarAssignment lines[1][1], 'm'
testOpAssignment lines[1][3], '='
testString lines[2][2], 'key'
testNum lines[2][6], '10'
testOpBracket lines[4][0], ')', 'end'
it 'tokenizes non-ASCII variable names', ->
{tokens} = grammar.tokenizeLine 'über = test'
testVarAssignment tokens[0], 'über'
testOpAssignment tokens[2], '='
it 'tokenizes invalid variable names as such', ->
{tokens} = grammar.tokenizeLine 'var 0test = 0'
testVar tokens[0]
expect(tokens[2]).toEqual value: '0test', scopes: ['source.go', 'invalid.illegal.identifier.go']
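# Shorthand := declarations scope the left-hand names as
# variable.other.assignment.go and := as keyword.operator.assignment.go.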
describe 'in shorthand variable declarations', ->
it 'tokenizes single names', ->
{tokens} = grammar.tokenizeLine 'f := func() int { return 7 }'
testVarAssignment tokens[0], 'f'
testOpAssignment tokens[2], ':='
{tokens} = grammar.tokenizeLine 'ch := make(chan int)'
testVarAssignment tokens[0], 'ch'
testOpAssignment tokens[2], ':='
it 'tokenizes multiple names', ->
{tokens} = grammar.tokenizeLine 'i, j := 0, 10'
testVarAssignment tokens[0], 'i'
testOpPunctuation tokens[1], ','
testVarAssignment tokens[3], 'j'
{tokens} = grammar.tokenizeLine 'if _, y, z := coord(p); z > 0'
testVarAssignment tokens[2], '_'
testVarAssignment tokens[5], 'y'
testVarAssignment tokens[8], 'z'
testOpAssignment tokens[10], ':='
testOpTermination tokens[16], ';'
describe 'in imports declarations', ->
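# Helpers for import specs: the import keyword, an optional alias, the
# quoted package path, and the surrounding bracket/quote punctuation.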
testImport = (token) ->
expect(token.value).toBe 'import'
expect(token.scopes).toEqual ['source.go', 'keyword.import.go']
testImportAlias = (token, name) ->
expect(token.value).toBe name
expect(token.scopes).toEqual ['source.go', 'entity.alias.import.go']
testImportPackage = (token, name) ->
expect(token.value).toBe name
expect(token.scopes).toEqual ['source.go', 'string.quoted.double.go', 'entity.name.import.go']
testOpBracket = (token, op, type) ->
expect(token.value).toBe op
expect(token.scopes).toEqual ['source.go', "punctuation.definition.imports.#{type}.bracket.round.go"]
testBeginQuoted = (token) ->
expect(token.value).toBe '"'
expect(token.scopes).toEqual ['source.go', 'string.quoted.double.go', 'punctuation.definition.string.begin.go']
testEndQuoted = (token) ->
expect(token.value).toBe '"'
expect(token.scopes).toEqual ['source.go', 'string.quoted.double.go', 'punctuation.definition.string.end.go']
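# For example, `import "fmt"` should yield 'import' at tokens[0], the
# opening quote at tokens[2], 'fmt' at tokens[3], and the closing quote
# at tokens[4] (see the first spec below).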
describe 'when it is a single line declaration', ->
it 'tokenizes declarations with a package name', ->
{tokens} = grammar.tokenizeLine 'import "fmt"'
testImport tokens[0]
testBeginQuoted tokens[2]
testImportPackage tokens[3], 'fmt'
testEndQuoted tokens[4]
it 'tokenizes declarations with a package name and an alias', ->
{tokens} = grammar.tokenizeLine 'import . "fmt"'
testImport tokens[0]
testImportAlias tokens[2], '.'
testBeginQuoted tokens[4]
testImportPackage tokens[5], 'fmt'
testEndQuoted tokens[6]
{tokens} = grammar.tokenizeLine 'import otherpackage "github.com/test/package"'
testImport tokens[0]
testImportAlias tokens[2], 'otherpackage'
testBeginQuoted tokens[4]
testImportPackage tokens[5], 'github.com/test/package'
testEndQuoted tokens[6]
it 'does not treat words that have a trailing import as an import declaration', ->
{tokens} = grammar.tokenizeLine 'func myFunc(Varimport string)'
expect(tokens[4]).toEqual value: 'Varimport ', scopes: ['source.go']
expect(tokens[5]).toEqual value: 'string', scopes: ['source.go', 'storage.type.string.go']
describe 'when it is a multi line declaration', ->
it 'tokenizes single declarations with a package name', ->
[kwd, decl, closing] = grammar.tokenizeLines '''
import (
"github.com/test/package"
)
'''
testImport kwd[0]
testOpBracket kwd[2], '(', 'begin'
testBeginQuoted decl[1]
testImportPackage decl[2], 'github.com/test/package'
testEndQuoted decl[3]
testOpBracket closing[0], ')', 'end'
it 'tokenizes multiple declarations with a package name', ->
[kwd, decl, decl2, closing] = grammar.tokenizeLines '''
import (
"github.com/test/package"
"fmt"
)
'''
testImport kwd[0]
testOpBracket kwd[2], '(', 'begin'
testBeginQuoted decl[1]
testImportPackage decl[2], 'github.com/test/package'
testEndQuoted decl[3]
testBeginQuoted decl2[1]
testImportPackage decl2[2], 'fmt'
testEndQuoted decl2[3]
testOpBracket closing[0], ')', 'end'
it 'tokenizes single imports with an alias for a multi-line declaration', ->
[kwd, decl, closing] = grammar.tokenizeLines '''
import (
. "github.com/test/package"
)
'''
testImport kwd[0]
testOpBracket kwd[2], '(', 'begin'
testImportAlias decl[1], '.'
testBeginQuoted decl[3]
testImportPackage decl[4], 'github.com/test/package'
testEndQuoted decl[5]
testOpBracket closing[0], ')', 'end'
it 'tokenizes multiple imports with an alias for a multi-line declaration', ->
[kwd, decl, decl2, closing] = grammar.tokenizeLines '''
import (
. "github.com/test/package"
"fmt"
)
'''
testImport kwd[0]
testOpBracket kwd[2], '(', 'begin'
testImportAlias decl[1], '.'
testBeginQuoted decl[3]
testImportPackage decl[4], 'github.com/test/package'
testEndQuoted decl[5]
testBeginQuoted decl2[1]
testImportPackage decl2[2], 'fmt'
testEndQuoted decl2[3]
testOpBracket closing[0], ')', 'end'

File diff suppressed because it is too large Load Diff

View File

@ -1,63 +0,0 @@
describe 'Go settings', ->
[editor, languageMode] = []
afterEach ->
editor.destroy()
beforeEach ->
atom.config.set 'core.useTreeSitterParsers', false
waitsForPromise ->
atom.workspace.open().then (o) ->
editor = o
languageMode = editor.languageMode
waitsForPromise ->
atom.packages.activatePackage('language-go')
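# increaseIndentPattern should match block-opening lines: case/default
# labels, opening braces, and calls left open by a trailing comma.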
it 'matches lines correctly using the increaseIndentPattern', ->
increaseIndentRegex = languageMode.increaseIndentRegexForScopeDescriptor(['source.go'])
expect(increaseIndentRegex.findNextMatchSync(' case true:')).toBeTruthy()
expect(increaseIndentRegex.findNextMatchSync(' default:')).toBeTruthy()
expect(increaseIndentRegex.findNextMatchSync('func something() {')).toBeTruthy()
expect(increaseIndentRegex.findNextMatchSync(' if true {')).toBeTruthy()
expect(increaseIndentRegex.findNextMatchSync(' else {')).toBeTruthy()
expect(increaseIndentRegex.findNextMatchSync(' switch {')).toBeTruthy()
expect(increaseIndentRegex.findNextMatchSync(' switch true {')).toBeTruthy()
expect(increaseIndentRegex.findNextMatchSync(' select {')).toBeTruthy()
expect(increaseIndentRegex.findNextMatchSync(' select true {')).toBeTruthy()
expect(increaseIndentRegex.findNextMatchSync(' for v := range val {')).toBeTruthy()
expect(increaseIndentRegex.findNextMatchSync('	for i := 0; i < 10; i++ {')).toBeTruthy()
expect(increaseIndentRegex.findNextMatchSync(' type something struct {')).toBeTruthy()
expect(increaseIndentRegex.findNextMatchSync(' fmt.Printf("some%s",')).toBeTruthy()
expect(increaseIndentRegex.findNextMatchSync(' aSlice := []string{}{')).toBeTruthy()
it 'matches lines correctly using the decreaseIndentPattern', ->
decreaseIndentRegex = languageMode.decreaseIndentRegexForScopeDescriptor(['source.go'])
expect(decreaseIndentRegex.findNextMatchSync(' case true:')).toBeTruthy()
expect(decreaseIndentRegex.findNextMatchSync(' default:')).toBeTruthy()
expect(decreaseIndentRegex.findNextMatchSync(' }')).toBeTruthy()
expect(decreaseIndentRegex.findNextMatchSync(' },')).toBeTruthy()
expect(decreaseIndentRegex.findNextMatchSync(' )')).toBeTruthy()
expect(decreaseIndentRegex.findNextMatchSync(' ),')).toBeTruthy()
it 'matches lines correctly using the decreaseNextIndentPattern', ->
decreaseNextIndentRegex = languageMode.decreaseNextIndentRegexForScopeDescriptor(['source.go'])
expect(decreaseNextIndentRegex.findNextMatchSync(' fmt.Println("something"))')).toBeTruthy()
expect(decreaseNextIndentRegex.findNextMatchSync(' fmt.Println("something")),')).toBeTruthy()
expect(decreaseNextIndentRegex.findNextMatchSync(' fmt.Println("something"), "x"),')).toBeTruthy()
expect(decreaseNextIndentRegex.findNextMatchSync(' fmt.Println(fmt.Sprint("something"))),')).toBeTruthy()
expect(decreaseNextIndentRegex.findNextMatchSync(' fmt.Println(fmt.Sprint("something"), "x")),')).toBeTruthy()
expect(decreaseNextIndentRegex.findNextMatchSync(' fmt.Println("something")')).toBeFalsy()
expect(decreaseNextIndentRegex.findNextMatchSync(' fmt.Println("something"),')).toBeFalsy()
# a line with many (), testing for catastrophic backtracking.
# see https://github.com/atom/language-go/issues/78
longLine = 'first.second().third().fourth().fifth().sixth().seventh().eighth().ninth().tenth()'
expect(decreaseNextIndentRegex.findNextMatchSync(longLine)).toBeFalsy()

View File

@ -0,0 +1,67 @@
describe('Go settings', function() {
let editor, languageMode;
afterEach(() => editor.destroy());
beforeEach(function() {
atom.config.set('core.useTreeSitterParsers', false);
waitsForPromise(() => atom.workspace.open().then((o) => {
editor = o;
languageMode = editor.languageMode;
}));
waitsForPromise(() => atom.packages.activatePackage('language-go'));
});
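// increaseIndentPattern should match block-opening lines: case/default
// labels, opening braces, and calls left open by a trailing comma.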
it('matches lines correctly using the increaseIndentPattern', function() {
const increaseIndentRegex = languageMode.increaseIndentRegexForScopeDescriptor(['source.go']);
expect(increaseIndentRegex.findNextMatchSync(' case true:')).toBeTruthy();
expect(increaseIndentRegex.findNextMatchSync(' default:')).toBeTruthy();
expect(increaseIndentRegex.findNextMatchSync('func something() {')).toBeTruthy();
expect(increaseIndentRegex.findNextMatchSync(' if true {')).toBeTruthy();
expect(increaseIndentRegex.findNextMatchSync(' else {')).toBeTruthy();
expect(increaseIndentRegex.findNextMatchSync(' switch {')).toBeTruthy();
expect(increaseIndentRegex.findNextMatchSync(' switch true {')).toBeTruthy();
expect(increaseIndentRegex.findNextMatchSync(' select {')).toBeTruthy();
expect(increaseIndentRegex.findNextMatchSync(' select true {')).toBeTruthy();
expect(increaseIndentRegex.findNextMatchSync(' for v := range val {')).toBeTruthy();
expect(increaseIndentRegex.findNextMatchSync('	for i := 0; i < 10; i++ {')).toBeTruthy();
expect(increaseIndentRegex.findNextMatchSync(' type something struct {')).toBeTruthy();
expect(increaseIndentRegex.findNextMatchSync(' fmt.Printf("some%s",')).toBeTruthy();
expect(increaseIndentRegex.findNextMatchSync(' aSlice := []string{}{')).toBeTruthy();
});
it('matches lines correctly using the decreaseIndentPattern', function() {
const decreaseIndentRegex = languageMode.decreaseIndentRegexForScopeDescriptor(['source.go']);
expect(decreaseIndentRegex.findNextMatchSync(' case true:')).toBeTruthy();
expect(decreaseIndentRegex.findNextMatchSync(' default:')).toBeTruthy();
expect(decreaseIndentRegex.findNextMatchSync(' }')).toBeTruthy();
expect(decreaseIndentRegex.findNextMatchSync(' },')).toBeTruthy();
expect(decreaseIndentRegex.findNextMatchSync(' )')).toBeTruthy();
expect(decreaseIndentRegex.findNextMatchSync(' ),')).toBeTruthy();
});
it('matches lines correctly using the decreaseNextIndentPattern', function() {
const decreaseNextIndentRegex = languageMode.decreaseNextIndentRegexForScopeDescriptor(['source.go']);
expect(decreaseNextIndentRegex.findNextMatchSync(' fmt.Println("something"))')).toBeTruthy();
expect(decreaseNextIndentRegex.findNextMatchSync(' fmt.Println("something")),')).toBeTruthy();
expect(decreaseNextIndentRegex.findNextMatchSync(' fmt.Println("something"), "x"),')).toBeTruthy();
expect(decreaseNextIndentRegex.findNextMatchSync(' fmt.Println(fmt.Sprint("something"))),')).toBeTruthy();
expect(decreaseNextIndentRegex.findNextMatchSync(' fmt.Println(fmt.Sprint("something"), "x")),')).toBeTruthy();
expect(decreaseNextIndentRegex.findNextMatchSync(' fmt.Println("something")')).toBeFalsy();
expect(decreaseNextIndentRegex.findNextMatchSync(' fmt.Println("something"),')).toBeFalsy();
// a line with many (), testing for catastrophic backtracking.
// see https://github.com/atom/language-go/issues/78
const longLine = 'first.second().third().fourth().fifth().sixth().seventh().eighth().ninth().tenth()';
expect(decreaseNextIndentRegex.findNextMatchSync(longLine)).toBeFalsy();
});
});