Mirror of https://github.com/enso-org/enso.git, synced 2024-11-22 11:52:59 +03:00
Parse the standard library (#3830)
Fix bugs in `TreeToIr` (rewrite) and parser. Implement more undocumented features in the parser. Emulate some old parser bugs and quirks for compatibility.

Changes in libs:
- Fix some bugs.
- Clean up some odd syntaxes that the old parser translates idiosyncratically.
- Constructors are now required to precede methods.

# Important Notes
Out of 221 files:
- 215 match the old parser
- 6 contain complex types the old parser is known not to handle correctly

So, compared to the old parser, the new parser parses 103% of files correctly.
Parent: f60e9e9d8e
Commit: 330612119a
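The "constructors must precede methods" rule shows up repeatedly in the library diffs below: each reordered type moves its `Value`/`*_Data` constructor above the methods. As a minimal sketch of the accepted shape (hypothetical type and names, not taken from this commit):

    type Point
        # Constructors are declared first...
        Value x y

        # ...and methods follow them.
        length self = (self.x*self.x + self.y*self.y).sqrt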
@@ -69,7 +69,7 @@ type Fitted_Model
            Fitted_Exponential_Model a b _ -> a.to_text + " * (" + b.to_text + " * X).exp"
            Fitted_Logarithmic_Model a b _ -> a.to_text + " * X.ln + " + b.to_text
            Fitted_Power_Model a b _ -> a.to_text + " * X ^ " + b.to_text
        "Fitted_Model(" + equation + ")"
        "Fitted_Model(" + equation + ")"

    ## Use the model to predict a value.
    predict : Number -> Number
@@ -19,18 +19,6 @@ from project.Data.Statistics.Statistic import all

from project.Data.Statistics.Statistic export all

type Statistic
    ## PRIVATE
       Convert the Enso Statistic into Java equivalent.
    to_moment_statistic : SingleValue
    to_moment_statistic self = case self of
        Sum -> Moments.SUM
        Mean -> Moments.MEAN
        Variance p -> if p then Moments.VARIANCE_POPULATION else Moments.VARIANCE
        Standard_Deviation p -> if p then Moments.STANDARD_DEVIATION_POPULATION else Moments.STANDARD_DEVIATION
        Skew p -> if p then Moments.SKEW_POPULATION else Moments.SKEW
        Kurtosis -> Moments.KURTOSIS
        _ -> Nothing

    ## Count the number of non-Nothing and non-NaN values.
    Count

@@ -89,6 +77,18 @@ type Statistic
        - predicted: the series to compute the r_squared with.
    R_Squared (predicted:Vector)

    ## PRIVATE
       Convert the Enso Statistic into Java equivalent.
    to_moment_statistic : SingleValue
    to_moment_statistic self = case self of
        Sum -> Moments.SUM
        Mean -> Moments.MEAN
        Variance p -> if p then Moments.VARIANCE_POPULATION else Moments.VARIANCE
        Standard_Deviation p -> if p then Moments.STANDARD_DEVIATION_POPULATION else Moments.STANDARD_DEVIATION
        Skew p -> if p then Moments.SKEW_POPULATION else Moments.SKEW
        Kurtosis -> Moments.KURTOSIS
        _ -> Nothing

## Compute a single statistic on a vector like object.
@@ -9,6 +9,30 @@ import project.Internal.IR.Order_Descriptor.Order_Descriptor

   A context associated with an SQL query.
type Context
    ## PRIVATE

       A context associated with an SQL query.

       The expressions can only be computed in a context which specifies from where
       their columns come and set filters and other settings for processing the
       query.

       Arguments:
       - from_spec: the sources for the query, see `From_Spec` for more
         details.
       - where_filters: a list of expressions for filtering - only the rows
         for which these expressions evaluate to true are included in the
         result.
       - orders: a list of ordering expressions, for each entry an ORDER BY
         clause is added.
       - groups: a list of grouping expressions, for each entry a GROUP BY is
         added, the resulting query can then directly include only the
         grouped-by columns or aggregate expressions.
       - meta_index: a list of internal columns to use for joining or grouping.
       - limit: an optional maximum number of elements that the query should
         return.
    Value (from_spec : From_Spec) (where_filters : Vector Expression) (orders : Vector Order_Descriptor) (groups : Vector Expression) (meta_index : Vector Internal_Column) (limit : Nothing | Integer)

    ## PRIVATE

       Creates a query context that just fetches data from a table, without any

@@ -42,30 +66,6 @@ type Context
    for_subquery subquery =
        Context.Value subquery [] [] [] [] Nothing

    ## PRIVATE

       A context associated with an SQL query.

       The expressions can only be computed in a context which specifies from where
       their columns come and set filters and other settings for processing the
       query.

       Arguments:
       - from_spec: the sources for the query, see `From_Spec` for more
         details.
       - where_filters: a list of expressions for filtering - only the rows
         for which these expressions evaluate to true are included in the
         result.
       - orders: a list of ordering expressions, for each entry an ORDER BY
         clause is added.
       - groups: a list of grouping expressions, for each entry a GROUP BY is
         added, the resulting query can then directly include only the
         grouped-by columns or aggregate expressions.
       - meta_index: a list of internal columns to use for joining or grouping.
       - limit: an optional maximum number of elements that the query should
         return.
    Value (from_spec : From_Spec) (where_filters : Vector Expression) (orders : Vector Order_Descriptor) (groups : Vector Expression) (meta_index : Vector Internal_Column) (limit : Nothing | Integer)

    ## PRIVATE

       Returns a copy of the context with changed `meta_index`.
@@ -13,6 +13,18 @@ polyglot java import org.opencv.core.Scalar

## UNSTABLE
type Image
    ## UNSTABLE

       The image data type.

       Arguments:
       - opencv_mat: The underlying matrix that stores the image data.

       The image is represented with a matrix of rows x columns. Each
       pixel is represented with a vector of 1 to 4 values (channels).
       Pixel values are normalized in a range [0.0 .. 1.0].
    Value opencv_mat

    ## UNSTABLE

       Create an image from the array of values.

@@ -102,18 +114,6 @@ type Image
        Panic.catch_java Any (Java_Codecs.write path self.opencv_mat int_flags) _->
            Error.throw (File.IO_Error (File.new path) 'Failed to write to the file')

    ## UNSTABLE

       The image data type.

       Arguments:
       - opencv_mat: The underlying matrix that stores the image data.

       The image is represented with a matrix of rows x columns. Each
       pixel is represented with a vector of 1 to 4 values (channels).
       Pixel values are normalized in a range [0.0 .. 1.0].
    Value opencv_mat

    ## UNSTABLE

       Return the number of image rows.
@@ -15,6 +15,14 @@ polyglot java import org.enso.table.operations.OrderBuilder

from project.Data.Column.Column import Column_Data

type Column
    ## PRIVATE

       A representation of a column in a Table.

       Arguments:
       - java_column: The internal representation of the column.
    Column_Data java_column

    ## Creates a new column given a name and a vector of elements.

       Arguments:

@@ -31,14 +39,6 @@ type Column
    from_vector : Text -> Vector -> Column
    from_vector name items = Column_Data (Java_Column.fromItems name items.to_array)

    ## PRIVATE

       A representation of a column in a Table.

       Arguments:
       - java_column: The internal representation of the column.
    Column_Data java_column

    ## Returns a text containing an ASCII-art table displaying this data.

       Arguments:
@@ -41,6 +41,14 @@ polyglot java import org.enso.table.data.mask.OrderMask

## Represents a column-oriented table data structure.
type Table
    ## PRIVATE

       A table.

       Arguments:
       - java_table: The internal java representation of the table.
    Table_Data java_table

    ## Creates a new table from a vector of `[name, items]` pairs.

       Arguments:

@@ -88,14 +96,6 @@ type Table
        columns = header.map_with_index i-> name-> [name, rows.map (_.at i)]
        Table.new columns

    ## PRIVATE

       A table.

       Arguments:
       - java_table: The internal java representation of the table.
    Table_Data java_table

    ## Returns a text containing an ASCII-art table displaying this data.

       Arguments:
@@ -5,6 +5,8 @@ polyglot java import org.enso.base.Text_Utils

## Object to generate (deterministic) random value for testing
type Faker
    Value generator

    upper_case_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ".char_vector

    lower_case_letters = "abcdefghijklmnopqrstuvwxyz".char_vector

@@ -20,8 +22,6 @@ type Faker
        generator = if seed == 0 then Random.new else Random.new seed
        Faker.Value generator

    Value generator

    ## Creates a random Text based on a template of character sets.

       Arguments:
@@ -29,6 +29,9 @@ find_caller_script stack =

## Holds configuration for a Test_Suite
type Suite_Config
    ## PRIVATE - construct a configuration
    Value only_group_regexp print_only_failures output_path

    ## Creates a Suite_Config based off environment and caller location
    from_environment : Suite_Config
    from_environment =

@@ -47,9 +50,6 @@ type Suite_Config
            Suite_Config.Value only_group_regexp print_only_failures results_path

    ## PRIVATE - construct a configuration
    Value only_group_regexp print_only_failures output_path

    should_run_group self name =
        regexp = self.only_group_regexp
        case regexp of
(File diff suppressed because it is too large.)
@@ -736,6 +736,9 @@ object IR {
          s"""
          |IR.Module.Scope.Import.Module(
          |name = $name,
          |rename = $rename,
          |onlyNames = $onlyNames,
          |hiddenNames = $hiddenNames,
          |location = $location,
          |passData = ${this.showPassData},
          |diagnostics = $diagnostics,
@@ -138,15 +138,6 @@ public class EnsoCompilerTest {
        """);
  }

  @Test
  @Ignore
  public void testIsDigitWithoutSpaces() throws Exception {
    parseTest("""
    compare =
        is_digit=character -> 42
    """);
  }

  @Test
  public void testComments() throws Exception {
    parseTest("""

@@ -405,7 +396,6 @@ public class EnsoCompilerTest {
  }

  @Test
  @Ignore // wrong order of exported symbols
  public void testExportFromTen() throws Exception {
    parseTest("from prj.Data.Foo export One, Two, Three, Four, Five, Six, Seven, Eight, Nine, Ten");
  }

@@ -429,6 +419,27 @@ public class EnsoCompilerTest {
    """);
  }

  @Test
  @Ignore
  public void testRawBlockLiteral() throws Exception {
    // mimics TextTest
    parseTest("""
    x = \"\"\"
        Foo
        Bar
          Baz
    """);
  }

  @Test
  @Ignore
  public void testVariousKindsOfUnicodeWhitespace() throws Exception {
    // mimics Text_Spec.enso:1049
    parseTest("""
    '\\v\\f\\u{200a}\\u{202f}\\u{205F}\\u{3000}\\u{feff}'.trim
    """);
  }

  @Test
  public void testLambda() throws Exception {
    parseTest("""

@@ -625,7 +636,6 @@ public class EnsoCompilerTest {
  }

  @Test
  @Ignore
  public void testExtensionOperator() throws Exception {
    parseTest("""
    Text.* : Integer -> Text

@@ -805,6 +815,13 @@ public class EnsoCompilerTest {
    """);
  }

  @Test
  public void testNameAsMethodApp() throws Exception {
    parseTest("""
    f = foo x=A.B
    """);
  }

  @Test
  public void testIsMethodWithSpaces() throws Exception {
    parseTest("""

@@ -850,6 +867,77 @@ public class EnsoCompilerTest {
    """);
  }

  @Test
  public void testConstructorMultipleNamedArgs1() throws Exception {
    parseTest("""
    x = Regex_Matcher.Regex_Matcher_Data case_sensitivity=Case_Sensitivity.Sensitive dot_matches_newline=True
    """);
  }

  @Test
  @Ignore // Old parser's representation of this is inconsistent with normal treatment of names.
  public void testConstructorMultipleNamedArgs2() throws Exception {
    parseTest("""
    x = (Regex_Matcher.Regex_Matcher_Data case_sensitivity=Case_Sensitivity.Sensitive) dot_matches_newline=True
    """);
  }

  @Test
  public void testDocAtEndOfBlock() throws Exception {
    parseTest("""
    x =
      23
      ## end of block
    """);
  }

  @Test
  public void testMethodSections() throws Exception {
    parseTest("""
    x = .from self=Foo
    """);
  }

  static String simplifyIR(IR i) {
    var txt = i.pretty().replaceAll("id = [0-9a-f\\-]*", "id = _");
    for (;;) {
      final String pref = "IdentifiedLocation(";
      int at = txt.indexOf(pref);
      if (at == -1) {
        break;
      }
      int to = at + pref.length();
      int depth = 1;
      while (depth > 0) {
        switch (txt.charAt(to)) {
          case '(' -> depth++;
          case ')' -> depth--;
        }
        to++;
      }
      txt = txt.substring(0, at) + "IdentifiedLocation[_]" + txt.substring(to);
    }
    for (;;) {
      final String pref = "IR.Comment.Documentation(";
      int at = txt.indexOf(pref);
      if (at == -1) {
        break;
      }
      int to = txt.indexOf("location =", at + pref.length());
      txt = txt.substring(0, at) + "IR.Comment.Doc(" + txt.substring(to);
    }
    for (;;) {
      final String pref = "IR.Case.Pattern.Doc(";
      int at = txt.indexOf(pref);
      if (at == -1) {
        break;
      }
      int to = txt.indexOf("location =", at + pref.length());
      txt = txt.substring(0, at) + "IR.Comment.CaseDoc(" + txt.substring(to);
    }
    return txt;
  }

  @SuppressWarnings("unchecked")
  static void parseTest(String code) throws IOException {
    var src = Source.newBuilder("enso", code, "test-" + Integer.toHexString(code.hashCode()) + ".enso").build();

@@ -859,27 +947,7 @@ public class EnsoCompilerTest {
    var oldAst = new Parser().runWithIds(src.getCharacters().toString());
    var oldIr = AstToIr.translate((ASTOf<Shape>)(Object)oldAst);

    Function<IR, String> filter = (i) -> {
      var txt = i.pretty().replaceAll("id = [0-9a-f\\-]*", "id = _");
      for (;;) {
        final String pref = "IdentifiedLocation(";
        int at = txt.indexOf(pref);
        if (at == -1) {
          break;
        }
        int to = at + pref.length();
        int depth = 1;
        while (depth > 0) {
          switch (txt.charAt(to)) {
            case '(' -> depth++;
            case ')' -> depth--;
          }
          to++;
        }
        txt = txt.substring(0, at) + "IdentifiedLocation[_]" + txt.substring(to);
      }
      return txt;
    };
    Function<IR, String> filter = EnsoCompilerTest::simplifyIR;

    var old = filter.apply(oldIr);
    var now = filter.apply(ir);
@@ -27,248 +27,191 @@ import org.junit.runners.AllTests;

@RunWith(AllTests.class)
public final class ParseStdLibTest extends TestCase {
  private static final EnsoCompiler ensoCompiler = new EnsoCompiler();
  private final File where;
  private final Dump dump;

  private ParseStdLibTest(String name, File where, Dump dump) {
    super(name);
    this.where = where;
    this.dump = dump;
  }

  public static TestSuite suite() throws Exception {
    TestSuite s = new TestSuite();
    var os = System.getProperty("os.name");
    if (os != null && os.contains("Window")) {
      s.addTest(new ParseStdLibTest("IgnoringStdLibParsingOnWindows", null, null));
    } else {
      collectDistribution(s, "Base");
    }
    return s;
  }

  private static File file(File dir, String... relative) {
    var f = dir;
    for (var ch : relative) {
      f = new File(f, ch);
    }
    return f;
  }

  private static Path locateDistribution(final String name) throws URISyntaxException {
    var where = new File(ParseStdLibTest.class.getProtectionDomain().getCodeSource().getLocation().toURI());
    var dir = where;
    for (;;) {
      dir = file(where, "distribution", "lib", "Standard", name, "0.0.0-dev", "src");
      if (dir.exists()) {
        break;
      }
      where = where.getParentFile();
    }
    return dir.toPath();
  }

  private static void collectDistribution(TestSuite s, String name) throws Exception {
    var dir = locateDistribution(name);
    var dump = new Dump();
    class CollectSuites implements FileVisitor<Path> {

      private final TestSuite suite;

      CollectSuites(TestSuite suite) {
        this.suite = suite;
      }

      @Override
      public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
        return FileVisitResult.CONTINUE;
      }

      @Override
      public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
        if (!file.getFileName().toString().endsWith(".enso")) {
          return FileVisitResult.CONTINUE;
        }
        final String name = file.toFile().getPath().substring(dir.toFile().getPath().length() + 1);
        suite.addTest(new ParseStdLibTest(name, file.toFile(), dump));
        return FileVisitResult.CONTINUE;
      }

      @Override
      public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
        return FileVisitResult.CONTINUE;
      }

      @Override
      public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
        return FileVisitResult.CONTINUE;
      }
    }
    Files.walkFileTree(dir, new CollectSuites(s));
  }

  @SuppressWarnings("unchecked")
  private void parseTest(Source src, boolean generate) throws IOException {
    var ir = ensoCompiler.compile(src);
    assertNotNull("IR was generated", ir);

    var oldAst = new Parser().runWithIds(src.getCharacters().toString());
    var oldIr = AstToIr.translate((ASTOf<Shape>) (Object) oldAst);

    Function<IR, String> filter = (i) -> {
      var txt = i.pretty().replaceAll("id = [0-9a-f\\-]*", "id = _");
      for (;;) {
        final String pref = "IdentifiedLocation(";
        int at = txt.indexOf(pref);
        if (at == -1) {
          break;
        }
        int to = at + pref.length();
        int depth = 1;
        while (depth > 0) {
          switch (txt.charAt(to)) {
            case '(' -> depth++;
            case ')' -> depth--;
          }
          to++;
        }
        txt = txt.substring(0, at) + "IdentifiedLocation[_]" + txt.substring(to);
      }
      return txt;
    };
    Function<IR, String> filter = EnsoCompilerTest::simplifyIR;

    var old = filter.apply(oldIr);
    var now = filter.apply(ir);
    if (!old.equals(now)) {
      if (generate) {
        dump.dump(where, old, now);
      } else {
        fail("IR for " + where.getName() + " shall be equal");
      }
    }
  }

  @Override
  public void runBare() throws Throwable {
    if (where == null) {
      return;
    }
    var code = Files.readString(where.toPath());
    var src = Source.newBuilder("enso", code, getName())
        .uri(where.toURI())
        .build();
    var src = Source.newBuilder("enso", code, getName()).uri(where.toURI()).build();
    if (isKnownToWork(getName())) {
      parseTest(src, true);
    } else {
      try {
        parseTest(src, false);
      } catch (Exception | Error e) {
        // OK
        return;
      }
      fail("This test isn't known to work!");
    }
  }

  private static final Set<String> KNOWN_TO_FAIL;
  private static final Set<String> SHOULD_FAIL;

  static {
    KNOWN_TO_FAIL = new HashSet<>();
    KNOWN_TO_FAIL.addAll(Arrays.asList(
    SHOULD_FAIL = new HashSet<>();
    SHOULD_FAIL.addAll(
        Arrays.asList(
            // Files containing type expressions not supported by old parser.
            "Data/Filter_Condition.enso",
            "Data/Index_Sub_Range.enso",
            "Data/Interval/Bound.enso",
            "Data/Interval.enso",
            "Data/Json.enso",
            "Data/Json/Internal.enso",
            "Data/List.enso",
            "Data/Locale.enso",
            "Data/Map.enso",
            "Data/Maybe.enso",
            "Data/Numbers.enso",
            "Data/Ordering.enso",
            "Data/Ordering/Sort_Direction.enso",
            "Data/Pair.enso",
            "Data/Range.enso",
            "Data/Regression.enso",
            "Data/Statistics.enso",
            "Data/Statistics/Rank_Method.enso",
            "Data/Text/Case.enso",
            "Data/Text/Case_Sensitivity.enso",
            "Data/Text/Encoding.enso",
            "Data/Text/Extensions.enso",
            "Data/Text/Line_Ending_Style.enso",
            "Data/Text/Matching.enso",
            "Data/Text/Regex/Engine/Default.enso",
            "Data/Text/Regex/Engine.enso",
            "Data/Text/Regex.enso",
            "Data/Text/Regex_Matcher.enso",
            "Data/Text/Regex/Option.enso",
            "Data/Text/Regex/Regex_Mode.enso",
            "Data/Text/Span.enso",
            "Data/Text/Text_Matcher.enso",
            "Data/Text/Text_Ordering.enso",
            "Data/Text/Text_Sub_Range.enso",
            "Data/Time/Date.enso",
            "Data/Time/Date_Period.enso",
            "Data/Time/Date_Time.enso",
            "Data/Time/Duration.enso",
            "Data/Time/Period.enso",
            "Data/Time/Time_Of_Day.enso",
            "Data/Time/Time_Zone.enso",
            "Data/Vector.enso",
            "Error/Common.enso",
            "Error/Problem_Behavior.enso",
            "Function.enso",
            "Main.enso",
            "Meta.enso",
            "Meta/Enso_Project.enso",
            "Network/Http.enso",
            "Network/Http/Form.enso",
            "Network/Http/Header.enso",
            "Network/Http/Method.enso",
            "Network/Http/Request/Body.enso",
            "Network/Http/Request.enso",
            "Network/Http/Response/Body.enso",
            "Network/Http/Response.enso",
            "Network/Http/Status_Code.enso",
            "Network/Http/Version.enso",
            "Network/Proxy.enso",
            "Network/URI.enso",
            "Random.enso",
            "Runtime.enso",
            "Runtime/Extensions.enso",
            "System/File.enso",
            "System/File/Existing_File_Behavior.enso",
            "System/File/File_Permissions.enso",
            "System/File/Option.enso",
            "System/Platform.enso",
            "System/Process.enso",
            "System/Process/Exit_Code.enso",
            "Warning.enso",
            "Internal/Base_Generator.enso",
            "Data/Sort_Column_Selector.enso",
            "Data/Value_Type.enso"));
  }

  private static boolean isKnownToWork(String name) {
    return !KNOWN_TO_FAIL.contains(name);
    return !SHOULD_FAIL.contains(name);
  }

  private static final class Dump {
    private boolean first = true;

    public void dump(File where, CharSequence old, CharSequence now) throws IOException {
      var name = where.getName();
      var result = where.getParentFile().toPath();
      final Path oldPath = result.resolve(name + ".old");
      Files.writeString(oldPath, old, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
      final Path nowPath = result.resolve(name + ".now");
      Files.writeString(nowPath, now, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
      if (first) {
        first = false;
        fail("IR for " + where.getName() + " shall be equal:\n$ diff -u '" + oldPath + "' '" + nowPath + "'\n ===== Old =====\n" + old + "\n===== Now =====\n" + now);
      }
      fail("IR for " + where.getName() + " shall be equal:\n$ diff -u '" + oldPath + "' '" + nowPath + "'");
    }
  }
}
@@ -119,9 +119,13 @@ impl FromMeta {
        if *hide {
            field.hide_in_tostring();
        } else {
            let mut getter_name = meta::FieldName::from_snake_case("get");
            getter_name.append(name.clone());
            let getter_name = getter_name.to_camel_case().unwrap();
            let getter_name = if name.as_identifier().to_snake_case().starts_with("is_") {
                name.to_camel_case().unwrap()
            } else {
                let mut getter = meta::FieldName::from_snake_case("get");
                getter.append(name.clone());
                getter.to_camel_case().unwrap()
            };
            methods.push(Method::Dynamic(Dynamic::GetterNamed(field.id(), getter_name)));
        }
        fields.push(field);
@@ -310,6 +310,10 @@ impl FieldName {
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
    /// Return a reference to the underlying identifier.
    pub fn as_identifier(&self) -> &Identifier {
        &self.0
    }
}
lib/rust/parser/debug/LoadParser/LoadParser.java (new file, 238 lines)
@@ -0,0 +1,238 @@
package org.enso.checkparser;

import java.io.File;
import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.FileVisitor;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import org.enso.compiler.EnsoCompiler;
import org.enso.compiler.codegen.AstToIr;
import org.enso.compiler.core.IR;
import org.enso.compiler.core.IR$Comment$Documentation;
import org.enso.compiler.core.IR$Module$Scope$Definition;
import org.enso.compiler.core.IR$Type$Ascription;
import org.enso.syntax.text.AST;
import org.enso.syntax.text.Shape;
import org.enso.syntax2.Parser;
import org.enso.syntax2.Tree;
import org.graalvm.polyglot.Source;
import scala.Function1;
import scala.collection.immutable.List;

class LoadParser implements FileVisitor<Path>, AutoCloseable {
    private final File root;
    private final Parser parser;
    private final EnsoCompiler compiler;
    private final Set<Path> visited = new LinkedHashSet<>();
    private final Map<Path,Exception> failed = new LinkedHashMap<>();
    private final Set<Path> irTested = new LinkedHashSet<>();
    private final Map<Path,Exception> irFailed = new LinkedHashMap<>();
    private final Set<Path> irDiff = new LinkedHashSet<>();

    private LoadParser(File root) {
        this.parser = Parser.create();
        this.compiler = new EnsoCompiler();
        this.root = root;
    }

    @Override
    public void close() throws Exception {
        parser.close();
    }

    public static void main(String[] args) throws Exception {
        var root = new File(".").getAbsoluteFile();
        try (LoadParser checker = new LoadParser(root)) {
            checker.scan("distribution");
            checker.scan("test");

            checker.printSummary(true);
        }
    }

    private void scan(String path) throws IOException {
        var dir = root.toPath().resolve(path);
        assert Files.isDirectory(dir) : "isDirectory: " + dir;

        Files.walkFileTree(dir, this);
    }

    @Override
    public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
        return FileVisitResult.CONTINUE;
    }

    private static Exception newExceptionNoStack(String msg) {
        var ex = new Exception(msg);
        ex.setStackTrace(new StackTraceElement[0]);
        return ex;
    }

    @Override
    public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
        if (!file.getFileName().toString().endsWith(".enso")) {
            return FileVisitResult.CONTINUE;
        }
        visited.add(file);

        System.err.println("processing " + file);
        Source src = Source.newBuilder("enso", file.toFile()).build();
        TEST: try {
            Tree tree = parser.parse(src.getCharacters().toString());
            if (tree == null) {
                failed.put(file, newExceptionNoStack("Rust failed"));
            } else {
                IR.Module ir;
                try {
                    irTested.add(file);
                    IR.Module m = compiler.generateIR(tree);
                    if (m == null) {
                        throw new NullPointerException();
                    }
                    ir = sanitize(m);
                } catch (Exception ex) {
                    if (ex.getClass().getName().contains("UnhandledEntity")) {
                        if (ex.getMessage().contains("= Invalid[")) {
                            failed.put(file, newExceptionNoStack("Rust produces Invalid AST"));
                            break TEST;
                        }
                        if (ex.getMessage().contains("translateCaseBranch = Case[null, null")) {
                            failed.put(file, newExceptionNoStack("Rust provides null case"));
                            break TEST;
                        }
                    }
                    irFailed.put(file, ex);
                    break TEST;
                }

                var oldAst = new org.enso.syntax.text.Parser().runWithIds(src.getCharacters().toString());
                var oldIr = sanitize(AstToIr.translate((AST.ASTOf<Shape>)(Object)oldAst));

                Function<IR, String> filter = (i) -> {
                    var txt = i.pretty().replaceAll("id = [0-9a-f\\-]*", "id = _");
                    for (;;) {
                        final String pref = "IdentifiedLocation(";
                        int at = txt.indexOf(pref);
                        if (at == -1) {
                            break;
                        }
                        int to = at + pref.length();
                        int depth = 1;
                        while (depth > 0) {
                            switch (txt.charAt(to)) {
                                case '(': depth++; break;
                                case ')': depth--; break;
                            }
                            to++;
                        }
                        txt = txt.substring(0, at) + "IdentifiedLocation[_]" + txt.substring(to);
                    }
                    var sb = new StringBuilder();
                    for (String l : txt.split("\n")) {
                        final String pref = "IR.Comment.Documentation";
                        if (l.contains(pref)) {
                            continue;
                        }
                        sb.append(l).append("\n");
                    }
                    return sb.toString();
                };

                var old = filter.apply(oldIr);
                var now = filter.apply(ir);
                if (!old.equals(now)) {
                    irDiff.add(file);
                    var oldFile = file.getParent().resolve(file.getFileName() + ".old");
                    var nowFile = file.getParent().resolve(file.getFileName() + ".now");
                    System.err.println("difference1: " + oldFile);
                    System.err.println("difference2: " + nowFile);
                    Files.writeString(oldFile, old, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
                    Files.writeString(nowFile, now, StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
                }
            }
        } catch (Exception ex) {
            failed.put(file, ex);
            System.err.println("failed " + file);
        }

        return FileVisitResult.CONTINUE;
    }

    @Override
    public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
        visited.add(file);
        failed.put(file, exc);
        return FileVisitResult.CONTINUE;
    }

    @Override
    public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
        return FileVisitResult.CONTINUE;
    }

    private void printSummary(boolean verbose) {
        if (verbose) {
            for (var en : failed.entrySet()) {
                var key = en.getKey();
                var value = en.getValue();
                System.err.println("Problem " + key);
                value.printStackTrace();
            }
            for (var en : irFailed.entrySet()) {
                var key = en.getKey();
                var value = en.getValue();
                System.err.println("File " + key);
                value.printStackTrace();
            }
        }
        System.out.println("Found " + visited.size() + " files. " + failed.size() + " failed to parse");
        System.out.println("From " + irTested.size() + " files " + irFailed.size() + " failed to produce IR");
        System.out.println("From " + (irTested.size() - irFailed.size()) + " files " + irDiff.size() + " have different IR");
    }

    private static IR.Module sanitize(IR.Module m) {
        class NoComments implements Function1<IR.Expression, IR.Expression> {
            @Override
            public IR.Expression apply(IR.Expression exp) {
                if (exp == null) {
                    return null;
                }
                if (exp instanceof IR$Comment$Documentation) {
                    return null;
                }
                return exp.mapExpressions(this);
            }
        }
        class NoCommentsInBindings implements Function1<IR$Module$Scope$Definition, Boolean> {
            @Override
            public Boolean apply(IR$Module$Scope$Definition exp) {
                if (exp instanceof IR$Comment$Documentation) {
                    return false;
                } else if (exp instanceof IR$Type$Ascription) {
                    return false;
                } else {
                    return true;
                }
            }
        }
        var m1 = m.mapExpressions(new NoComments());
        var m2 = m1.copy(
            m1.copy$default$1(),
            m1.copy$default$2(),
            (List<IR$Module$Scope$Definition>) m1.bindings().filter(new NoCommentsInBindings()),
            m1.copy$default$4(),
            m1.copy$default$5(),
            m1.copy$default$6(),
            m1.copy$default$7()
        );
        return m2;
    }
}
lib/rust/parser/debug/LoadParser/LoadParser.sh (new executable file, 12 lines)
@@ -0,0 +1,12 @@
#!/bin/bash -x

set -e

GRAALVM=$HOME/bin/graalvm/

# build runtime.jar including the new parser classes
sbt --java-home $GRAALVM bootstrap
sbt --java-home $GRAALVM buildEngineDistribution

# run test: parse all .enso files in the repository
$GRAALVM/bin/java -cp runtime.jar lib/rust/parser/debug/LoadParser/LoadParser.java
@@ -34,6 +34,12 @@ macro_rules! block {
    }
}

macro_rules! test {
    ( $code:expr, $($statements:tt)* ) => {
        test($code, block![$( $statements )*]);
    }
}



// =============

@@ -117,9 +123,9 @@ fn doc_comments() {
    #[rustfmt::skip]
    test(&lines.join("\n"), block![
        (Documented
         (#((Section " The Identity Function\n")
         (#((Section " The Identity Function") (Section "\n")
           (Section "\n")
           (Section "Arguments:\n")
           (Section "Arguments:") (Section "\n")
           (Section "- x: value to do nothing to"))
          #(()))
         (Function (Ident id) #((() (Ident x) () ())) "=" (Ident x)))]);

@@ -207,14 +213,18 @@ fn type_operator_methods() {
        "type Foo",
        " + : Foo -> Foo -> Foo",
        " + self b = b",
        " Foo.+ : Foo",
        " Foo.+ self b = b",
    ];
    #[rustfmt::skip]
    let expected = block![
        (TypeDef type Foo #() #()
         #((OperatorTypeSignature "+" ":"
         #((TypeSignature (Ident #"+") ":"
            (OprApp (Ident Foo) (Ok "->") (OprApp (Ident Foo) (Ok "->") (Ident Foo))))
           (OperatorFunction "+" #((() (Ident self) () ()) (() (Ident b) () ()))
            "=" (Ident b))))];
           (Function (Ident #"+") #((() (Ident self) () ()) (() (Ident b) () ())) "=" (Ident b))
           (TypeSignature (OprApp (Ident Foo) (Ok ".") (Ident #"+")) ":" (Ident Foo))
           (Function (OprApp (Ident Foo) (Ok ".") (Ident #"+"))
            #((() (Ident self) () ()) (() (Ident b) () ())) "=" (Ident b))))];
    test(&code.join("\n"), expected);
}

@@ -327,6 +337,20 @@ fn function_qualified() {
    (Function (OprApp (Ident Id) (Ok ".") (Ident id)) #((() (Ident x) () ())) "=" (Ident x))]);
}

#[test]
fn ignored_arguments() {
    test!("f ~_ = x", (Function (Ident f) #(("~" (Wildcard -1) () ())) "=" (Ident x)));
}

#[test]
fn foreign_functions() {
    test!("foreign python my_method a b = \"42\"",
        (ForeignFunction foreign python my_method
         #((() (Ident a) () ()) (() (Ident b) () ()))
         "="
         (TextLiteral #((Section "42")))));
}


// === Named arguments ===

@@ -344,8 +368,15 @@ fn named_arguments() {

#[test]
fn default_app() {
    let cases = [("f default", block![(DefaultApp (Ident f) default)])];
    cases.into_iter().for_each(|(code, expected)| test(code, expected));
    test!("f default", (DefaultApp (Ident f) default));
}

#[test]
fn argument_named_default() {
    test!("f default x = x",
        (Function (Ident f) #((() (Ident default) () ()) (() (Ident x) () ())) "=" (Ident x)));
    test!("f x default = x",
        (Function (Ident f) #((() (Ident x) () ()) (() (Ident default) () ())) "=" (Ident x)));
}

#[test]

@@ -546,24 +577,24 @@ fn precedence() {
            (OprApp (Ident y) (Ok "*") (Ident z)))]),
    ];
    cases.into_iter().for_each(|(code, expected)| test(code, expected));
    test!("x - 1 + 2",
        (OprApp (OprApp (Ident x) (Ok "-") (Number () "1" ())) (Ok "+") (Number () "2" ())));
}

#[test]
fn dot_operator_precedence() {
    test!("x y . f v", (App (OprApp (App (Ident x) (Ident y)) (Ok ".") (Ident f)) (Ident v)));
}

#[test]
fn right_associative_operators() {
    let code = ["x --> y ---> z"];
    let expected = block![
        (OprApp (Ident x) (Ok "-->") (OprApp (Ident y) (Ok "--->") (Ident z)))
    ];
    test(&code.join("\n"), expected);
    test!("x --> y ---> z", (OprApp (Ident x) (Ok "-->") (OprApp (Ident y) (Ok "--->") (Ident z))));
    test!("x <| y <<| z", (OprApp (Ident x) (Ok "<|") (OprApp (Ident y) (Ok "<<|") (Ident z))));
}

#[test]
fn left_associative_operators() {
    let code = ["x + y + z"];
    let expected = block![
        (OprApp (OprApp (Ident x) (Ok "+") (Ident y)) (Ok "+") (Ident z))
    ];
    test(&code.join("\n"), expected);
    test!("x + y + z", (OprApp (OprApp (Ident x) (Ok "+") (Ident y)) (Ok "+") (Ident z)));
}

#[test]

@@ -691,19 +722,15 @@ fn minus_section() {

#[test]
fn minus_unary() {
    #[rustfmt::skip]
    let cases = [
        ("f -x", block![(App (Ident f) (UnaryOprApp "-" (Ident x)))]),
        ("-x", block![(UnaryOprApp "-" (Ident x))]),
        ("(-x)", block![(Group (UnaryOprApp "-" (Ident x)))]),
        ("-(x * x)", block![
            (UnaryOprApp "-" (Group (OprApp (Ident x) (Ok "*") (Ident x))))]),
        ("x=-x", block![(Assignment (Ident x) "=" (UnaryOprApp "-" (Ident x)))]),
        ("-x+x", block![(OprApp (UnaryOprApp "-" (Ident x)) (Ok "+") (Ident x))]),
        ("-x*x", block![(OprApp (UnaryOprApp "-" (Ident x)) (Ok "*") (Ident x))]),
        ("-1.x", block![(OprApp (UnaryOprApp "-" (Number () "1" ())) (Ok ".") (Ident x))]),
    ];
    cases.into_iter().for_each(|(code, expected)| test(code, expected));
    test!("f -x", (App (Ident f) (UnaryOprApp "-" (Ident x))));
    test!("-x", (UnaryOprApp "-" (Ident x)));
    test!("(-x)", (Group (UnaryOprApp "-" (Ident x))));
    test!("-(x * x)", (UnaryOprApp "-" (Group (OprApp (Ident x) (Ok "*") (Ident x)))));
    test!("x=-x", (Assignment (Ident x) "=" (UnaryOprApp "-" (Ident x))));
    test!("-x+x", (OprApp (UnaryOprApp "-" (Ident x)) (Ok "+") (Ident x)));
    test!("-x*x", (OprApp (UnaryOprApp "-" (Ident x)) (Ok "*") (Ident x)));
    test!("-2.1", (UnaryOprApp "-" (Number () "2" ("." "1"))));
    //test!("-1.x", (OprApp (UnaryOprApp "-" (Number () "1" ())) (Ok ".") (Ident x)));
}


@@ -749,6 +776,11 @@ fn import() {
            ()
            ((Ident as) (Ident Java_URI))
            ())]),
        ("from Standard.Base import Foo, Bar, Baz", block![
            (Import ()
             ((Ident from) (OprApp (Ident Standard) (Ok ".") (Ident Base)))
             ((Ident import) (OprApp (OprApp (Ident Foo) (Ok ",") (Ident Bar)) (Ok ",") (Ident Baz)))
             () () ())]),
    ];
    cases.into_iter().for_each(|(code, expected)| test(code, expected));
    test_invalid("from Standard.Base.Data.Array import new as array_new");

@@ -890,11 +922,12 @@ x"#;
    #[rustfmt::skip]
    let expected = block![
        (TextLiteral
         #((Section "part of the string\n")
           (Section " 3-spaces indented line, part of the Text Block\n")
           (Section "this does not end the string -> '''\n")
         #((Section "part of the string") (Section "\n")
           (Section " 3-spaces indented line, part of the Text Block") (Section "\n")
           (Section "this does not end the string -> '''") (Section "\n")
           (Section "\n")
           (Section "`also` part of the string\n")))
           (Section "`also` part of the string")))
        ()
        (Ident x)
    ];
    test(code, expected);

@@ -907,6 +940,7 @@ x"#;
        (Ident x)
    ];
    test(code, expected);

    let code = " x = \"\"\"\n Indented multiline\n x";
    #[rustfmt::skip]
    let expected = block![

@@ -915,11 +949,7 @@ x"#;
    ];
    test(code, expected);
    let code = "'''\n \\nEscape at start\n";
    #[rustfmt::skip]
    let expected = block![
        (TextLiteral #((Escape '\n') (Section "Escape at start\n")))
    ];
    test(code, expected);
    test!(code, (TextLiteral #((Escape '\n') (Section "Escape at start"))) ());
    let code = "x =\n x = '''\n x\nx";
    #[rustfmt::skip]
    let expected = block![

@@ -928,6 +958,11 @@ x"#;
        (Ident x)
    ];
    test(code, expected);
    test!("foo = bar '''\n baz",
        (Assignment (Ident foo) "=" (App (Ident bar) (TextLiteral #((Section "baz"))))));
    test!("'''\n \\t'", (TextLiteral #((Escape '\t') (Section "'"))));
    test!("'''\n x\n \\t'",
        (TextLiteral #((Section "x") (Section "\n") (Escape '\t') (Section "'"))));
}

#[test]

@@ -1136,18 +1171,30 @@ fn tuple_literals() {

#[test]
fn numbers() {
    let cases = [
        ("100_000", block![(Number () "100_000" ())]),
        ("10_000.99", block![(Number () "10_000" ("." "99"))]),
        ("1 . 0", block![(OprApp (Number () "1" ()) (Ok ".") (Number () "0" ()))]),
        ("1 .0", block![(App (Number () "1" ()) (OprSectionBoundary 1 (OprApp () (Ok ".") (Number () "0" ()))))]),
        ("1. 0", block![(OprSectionBoundary 1 (App (OprApp (Number () "1" ()) (Ok ".") ()) (Number () "0" ())))]),
        ("0b10101010", block![(Number "0b" "10101010" ())]),
        ("0o122137", block![(Number "0o" "122137" ())]),
        ("0xAE2F14", block![(Number "0x" "AE2F14" ())]),
        ("pi = 3.14", block![(Assignment (Ident pi) "=" (Number () "3" ("." "14")))])
    ];
    cases.into_iter().for_each(|(code, expected)| test(code, expected));
    test!("1 . 0", (OprApp (Number () "1" ()) (Ok ".") (Number () "0" ())));
    test!("1 .0",
        (App (Number () "1" ()) (OprSectionBoundary 1 (OprApp () (Ok ".") (Number () "0" ())))));
    test!("1. 0",
        (OprSectionBoundary 1 (App (OprApp (Number () "1" ()) (Ok ".") ()) (Number () "0" ()))));
    test!("0b10101010", (Number "0b" "10101010" ()));
    test!("0o122137", (Number "0o" "122137" ()));
    test!("0xAE2F14", (Number "0x" "AE2F14" ()));
    test!("pi = 3.14", (Assignment (Ident pi) "=" (Number () "3" ("." "14"))));
}

#[test]
// This syntax cannot be used until we remove old-nondecimal number support, which is
// needed for compatibility until the old parser is fully replaced.
#[ignore]
fn new_delimited_numbers() {
    test!("100_000", (Number () "100_000" ()));
    test!("10_000.99", (Number () "10_000" ("." "99")));
}

#[test]
fn old_nondecimal_numbers() {
    test!("2_01101101", (Number "2_" "01101101" ()));
    test!("16_17ffffffffffffffa", (Number "16_" "17ffffffffffffffa" ()));
}
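For reference, the numeric syntaxes these tests pin down look like this in Enso source (an illustrative sketch; the bindings are hypothetical):

    hex = 0xAE2F14                  # new prefix-based literals
    oct = 0o122137
    bin = 0b10101010
    old_hex = 16_17ffffffffffffffa  # old radix_digits form, emulated for compatibility
    old_bin = 2_01101101
    # Underscore-delimited decimals such as 100_000 stay disabled for now
    # (new_delimited_numbers is #[ignore]d) because they would collide with
    # the old radix syntax until it is removed.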
@@ -19,6 +19,9 @@ use std::str;
/// An optimization constant. Based on it, the estimated memory is allocated on the beginning of
/// parsing.
pub const AVERAGE_TOKEN_LEN: usize = 5;
/// Within an indented text block, this sets the minimum whitespace to be trimmed from the start of
/// each line.
const MIN_TEXT_TRIM: VisibleOffset = VisibleOffset(4);



@@ -108,7 +111,7 @@ pub enum State {
    /// Reading a multi-line text literal.
    MultilineText {
        /// Indentation level of the quote symbol introducing the block.
        quote_indent: VisibleOffset,
        block_indent: VisibleOffset,
        /// Indentation level of the first line of the block.
        initial_indent: Option<VisibleOffset>,
    },

@@ -543,10 +546,12 @@ impl token::Variant {
    #[inline(always)]
    pub fn new_ident_unchecked(repr: &str) -> token::variant::Ident {
        let info = IdentInfo::new(repr);
        let is_operator = false;
        token::variant::Ident(
            info.starts_with_underscore,
            info.lift_level,
            info.starts_with_uppercase,
            is_operator,
            info.is_default,
        )
    }

@@ -561,7 +566,8 @@ impl token::Variant {
        } else {
            let is_free = info.starts_with_underscore;
            let is_type = info.starts_with_uppercase;
            token::Variant::ident(is_free, info.lift_level, is_type, info.is_default)
            let is_operator = false;
            token::Variant::ident(is_free, info.lift_level, is_type, is_operator, info.is_default)
        }
    }
}

@@ -660,8 +666,8 @@ fn analyze_operator(token: &str) -> token::OperatorProperties {
            .as_annotation(),
        "-" =>
            return operator
                .with_unary_prefix_mode(token::Precedence::max())
                .with_binary_infix_precedence(14),
                .with_unary_prefix_mode(token::Precedence::unary_minus())
                .with_binary_infix_precedence(15),
        // "There are a few operators with the lowest precedence possible."
        // - These 3 "consume everything to the right".
        "=" =>

@@ -684,12 +690,14 @@ fn analyze_operator(token: &str) -> token::OperatorProperties {
            .with_lhs_section_termination(operator::SectionTermination::Unwrap)
            .as_compile_time_operation()
            .as_arrow(),
        "|" | "\\\\" | "&" => return operator.with_binary_infix_precedence(4),
        "!" => return operator.with_binary_infix_precedence(3),
        "||" | "\\\\" | "&&" => return operator.with_binary_infix_precedence(4),
        ">>" | "<<" => return operator.with_binary_infix_precedence(5),
        "|>" | "|>>" | "<|" | "<<|" => return operator.with_binary_infix_precedence(6),
        "|>" | "|>>" => return operator.with_binary_infix_precedence(6),
        "<|" | "<<|" => return operator.with_binary_infix_precedence(6).as_right_associative(),
        // Other special operators.
        "<=" | ">=" => return operator.with_binary_infix_precedence(14),
        "==" => return operator.with_binary_infix_precedence(1),
        "==" | "!=" => return operator.with_binary_infix_precedence(5),
        "," =>
            return operator
                .with_binary_infix_precedence(1)

@@ -697,7 +705,7 @@ fn analyze_operator(token: &str) -> token::OperatorProperties {
                .as_special()
                .as_sequence(),
        "." =>
            return operator.with_binary_infix_precedence(21).with_decimal_interpretation().as_dot(),
            return operator.with_binary_infix_precedence(80).with_decimal_interpretation().as_dot(),
        _ => (),
    }
    // "The precedence of all other operators is determined by the operator's Precedence Character:"
|
||||
@ -755,23 +763,54 @@ impl<'s> Lexer<'s> {
|
||||
fn number(&mut self) {
|
||||
let mut base = None;
|
||||
let token = self.token(|this| {
|
||||
while this.take_while_1(is_decimal_digit) {
|
||||
if this.current_char == Some('_') {
|
||||
let mut old_hex_chars_matched = 0;
|
||||
let mut old_bin_chars_matched = 0;
|
||||
let mut new_based_chars_matched = 0;
|
||||
match this.current_char {
|
||||
Some('0') => new_based_chars_matched = 1,
|
||||
Some('1') => old_hex_chars_matched = 1,
|
||||
Some('2') => old_bin_chars_matched = 1,
|
||||
Some(d) if is_decimal_digit(d) => (),
|
||||
_ => return,
|
||||
}
|
||||
this.next_input_char();
|
||||
let mut prev_was_underscore = false;
|
||||
match this.current_char {
|
||||
Some('_') if old_bin_chars_matched == 1 => base = Some(token::Base::Binary),
|
||||
Some('_') => prev_was_underscore = true,
|
||||
Some('b') if new_based_chars_matched == 1 => base = Some(token::Base::Binary),
|
||||
Some('o') if new_based_chars_matched == 1 => base = Some(token::Base::Octal),
|
||||
Some('x') if new_based_chars_matched == 1 => base = Some(token::Base::Hexadecimal),
|
||||
Some('6') if old_hex_chars_matched == 1 => old_hex_chars_matched = 2,
|
||||
Some(d) if is_decimal_digit(d) => (),
|
||||
_ => return,
|
||||
}
|
||||
this.next_input_char();
|
||||
if base.is_some() {
|
||||
return;
|
||||
}
|
||||
let mut was_underscore = false;
|
||||
match this.current_char {
|
||||
Some('_') if old_hex_chars_matched == 2 => {
|
||||
base = Some(token::Base::Hexadecimal);
|
||||
this.next_input_char();
|
||||
continue;
|
||||
return;
|
||||
}
|
||||
if this.current_offset == Bytes(1) {
|
||||
base = match this.current_char {
|
||||
Some('b') => Some(token::Base::Binary),
|
||||
Some('o') => Some(token::Base::Octal),
|
||||
Some('x') => Some(token::Base::Hexadecimal),
|
||||
_ => None,
|
||||
};
|
||||
if base.is_some() {
|
||||
this.next_input_char();
|
||||
return;
|
||||
}
|
||||
Some('_') if !prev_was_underscore => was_underscore = true,
|
||||
Some(d) if is_decimal_digit(d) => (),
|
||||
_ => return,
|
||||
}
|
||||
prev_was_underscore = was_underscore;
|
||||
this.next_input_char();
|
||||
loop {
|
||||
let mut was_underscore = false;
|
||||
match this.current_char {
|
||||
Some('_') if !prev_was_underscore => was_underscore = true,
|
||||
Some(d) if is_decimal_digit(d) => (),
|
||||
_ => return,
|
||||
}
|
||||
prev_was_underscore = was_underscore;
|
||||
this.next_input_char();
|
||||
}
|
||||
});
|
||||
if let Some(token) = token {
|
||||
@ -858,7 +897,7 @@ impl<'s> Lexer<'s> {
|
||||
fn multiline_text(
|
||||
&mut self,
|
||||
open_quote_start: (Bytes, Offset<'s>),
|
||||
quote_indent: VisibleOffset,
|
||||
block_indent: VisibleOffset,
|
||||
text_type: TextType,
|
||||
) {
|
||||
let open_quote_end = self.mark();
|
||||
@ -868,22 +907,19 @@ impl<'s> Lexer<'s> {
        let mut initial_indent = None;
        if text_type.expects_initial_newline() && let Some(newline) = self.line_break() {
            self.output.push(newline.with_variant(token::Variant::text_initial_newline()));
            if self.last_spaces_visible_offset > quote_indent {
            if self.last_spaces_visible_offset > block_indent {
                initial_indent = self.last_spaces_visible_offset.into();
            }
        }
        let text_start = self.mark();
        self.text_content(
            Some(text_start),
            None,
            text_type.is_interpolated(),
            State::MultilineText { quote_indent, initial_indent },
        );
        self.text_content(None, text_type.is_interpolated(), State::MultilineText {
            block_indent,
            initial_indent,
        });
    }

    fn inline_quote(&mut self, quote_char: char, text_type: TextType) {
        let is_interpolated = text_type.is_interpolated();
        self.text_content(None, quote_char.into(), is_interpolated, State::InlineText);
        self.text_content(quote_char.into(), is_interpolated, State::InlineText);
    }

    fn end_splice(&mut self, state: State) {
@ -896,63 +932,86 @@ impl<'s> Lexer<'s> {
        match state {
            State::InlineText => self.inline_quote('\'', TextType::Interpolated),
            State::MultilineText { .. } => {
                self.text_content(None, None, true, state);
                self.text_content(None, true, state);
            }
        }
    }

    fn text_content(
        &mut self,
        start: Option<(Bytes, Offset<'s>)>,
        closing_char: Option<char>,
        interpolate: bool,
        mut state: State,
    ) -> TextEndedAt {
        let mut text_start = start.unwrap_or_else(|| self.mark());
        let mut text_start = self.mark();
        let is_multiline = matches!(state, State::MultilineText { .. });
        while let Some(char) = self.current_char {
            if closing_char == Some(char) || (!is_multiline && is_newline_char(char)) {
                break;
            }
            let before_newline = self.mark();
            let mut newline = self.take_1('\r');
            newline = newline || self.take_1('\n');
            if newline && let State::MultilineText { quote_indent, initial_indent } = &mut state {
                let text_end = self.mark();
                if let Some(indent) = *initial_indent {
                    self.spaces_after_lexeme_with_limit(indent);
                } else {
                    self.spaces_after_lexeme();
                }
                if let Some(char) = self.current_char && !is_newline_char(char) {
                    let block_indent = self.last_spaces_visible_offset;
                    if block_indent <= *quote_indent {
                        let token = self.make_token(
                            text_start,
                            before_newline.clone(),
                            token::Variant::text_section(),
                        );
                        if !(token.code.is_empty() && token.left_offset.code.is_empty()) {
                            self.output.push(token);
                        }
                        self.output.push(Token::from(token::text_end("", "")));
                        self.end_blocks(block_indent);
                        let token =
                            self.make_token(before_newline, text_end, token::Variant::newline());
            if let State::MultilineText { block_indent, initial_indent } = &mut state {
                // Consume newlines and following whitespace until we encounter a line that, after
                // left-trimming, is not empty.
                //
                // Buffer the newline tokens, because whether they are interpreted as part of the
                // text content or code formatting after the block depends on whether non-empty text
                // lines follow.
                let mut newlines = vec![];
                let mut new_indent = None;
                loop {
                    let mut before_newline = self.mark();
                    if before_newline.0 == text_start.0 {
                        before_newline = text_start.clone();
                    }
                    let mut newline = self.take_1('\r');
                    newline = self.take_1('\n') || newline;
                    if !newline {
                        break;
                    }
                    let token = self.make_token(
                        text_start.clone(),
                        before_newline.clone(),
                        token::Variant::text_section(),
                    );
                    if !(token.code.is_empty() && token.left_offset.code.is_empty()) {
                        self.output.push(token);
                        return TextEndedAt::End;
                    } else {
                        before_newline = text_start;
                    }
                    if initial_indent.is_none() {
                        *initial_indent = block_indent.into();
                    let newline_end = self.mark();
                    let token =
                        self.make_token(before_newline, newline_end, token::Variant::newline());
                    newlines.push(token);
                    if let Some(initial) = *initial_indent {
                        let trim = std::cmp::max(initial, *block_indent + MIN_TEXT_TRIM);
                        self.spaces_after_lexeme_with_limit(trim);
                    } else {
                        self.spaces_after_lexeme();
                    }
                    let new_indent_ = self.last_spaces_visible_offset;
                    new_indent = new_indent_.into();
                    if initial_indent.is_none() && new_indent_ > *block_indent {
                        *initial_indent = new_indent_.into();
                    }
                };
                let token =
                    self.make_token(text_start.clone(), text_end.clone(), token::Variant::text_section());
                if !token.code.is_empty() {
                    self.output.push(token);
                    text_start = self.mark();
                }
                continue;
                if let Some(indent) = new_indent {
                    if indent <= *block_indent {
                        self.output.push(Token::from(token::text_end("", "")));
                        self.end_blocks(indent);
                        self.output.extend(newlines);
                        if self.current_offset == text_start.0 {
                            self.last_spaces_visible_offset = text_start.1.visible;
                            self.last_spaces_offset = text_start.1.code.len();
                        }
                        return TextEndedAt::End;
                    }
                    let newlines = newlines
                        .into_iter()
                        .map(|token| token.with_variant(token::Variant::text_section()));
                    self.output.extend(newlines);
                    continue;
                }
            }
            if interpolate && char == '\\' {
                let mut backslash_start = self.mark();
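The `block_indent`/`initial_indent` bookkeeping above corresponds to this surface behavior (a sketch; indent widths are illustrative): lines indented past the block continue the literal, and the first non-empty line at or below the block's indent ends it.

example =
    text = """
        First line of the literal.
        Still inside the literal.
    text.length    # back at the binding's indent: the literal has ended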
@ -1334,7 +1393,8 @@ pub mod test {
        let is_free = code.starts_with('_');
        let lift_level = code.chars().rev().take_while(|t| *t == '\'').count();
        let is_uppercase = code.chars().next().map(|c| c.is_uppercase()).unwrap_or_default();
        token::ident_(left_offset, code, is_free, lift_level, is_uppercase, false)
        let is_operator = false;
        token::ident_(left_offset, code, is_free, lift_level, is_uppercase, is_operator, false)
    }

    /// Constructor.

@ -214,16 +214,6 @@ fn expression_to_statement(mut tree: syntax::Tree<'_>) -> syntax::Tree<'_> {
        let colon = annotated.operator;
        let type_ = annotated.type_;
        let variable = annotated.expression;
        if let Tree {
            variant: box Variant::OprApp(OprApp { lhs: None, opr: Ok(name), rhs: None }),
            span: inner,
        } = variable
        {
            let mut tree = Tree::operator_type_signature(name, colon, type_);
            tree.span.left_offset += span.left_offset;
            tree.span.left_offset += inner.left_offset;
            return tree;
        }
        let mut tree = Tree::type_signature(variable, colon, type_);
        tree.span.left_offset += span.left_offset;
        return tree;
@ -237,15 +227,6 @@ fn expression_to_statement(mut tree: syntax::Tree<'_>) -> syntax::Tree<'_> {
        _ => return tree,
    };
    if let OprApp { lhs: Some(lhs), opr: Ok(opr), rhs } = opr_app && opr.properties.is_assignment() {
        if let Tree { variant: box Variant::OprApp(
                OprApp { lhs: None, opr: Ok(name), rhs: Some(args) }), span } = lhs {
            let args = collect_arguments_inclusive(mem::take(args));
            let name = mem::take(name);
            let mut result = Tree::operator_function(name, args, mem::take(opr), mem::take(rhs));
            result.span.left_offset += mem::take(&mut span.left_offset);
            result.span.left_offset += left_offset;
            return result;
        }
        let (leftmost, args) = collect_arguments(lhs.clone());
        if let Some(rhs) = rhs {
            if let Variant::Ident(ident) = &*leftmost.variant && ident.token.variant.is_type {
@ -386,6 +367,21 @@ pub fn parse_argument_application<'s>(
                close,
            })
        }
        box Variant::DefaultApp(DefaultApp { func, default: default_ }) => {
            let pattern = Tree::ident(default_.clone());
            func.span.left_offset += mem::take(&mut expression.span.left_offset);
            *expression = func.clone();
            Some(ArgumentDefinition {
                open: default(),
                open2: default(),
                suspension: default(),
                pattern,
                type_: default(),
                close2: default(),
                default: default(),
                close: default(),
            })
        }
        _ => None,
    }
}
@ -433,6 +429,10 @@ pub fn parse_argument_definition(mut pattern: syntax::Tree) -> syntax::tree::Arg
        pattern = expression;
    }
    let mut suspension = default();
    if let box Variant::TemplateFunction(TemplateFunction { mut ast, .. }) = pattern.variant {
        ast.span.left_offset += pattern.span.left_offset;
        pattern = ast;
    }
    if let Variant::UnaryOprApp(UnaryOprApp { opr, rhs: Some(rhs) }) = &*pattern.variant && opr.properties.is_suspension() {
        let mut opr = opr.clone();
        opr.left_offset += pattern.span.left_offset;

@ -36,6 +36,7 @@ fn statement() -> resolver::SegmentMap<'static> {
    register_import_macros(&mut macro_map);
    register_export_macros(&mut macro_map);
    macro_map.register(type_def());
    macro_map.register(foreign());
    macro_map
}

@ -86,7 +87,11 @@ fn import_body(segments: NonEmptyVec<MatchedSegment>) -> syntax::Tree {
                &mut from
            }
            "import" => {
                body = sequence_tree(&mut parser, tokens, expect_qualified);
                let expect = match from {
                    Some(_) => expect_ident,
                    None => expect_qualified,
                };
                body = sequence_tree(&mut parser, tokens, expect);
                &mut import
            }
            "all" => {
@ -148,7 +153,11 @@ fn export_body(segments: NonEmptyVec<MatchedSegment>) -> syntax::Tree {
                &mut from
            }
            "export" => {
                body = sequence_tree(&mut parser, tokens, expect_qualified);
                let expect = match from {
                    Some(_) => expect_ident,
                    None => expect_qualified,
                };
                body = sequence_tree(&mut parser, tokens, expect);
                &mut export
            }
            "all" => {
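The `expect` selection in both hunks encodes the shape of the two statement forms, sketched here with illustrative module names: after `from ... import`, plain identifiers (or `all`) are expected, while a bare `import` expects a fully qualified name.

from Standard.Table import Table, Column    # plain idents after `from ... import`
import Standard.Table                       # bare import: fully qualified name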
@ -246,7 +255,8 @@ fn type_def_body(matched_segments: NonEmptyVec<MatchedSegment>) -> syntax::Tree
            variant: syntax::token::Variant::Ident(ident),
        })) => syntax::Token(left_offset, code, ident),
        _ => {
            let placeholder = Tree::ident(syntax::token::ident("", "", false, 0, false, false));
            let placeholder =
                Tree::ident(syntax::token::ident("", "", false, 0, false, false, false));
            return placeholder.with_error("Expected identifier after `type` keyword.");
        }
    };
@ -255,6 +265,27 @@ fn type_def_body(matched_segments: NonEmptyVec<MatchedSegment>) -> syntax::Tree
        .map(crate::collect_arguments_inclusive)
        .unwrap_or_default();
    let mut builder = TypeDefBodyBuilder::default();
    let mut beginning_of_line = true;
    for item in &mut block {
        match item {
            syntax::Item::Token(syntax::Token {
                variant: syntax::token::Variant::Newline(_),
                ..
            }) => {
                beginning_of_line = true;
                continue;
            }
            syntax::Item::Token(syntax::Token { variant, .. })
                if beginning_of_line && matches!(variant, syntax::token::Variant::Operator(_)) =>
            {
                let opr_ident =
                    syntax::token::variant::Ident { is_operator_lexically: true, ..default() };
                *variant = syntax::token::Variant::Ident(opr_ident);
            }
            _ => (),
        }
        beginning_of_line = false;
    }
    for block::Line { newline, expression } in block::lines(block) {
        builder.line(newline, expression);
    }
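The retagging above lets an operator at the start of a line in a type body parse as an ordinary method definition, as in this made-up type:

type Pair
    Value a b

    == self that = (self.a == that.a) && (self.b == that.b)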
@ -630,27 +661,16 @@ fn sequence_tree<'s>(
) -> Option<syntax::Tree<'s>> {
    use syntax::tree::*;
    let (first, rest) = sequence(parser, tokens);
    let first = first.map(&mut f);
    let mut rest = rest.into_iter().rev();
    let mut invalid = false;
    if let Some(OperatorDelimitedTree { operator, body }) = rest.next() {
        let mut tree = body.map(f);
        invalid = invalid || tree.is_none();
        let mut prev_op = operator;
        for OperatorDelimitedTree { operator, body } in rest {
            invalid = invalid || body.is_none();
            tree = Tree::opr_app(body, Ok(prev_op), tree).into();
            prev_op = operator;
        }
        invalid = invalid || first.is_none();
        let mut tree = Tree::opr_app(first, Ok(prev_op), tree);
        if invalid {
            tree = tree.with_error("Malformed comma-delimited sequence.");
        }
        tree.into()
    } else {
        first
    let mut invalid = first.is_none();
    let mut tree = first.map(&mut f);
    for OperatorDelimitedTree { operator, body } in rest {
        invalid = invalid || body.is_none();
        tree = Tree::opr_app(tree, Ok(operator), body).into();
    }
    if invalid {
        tree = tree.map(|tree| tree.with_error("Malformed comma-delimited sequence."));
    }
    tree
}
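The rewritten fold is also left-associative where the old loop (iterating the reversed list) folded from the right; a sketch of the resulting tree shapes (constructors abbreviated, illustrative only):

# `a, b, c` now folds as
#     OprApp (OprApp a "," b) "," c    # left-nested (new)
# where the old loop produced
#     OprApp a "," (OprApp b "," c)    # right-nested (old)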

fn splice<'s>() -> Definition<'s> {
@ -668,6 +688,77 @@ fn splice_body(segments: NonEmptyVec<MatchedSegment>) -> syntax::Tree {
    syntax::Tree::text_literal(default(), default(), vec![splice], default(), default())
}

fn foreign<'s>() -> Definition<'s> {
    crate::macro_definition! {("foreign", everything()) foreign_body}
}

fn foreign_body(segments: NonEmptyVec<MatchedSegment>) -> syntax::Tree {
    let segment = segments.pop().0;
    let keyword = into_ident(segment.header);
    let tokens = segment.result.tokens().into_iter();
    match try_foreign_body(keyword.clone(), tokens.clone()) {
        Ok(foreign) => foreign,
        Err(error) => (match operator::resolve_operator_precedence_if_non_empty(tokens) {
            Some(rhs) => syntax::Tree::app(keyword.into(), rhs),
            None => keyword.into(),
        })
        .with_error(error),
    }
}

fn try_foreign_body<'s>(
    keyword: syntax::token::Ident<'s>,
    tokens: impl IntoIterator<Item = syntax::Item<'s>>,
) -> Result<syntax::Tree, &'static str> {
    let mut tokens = tokens.into_iter();
    let language = tokens
        .next()
        .and_then(try_into_token)
        .and_then(try_token_into_ident)
        .ok_or("Expected an identifier specifying foreign method's language.")?;
    let expected_name = "Expected an identifier specifying foreign function's name.";
    let function =
        operator::resolve_operator_precedence_if_non_empty(tokens).ok_or(expected_name)?;
    let expected_function = "Expected a function definition after foreign declaration.";
    let box syntax::tree::Variant::OprApp(
        syntax::tree::OprApp { lhs: Some(lhs), opr: Ok(equals), rhs: Some(body) }) = function.variant else {
        return Err(expected_function)
    };
    if !equals.properties.is_assignment() {
        return Err(expected_function);
    };
    let (name, args) = crate::collect_arguments(lhs);
    let mut name = try_tree_into_ident(name).ok_or(expected_name)?;
    name.left_offset += function.span.left_offset;
    Ok(syntax::Tree::foreign_function(keyword, language, name, args, equals, body))
}
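What `try_foreign_body` accepts, sketched in Enso (the language and body are illustrative): the `foreign` keyword, an identifier naming the language, and an ordinary function binding whose body is the foreign source text.

foreign js my_add a b = """
    return a + b;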

// === Token conversions ===

fn try_into_token(item: syntax::Item) -> Option<syntax::Token> {
    match item {
        syntax::Item::Token(token) => Some(token),
        _ => None,
    }
}

fn try_token_into_ident(token: syntax::Token) -> Option<syntax::token::Ident> {
    match token.variant {
        syntax::token::Variant::Ident(ident) => {
            let syntax::token::Token { left_offset, code, .. } = token;
            Some(syntax::Token(left_offset, code, ident))
        }
        _ => None,
    }
}

fn try_tree_into_ident(tree: syntax::Tree) -> Option<syntax::token::Ident> {
    match tree.variant {
        box syntax::tree::Variant::Ident(syntax::tree::Ident { token }) => Some(token),
        _ => None,
    }
}

fn into_open_symbol(token: syntax::token::Token) -> syntax::token::OpenSymbol {
    let syntax::token::Token { left_offset, code, .. } = token;
    syntax::token::open_symbol(left_offset, code)
@ -680,9 +771,12 @@ fn into_close_symbol(token: syntax::token::Token) -> syntax::token::CloseSymbol

fn into_ident(token: syntax::token::Token) -> syntax::token::Ident {
    let syntax::token::Token { left_offset, code, .. } = token;
    syntax::token::ident(left_offset, code, false, 0, false, false)
    syntax::token::ident(left_offset, code, false, 0, false, false, false)
}


// === Validators ===

fn expect_ident(tree: syntax::Tree) -> syntax::Tree {
    if matches!(&*tree.variant, syntax::tree::Variant::Ident(_)) {
        tree

@ -161,6 +161,22 @@ impl<'s> ExpressionBuilder<'s> {
    /// Extend the expression with an operand.
    pub fn operand(&mut self, operand: Operand<syntax::Tree<'s>>) {
        if self.prev_type == Some(ItemType::Ast) {
            if let Some(Operand { value: syntax::Tree { variant: box
                    syntax::tree::Variant::TextLiteral(ref mut lhs), .. }, .. }) = self.output.last_mut()
                    && !lhs.closed
                    && let box syntax::tree::Variant::TextLiteral(mut rhs) = operand.value.variant {
                syntax::tree::join_text_literals(lhs, &mut rhs, operand.value.span);
                if let syntax::tree::TextLiteral { open: Some(open), newline: None, elements, closed: true, close: None } = lhs
                        && open.code.starts_with('#') {
                    let elements = mem::take(elements);
                    let mut open = open.clone();
                    let lhs_tree = self.output.pop().unwrap().value;
                    open.left_offset += lhs_tree.span.left_offset;
                    let doc = syntax::tree::DocComment { open, elements, newlines: default() };
                    self.output.push(syntax::Tree::documented(doc, default()).into());
                }
                return;
            }
            self.application();
        }
        self.output.push(operand);
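The new branch above is what fuses an unclosed `#`-opened text literal with what follows into a `Documented` node; in surface syntax that is the ordinary documentation comment (illustrative):

## Doubles a number.
double x = x * 2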
@ -225,16 +241,16 @@ impl<'s> ExpressionBuilder<'s> {
        if self.prev_type == Some(ItemType::Opr)
                && let Some(prev_opr) = self.operator_stack.last_mut()
                && let Arity::Binary { tokens, .. } = &mut prev_opr.opr {
            if tokens.len() == 1 && opr.properties.is_type_annotation() {
                let prev = match self.operator_stack.pop().unwrap().opr {
                    Arity::Binary { tokens, .. } => tokens.into_iter().next().unwrap(),
                    _ => unreachable!(),
                };
                self.output.push(Operand::from(syntax::Tree::opr_app(None, Ok(prev), None)));
            } else {
                tokens.push(opr);
            if tokens.len() == 1 && tokens[0].properties.is_dot() {
                let Token { left_offset, code, .. } = opr;
                let is_operator = true;
                let opr_ident = token::ident(left_offset, code, default(), default(), default(), is_operator, default());
                self.output.push(Operand::from(syntax::Tree::ident(opr_ident)));
                self.prev_type = Some(ItemType::Ast);
                return;
            }
            tokens.push(opr);
            return;
        }
        self.push_operator(prec, assoc, Arity::binary(opr));
    }

@ -263,14 +263,14 @@ macro_rules! with_token_definition { ($f:ident ($($args:tt)*)) => { $f! { $($arg
        },
        AutoScope,
        Ident {
            pub is_free: bool,
            pub lift_level: usize,
            pub is_free: bool,
            pub lift_level: usize,
            #[reflect(rename = "is_type_or_constructor")]
            pub is_type: bool,
            pub is_operator_lexically: bool,
            #[serde(skip)]
            #[reflect(skip)]
            pub is_type: bool,
            #[serde(skip)]
            #[reflect(skip)]
            pub is_default: bool,
            pub is_default: bool,
        },
        Operator {
            #[serde(skip)]
@ -527,6 +527,11 @@ impl Precedence {
    pub fn application() -> Self {
        Precedence { value: 80 }
    }

    /// Return the precedence of unary minus.
    pub fn unary_minus() -> Self {
        Precedence { value: 79 }
    }
}

/// Associativity (left or right).

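A hedged reading of these values, with made-up names: because application (80) outranks unary minus (79), the minus applies to the whole application.

# Illustrative; `f` and `x` are hypothetical.
negated = -f x    # groups as -(f x)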
@ -231,17 +231,20 @@ macro_rules! with_ast_definition { ($f:ident ($($args:tt)*)) => { $f! { $($args)
            /// It is an error for this to be empty.
            pub body: Option<Tree<'s>>,
        },
        /// An operator definition, like `== self rhs = True`.
        OperatorFunction {
            /// The operator being defined.
            pub name: token::Operator<'s>,
        /// A foreign function definition.
        ForeignFunction {
            /// The `foreign` keyword.
            pub foreign: token::Ident<'s>,
            /// The function's language.
            pub language: token::Ident<'s>,
            /// The name to which the function should be bound.
            pub name: token::Ident<'s>,
            /// The argument patterns.
            pub args: Vec<ArgumentDefinition<'s>>,
            pub args: Vec<ArgumentDefinition<'s>>,
            /// The `=` token.
            pub equals: token::Operator<'s>,
            /// The body, which will typically be an inline expression or a `BodyBlock` expression.
            /// It is an error for this to be empty.
            pub body: Option<Tree<'s>>,
            pub equals: token::Operator<'s>,
            /// The body, which is source code for the specified language.
            pub body: Tree<'s>,
        },
        /// An import statement.
        Import {
@ -278,16 +281,6 @@ macro_rules! with_ast_definition { ($f:ident ($($args:tt)*)) => { $f! { $($args)
            #[reflect(rename = "type")]
            pub type_: Tree<'s>,
        },
        /// Statement declaring the type of an operator.
        OperatorTypeSignature {
            /// Operator whose type is being declared.
            pub operator: token::Operator<'s>,
            /// The `:` token.
            pub colon: token::Operator<'s>,
            /// The method's type.
            #[reflect(rename = "type")]
            pub type_: Tree<'s>,
        },
        /// An expression with explicit type information attached.
        TypeAnnotated {
            /// The expression whose type is being annotated.
@ -763,18 +756,6 @@ impl<'s> span::Builder<'s> for OperatorDelimitedTree<'s> {
/// application has special semantics.
pub fn apply<'s>(mut func: Tree<'s>, mut arg: Tree<'s>) -> Tree<'s> {
    match (&mut *func.variant, &mut *arg.variant) {
        (Variant::TextLiteral(lhs), Variant::TextLiteral(rhs)) if !lhs.closed => {
            join_text_literals(lhs, rhs, mem::take(&mut arg.span));
            if let TextLiteral { open: Some(open), newline: None, elements, closed: true, close: None } = lhs
                    && open.code.starts_with('#') {
                let mut open = open.clone();
                open.left_offset += func.span.left_offset;
                let elements = mem::take(elements);
                let doc = DocComment { open, elements, newlines: default() };
                return Tree::documented(doc, default());
            }
            func
        }
        (Variant::Number(func_ @ Number { base: _, integer: None, fractional_digits: None }),
                Variant::Number(Number { base: None, integer, fractional_digits })) => {
            func_.integer = mem::take(integer);
@ -838,7 +819,8 @@ pub fn apply<'s>(mut func: Tree<'s>, mut arg: Tree<'s>) -> Tree<'s> {
    }
}

fn join_text_literals<'s>(
/// Join two text literals, merging contents as appropriate to each field.
pub fn join_text_literals<'s>(
    lhs: &mut TextLiteral<'s>,
    rhs: &mut TextLiteral<'s>,
    rhs_span: Span<'s>,
@ -934,7 +916,7 @@ pub fn apply_unary_operator<'s>(opr: token::Operator<'s>, rhs: Option<Tree<'s>>)
impl<'s> From<Token<'s>> for Tree<'s> {
    fn from(token: Token<'s>) -> Self {
        match token.variant {
            token::Variant::Ident(ident) => Tree::ident(token.with_variant(ident)),
            token::Variant::Ident(ident) => token.with_variant(ident).into(),
            token::Variant::Digits(number) =>
                Tree::number(None, Some(token.with_variant(number)), None),
            token::Variant::NumberBase(base) =>
@ -973,7 +955,7 @@ impl<'s> From<Token<'s>> for Tree<'s> {
            // Map an error case in the lexer to an error in the AST.
            | token::Variant::Invalid(_) => {
                let message = format!("Unexpected token: {token:?}");
                let ident = token::variant::Ident(false, 0, false, false);
                let ident = token::variant::Ident(false, 0, false, false, false);
                let value = Tree::ident(token.with_variant(ident));
                Tree::with_error(value, message)
            }
@ -981,6 +963,12 @@ impl<'s> From<Token<'s>> for Tree<'s> {
    }
}

impl<'s> From<token::Ident<'s>> for Tree<'s> {
    fn from(token: token::Ident<'s>) -> Self {
        Tree::ident(token)
    }
}



// =============================
@ -1013,9 +1001,8 @@ pub fn recurse_left_mut_while<'s>(
        | Variant::Lambda(_)
        | Variant::Array(_)
        | Variant::Annotated(_)
        | Variant::OperatorFunction(_)
        | Variant::OperatorTypeSignature(_)
        | Variant::Documented(_)
        | Variant::ForeignFunction(_)
        | Variant::Tuple(_) => break,
        // Optional LHS.
        Variant::ArgumentBlockApplication(ArgumentBlockApplication { lhs, .. })

@ -31,10 +31,10 @@ main =
    Bench.measure ((random_vec.drop (First 20)).sum) "Drop First 20 and Sum" iter_size num_iterations
    Bench.measure ((random_vec.drop (Last 20)).sum) "Drop Last 20 and Sum" iter_size num_iterations
    Bench.measure (random_vec.filter (x -> x % 3 == 1)) "Filter" iter_size num_iterations
    Bench.measure (random_vec.filter_with_index (i->x -> (i+x) % 3 == 1)) "Filter With Index" iter_size num_iterations
    Bench.measure (random_vec.filter_with_index (i-> x-> (i+x) % 3 == 1)) "Filter With Index" iter_size num_iterations
    Bench.measure (random_vec . map (x -> x + random_gen.nextLong) . filter (x -> x % 3 == 1)) "Map & Filter" iter_size num_iterations
    Bench.measure (random_vec.partition (x -> x % 3 == 1)) "Partition" iter_size num_iterations
    Bench.measure (random_vec.partition_with_index (i->x -> (i+x) % 3 == 1)) "Partition With Index" iter_size num_iterations
    Bench.measure (random_vec.partition_with_index (i-> x-> (i+x) % 3 == 1)) "Partition With Index" iter_size num_iterations

stateful_fun x =
    s = State.get Number

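The lambda-spacing edits above reflect that the new parser takes operator spacing into account when grouping `->`; a curried two-argument lambda is written with uniform spacing (sketch):

add = a-> b-> a + b    # two nested lambdas; `a->b -> ...` would group differently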
@ -7,9 +7,9 @@ main = Test_Suite.run_main <|
    secret = enso_project.data / 'secret.json'
    api = Google_Api.initialize secret

    Test.group "Google Spreadsheets"
    Test.group "Google Spreadsheets" <|

        Test.specify "should allow downloading a spreadsheet"
        Test.specify "should allow downloading a spreadsheet" <|
            sheet_id = '1WjVQhYdc04RwdWB22RNLgfQiLeWYhxiij1_xj22RDq0'
            sheet_range = 'Sheet1!A1:B6'
            table = api.spreadsheets.get_table sheet_id sheet_range

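The test-suite edits in this and the following hunks share one pattern: the new parser requires an explicit `<|` to pass the subsequent indented block as an argument to `Test.group`/`Test.specify` (sketch):

Test.group "Example group" <|
    Test.specify "adds numbers" <|
        (1 + 1) . should_equal 2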
@ -74,7 +74,7 @@ spec =
        res = Text.from t format=(Delimited ";")
        res.should_equal expected

    Test.specify 'should allow forced quoting of records'
    Test.specify 'should allow forced quoting of records' <|
        c1 = ['name', ['Robert");DROP TABLE Students;--', 'This;Name;;Is""Strange', 'Marcin,,']]
        c2 = ['grade', [10, 20, 'hello;world']]
        t = Table.new [c1, c2]

@ -42,7 +42,7 @@ postgres_specific_spec connection db_name pending =
        new_schema . should_equal "information_schema"

    databases = connection.databases.filter d->((d!=db_name) && (d!='rdsadmin'))
    pending_database = if databases.length != 0 then Nothing else "Cannot tests changing database unless two databases defined."
    pending_database = if databases.length != 0 then Nothing else "Cannot test changing database unless two databases defined."
    Test.specify "should allow changing database" pending=pending_database <|
        new_connection = connection.set_database databases.first
        new_database = new_connection.read (SQL_Query.Raw_SQL "SELECT current_database()") . at 0 . to_vector . first

@ -43,7 +43,7 @@ spec = Test.group "Natural Order" <|
        ["255.255.0.0", "127.0.0.1", "255.255.255.0", "200"].sort by=Natural_Order.compare . should_equal ["127.0.0.1", "200", "255.255.0.0", "255.255.255.0"]
        ["100-200.300", "1.2.3", "4.5.6", "4-5-6"].sort by=Natural_Order.compare . should_equal ["1.2.3", "4-5-6", "4.5.6", "100-200.300"]

    Test.specify "does not treat a floating point in a special way"
    Test.specify "does not treat a floating point in a special way" <|
        Natural_Order.compare "0" "0.0" . should_equal Ordering.Less
        Natural_Order.compare "0" "1.0001" . should_equal Ordering.Less
        Natural_Order.compare "1.0001" "1.01" . should_equal Ordering.Less

@ -128,7 +128,7 @@ spec = Test.group "Range" <|
    Test.specify "should allow conversion to vector" <|
        1.up_to 6 . to_vector . should_equal [1, 2, 3, 4, 5]

    Test.specify "should allow checking if a value is in the range"
    Test.specify "should allow checking if a value is in the range" <|
        0.up_to 10 . contains 5 . should_be_true
        0.up_to 10 . contains 0 . should_be_true
        0.up_to 10 . contains 9 . should_be_true

@ -193,7 +193,7 @@ spec =
        series_c = [Nothing,0.769797,0.281678,0.462145,0.727132,0.327978,Nothing,0.648639,0.562636,Nothing,0.159836,0.367404,0.877087,0.365483,Nothing,0.931873,0.723546,0.558085,0.163396,0.940997,0.399685,0.617509]
        series = [series_a, series_b, series_c]

        Test.specify "can compute Covariance, Correlation and R Squared between a pair of series"
        Test.specify "can compute Covariance, Correlation and R Squared between a pair of series" <|
            series_a.compute (Covariance series_b) . should_equal -0.0053554 epsilon=double_error
            series_a.compute (Pearson series_b) . should_equal -0.08263943 epsilon=double_error
            series_a.compute (Spearman series_b) . should_equal -0.09313725 epsilon=double_error

@ -266,7 +266,7 @@ spec =
        "ABCDEFGH".drop (Every 3 first=1) . should_equal "ACDFG"
        "ABCDEFGHI".drop (Every 3 first=1) . should_equal "ACDFGI"

    Test.specify "should allow taking or dropping a random sample of a substring"
    Test.specify "should allow taking or dropping a random sample of a substring" <|
        "AAAAA".take (Sample 3) . should_equal "AAA"
        "AAAAA".drop (Sample 3) . should_equal "AA"

@ -802,8 +802,8 @@ spec =
        long_text = """
            Hello from a long text. EOL
            SOL Hmm...
        long_text . contains "EOL.SOL" ((Regex_Matcher.Regex_Matcher_Data case_sensitivity=Case_Sensitivity.Sensitive) dot_matches_newline=True) . should_be_true
        long_text . contains "EOL.SOL" ((Regex_Matcher.Regex_Matcher_Data case_sensitivity=Case_Sensitivity.Sensitive) dot_matches_newline=False) . should_be_false
        long_text . contains "EOL.SOL" (Regex_Matcher.Regex_Matcher_Data case_sensitivity=Case_Sensitivity.Sensitive dot_matches_newline=True) . should_be_true
        long_text . contains "EOL.SOL" (Regex_Matcher.Regex_Matcher_Data case_sensitivity=Case_Sensitivity.Sensitive dot_matches_newline=False) . should_be_false

    Test.specify "should check for starts_with using Unicode normalization" <|
        "Hello".starts_with "He" . should_be_true
@ -889,8 +889,8 @@ spec =
        long_text = """
            EOL
            SOL Hmm...
        long_text . starts_with "EOL.SOL" ((Regex_Matcher.Regex_Matcher_Data case_sensitivity=Case_Sensitivity.Sensitive) dot_matches_newline=True) . should_be_true
        long_text . starts_with "EOL.SOL" ((Regex_Matcher.Regex_Matcher_Data case_sensitivity=Case_Sensitivity.Sensitive) dot_matches_newline=False) . should_be_false
        long_text . starts_with "EOL.SOL" (Regex_Matcher.Regex_Matcher_Data case_sensitivity=Case_Sensitivity.Sensitive dot_matches_newline=True) . should_be_true
        long_text . starts_with "EOL.SOL" (Regex_Matcher.Regex_Matcher_Data case_sensitivity=Case_Sensitivity.Sensitive dot_matches_newline=False) . should_be_false

        "aaazzz" . starts_with "a|b" (Regex_Matcher.Regex_Matcher_Data case_sensitivity=Case_Sensitivity.Sensitive) . should_be_true
        "bbbzzz" . starts_with "a|b" (Regex_Matcher.Regex_Matcher_Data case_sensitivity=Case_Sensitivity.Sensitive) . should_be_true
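Both regex hunks drop an old-parser quirk: a named argument applied outside the parentheses of an already-parenthesized constructor application. The two spellings side by side (argument values illustrative):

# old, idiosyncratic:
matcher = (Regex_Matcher.Regex_Matcher_Data case_sensitivity=Case_Sensitivity.Sensitive) dot_matches_newline=True
# new, canonical:
matcher = Regex_Matcher.Regex_Matcher_Data case_sensitivity=Case_Sensitivity.Sensitive dot_matches_newline=True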
@ -1233,7 +1233,7 @@ spec =
        abc.location_of_all "" matcher=regex . should_equal [Span_Data (Range_Data 0 0) abc, Span_Data (Range_Data 0 0) abc, Span_Data (Range_Data 1 1) abc, Span_Data (Range_Data 2 2) abc, Span_Data (Range_Data 3 3) abc]
        abc.location_of "" matcher=regex mode=Matching_Mode.Last . should_equal (Span_Data (Range_Data 3 3) abc)

    Test.specify "should handle overlapping matches as shown in the examples"
    Test.specify "should handle overlapping matches as shown in the examples" <|
        "aaa".location_of "aa" mode=Matching_Mode.Last matcher=Text_Matcher.Case_Sensitive . should_equal (Span_Data (Range_Data 1 3) "aaa")
        "aaa".location_of "aa" mode=Matching_Mode.Last matcher=Regex_Matcher.Regex_Matcher_Data . should_equal (Span_Data (Range_Data 0 2) "aaa")


@ -235,9 +235,10 @@ spec =
        Panic.recover Text throw_a_bar_panicking . stack_trace . second . name . should_equal "Error_Spec.throw_a_bar_panicking"

    Test.specify "Unsupported_Argument_Types message should be readable" <|
        Panic.catch Unsupported_Argument_Types_Data (Long.decode 42) handler=err ->
        check err =
            (err.payload.message) . should_equal "Cannot convert '42'(language: Java, type: java.lang.Long) to Java type 'java.lang.String': Invalid or lossy primitive coercion."
            (err.payload.message) . should_equal (err.payload.to_display_text)

        Panic.catch Unsupported_Argument_Types_Data (Long.decode 42) handler=check

main = Test_Suite.run_main spec

@ -77,7 +77,7 @@ spec = Test.group "Dataflow Warnings" <|
        mtp.should_equal (My_Type.Value 1 2 3)
        Warning.get_all mtp . map .value . should_equal [My_Warning.Value "warn!", My_Warning.Value "warn!!", My_Warning.Value "warn!!!"]

    Test.specify "should thread warnings through method calls"
    Test.specify "should thread warnings through method calls" <|
        mtp = My_Type.Value 1 2 3
        warned = Warning.attach "omgggg" mtp
        r = warned.my_method

@ -30,7 +30,7 @@ spec = Test.group "Serializable Visualization Identifiers" <|
        v_1.to_json.should_equal (expected "enso_dev.Visualization_Tests" "My Vis")
        v_2.to_json.should_equal (expected "Standard.Base" "Other Vis")

    Test.specify "specifies default JSON visualization for any type"
    Test.specify "specifies default JSON visualization for any type" <|
        My_Type.Value 30 . default_visualization . should_equal Visualization.Id.json
        [1,2,3].default_visualization.should_equal Visualization.Id.json
        "foobar".default_visualization.should_equal Visualization.Id.json