mirror of https://github.com/AleoHQ/leo.git
synced 2024-12-29 20:44:52 +03:00

merge upstream changes
Commit 8cb4b19e66
.github/workflows/acl2.yml
vendored
67
.github/workflows/acl2.yml
vendored
@@ -21,7 +21,18 @@ jobs:
       - name: Generate asts
         run: |
-          cargo -q run -p leo-test-framework --bin tgc
+          # This currently has to be on testnet3 to work:
+          cd compiler/parser
+          cargo -q install --path . --example parser
+          # To ensure full file tests,
+          # just do compiler tests and remove expectation: Fail tests
+          cd ../../tests/compiler
+          find . -type f -exec grep -q 'expectation *: *Fail' {} \; -delete
+          find . -name '*.leo' -execdir parser {} \; > /dev/null
+          # Show how many there are:
+          echo "Counts (.leo / .json):"
+          find . -name '*.leo' -print | wc -l
+          find . -name '*.json' -print | wc -l
 
       # Pull the latest release from the leo-acl2-bin repo, and put it into the
       # repo/acl2 directory. After it's done, unpack the tgz file locally.
@@ -29,7 +40,7 @@ jobs:
         run: |
           mkdir acl2 && cd acl2;
           wget $( curl -s https://api.github.com/repos/AleoHQ/leo-acl2-bin/releases/latest \
-            | jq -r '.assets[0].browser_download_url' )
+            | jq -r '.assets[].browser_download_url|scan("^.*leo-acl2--v.*\\.tgz$")' )
 
           echo "Unpacking $(ls):"
           tar -xvzf $(ls)
@@ -37,28 +48,31 @@ jobs:
       # Run theorem generation and checking using the prepared ASTs and the pulled and unzipped leo-acl2 tarball.
       - name: Run tgc over ASTs
         run: |
-          canonicalization_errors=();
-          type_inference_errors=();
-          num_dirs=0;
-          for dir in `ls tmp/tgc`;
+          parsing_errors=();
+          num_cases=0;
+          # Note that PWD gets reset at the beginning of each step
+          # to /home/runner/work/leo/leo
+          acl2dir="${PWD}"/acl2
+          cd tests/compiler;
+          for tc in `find ${PWD} -name '*.leo' -print`;
           do
-            cd tmp/tgc/$dir; # enter the directory
-            num_dirs=$((num_dirs + 1));
-            ./../../../acl2/tgc canonicalization initial_ast.json canonicalization_ast.json canonicalization-theorem.lisp > canonicalization_result.out || canonicalization_errors+=("$dir");
-            ./../../../acl2/tgc type-inference canonicalization_ast.json type_inferenced_ast.json type-inference-theorem.lisp > type_inference_result.out || type_inference_errors+=("$dir");
-            cd ../../..
+            cd "${tc%/*}"; # enter the directory
+            leofile="${tc##*/}";
+            jsonfile="${leofile%.leo}.json";
+            num_cases=$((num_cases + 1));
+            "$acl2dir"/tgc parsing "$leofile" "$jsonfile" parsing-theorem.lisp > parsing_result.out || parsing_errors+=("$tc");
           done;
 
           echo "----------------"
-          echo "Ran tgc in ${num_dirs} directories."
+          echo "Ran tgc on ${num_cases} programs."
           echo "----------------"
-          if [ ${#canonicalization_errors[@]} -eq 0 ]; then
-            echo "Canonicalization - Success!"
+          if [ ${#parsing_errors[@]} -eq 0 ]; then
+            echo "Parsing - Total Success!"
           else
-            echo "Canonicalization Failures (total: ${#canonicalization_errors[@]}):"
-            for dir in ${canonicalization_errors[@]};
+            echo "Parsing Failures (total: ${#parsing_errors[@]}):"
+            for tc in ${parsing_errors[@]};
             do
-              echo $dir
+              echo $tc
             done;
 
             #echo "Attaching logs:"
@@ -69,24 +83,7 @@ jobs:
             #done;
           fi
 
-          echo "----------------"
-          if [ ${#type_inference_errors[@]} -eq 0 ]; then
-            echo "Type Inference - Success!"
-          else
-            echo "Type Inference Failures (total: ${#type_inference_errors[@]}):"
-            for dir in ${type_inference_errors[@]};
-            do
-              echo $dir
-            done;
-
-            #echo "Attaching logs:"
-            #for dir in ${type_inference_errors[@]};
-            #do
-            #  cat tmp/tgc/$dir/type_inference_result.out
-            #done;
-          fi
-
-          if [[ ${#canonicalization_errors[@]} -ne 0 || ${#type_inference_errors[@]} -ne 0 ]]; then
+          if [[ ${#parsing_errors[@]} -ne 0 ]]; then
            echo "----------------"
            echo "Exiting with status 1 due to at least one tgc error."
            exit 1
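The new step leans on bash parameter expansion to split each test-case path, which is easy to misread. Here is a minimal Rust analogue of what each expansion extracts (illustrative only, with a made-up path; the workflow itself stays in bash):

```rust
use std::path::Path;

fn main() {
    // Made-up test-case path of the kind the workflow's `find` emits.
    let tc = Path::new("tests/compiler/address/valid.leo");
    let dir = tc.parent().unwrap(); // ${tc%/*} : the directory to cd into
    let leofile = tc.file_name().unwrap(); // ${tc##*/} : "valid.leo"
    let jsonfile = Path::new(leofile).with_extension("json"); // ${leofile%.leo}.json : "valid.json"
    println!("{} {:?} {:?}", dir.display(), leofile, jsonfile);
}
```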
Cargo.lock (generated): 8 changed lines
@@ -2006,9 +2006,9 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.5.5"
+version = "1.5.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286"
+checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -2023,9 +2023,9 @@ checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
 
 [[package]]
 name = "regex-syntax"
-version = "0.6.25"
+version = "0.6.26"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
+checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64"
 
 [[package]]
 name = "remove_dir_all"
@@ -21,14 +21,14 @@ use crate::{Char, CharValue};
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub enum ValueExpression {
     // todo: deserialize values here
-    /// An address literal, e.g., `aleo1..`.
+    /// An address literal, e.g., `aleo1qnr4dkkvkgfqph0vzc3y6z2eu975wnpz2925ntjccd5cfqxtyu8s7pyjh9`.
     Address(String, #[serde(with = "leo_span::span_json")] Span),
     /// A boolean literal, either `true` or `false`.
     Boolean(String, #[serde(with = "leo_span::span_json")] Span),
     /// A char literal, e.g., `'a'`, representing a single unicode code point.
     Char(CharValue),
     /// A field literal, e.g., `42field`.
-    /// That is, a signed number followed by the keyword `field`.
+    /// A signed number followed by the keyword `field`.
     Field(String, #[serde(with = "leo_span::span_json")] Span),
     /// A group literal, either product or affine.
     /// For example, `42group` or `(12, 52)group`.
@@ -53,9 +53,9 @@ impl Type {
         match (self, other) {
             (Type::Address, Type::Address)
             | (Type::Boolean, Type::Boolean)
+            | (Type::Char, Type::Char)
             | (Type::Field, Type::Field)
             | (Type::Group, Type::Group)
-            | (Type::Char, Type::Char)
             | (Type::Scalar, Type::Scalar) => true,
             (Type::IntegerType(left), Type::IntegerType(right)) => left.eq(right),
             _ => false,
@@ -64,9 +64,10 @@ const FIELD_SCALAR_INT_TYPES: [Type; 12] = create_type_superset(FIELD_INT_TYPES,
 
 const FIELD_GROUP_INT_TYPES: [Type; 12] = create_type_superset(FIELD_INT_TYPES, [Type::Group]);
 
-const ALL_NUMERICAL_TYPES: [Type; 13] = create_type_superset(FIELD_GROUP_INT_TYPES, [Type::Scalar]);
+const FIELD_GROUP_SCALAR_INT_TYPES: [Type; 13] = create_type_superset(FIELD_GROUP_INT_TYPES, [Type::Scalar]);
 
 impl<'a> TypeChecker<'a> {
+    /// Returns a new type checker given a symbol table and error handler.
     pub fn new(symbol_table: &'a mut SymbolTable<'a>, handler: &'a Handler) -> Self {
         Self {
             symbol_table,
@@ -76,6 +77,7 @@ impl<'a> TypeChecker<'a> {
         }
     }
 
+    // Checks whether two given types are the same and if not emits an error.
     pub(crate) fn assert_eq_types(&self, t1: Option<Type>, t2: Option<Type>, span: Span) {
         match (t1, t2) {
             (Some(t1), Some(t2)) if t1 != t2 => self
@@ -88,6 +90,7 @@ impl<'a> TypeChecker<'a> {
         }
     }
 
+    /// Returns the given type if it equals the expected type or the expected type is none.
     pub(crate) fn assert_type(&self, type_: Type, expected: Option<Type>, span: Span) -> Type {
         if let Some(expected) = expected {
             if type_ != expected {
@@ -99,14 +102,10 @@ impl<'a> TypeChecker<'a> {
         type_
     }
 
-    pub(crate) fn assert_one_of_types(&self, type_: Option<Type>, expected: &[Type], span: Span) -> Option<Type> {
+    /// Emits an error to the error handler if the given type is not equal to any of the expected types.
+    pub(crate) fn assert_one_of_types(&self, type_: Option<Type>, expected: &[Type], span: Span) {
         if let Some(type_) = type_ {
-            for t in expected.iter() {
-                if &type_ == t {
-                    return Some(type_);
-                }
-            }
-
+            if !expected.iter().any(|t: &Type| t == &type_) {
                 self.handler.emit_err(
                     TypeCheckerError::expected_one_type_of(
                         expected.iter().map(|t| t.to_string() + ",").collect::<String>(),
@@ -116,35 +115,26 @@ impl<'a> TypeChecker<'a> {
                         .into(),
                 );
             }
+        }
-        type_
     }
 
-    pub(crate) fn _assert_arith_type(&self, type_: Option<Type>, span: Span) -> Option<Type> {
-        self.assert_one_of_types(type_, &FIELD_GROUP_INT_TYPES, span)
-    }
-
-    pub(crate) fn _assert_field_or_int_type(&self, type_: Option<Type>, span: Span) -> Option<Type> {
+    /// Emits an error to the handler if the given type is not a field or integer.
+    pub(crate) fn assert_field_int_type(&self, type_: Option<Type>, span: Span) {
         self.assert_one_of_types(type_, &FIELD_INT_TYPES, span)
     }
 
-    pub(crate) fn _assert_int_type(&self, type_: Option<Type>, span: Span) -> Option<Type> {
-        self.assert_one_of_types(type_, &INT_TYPES, span)
-    }
-
-    pub(crate) fn assert_field_group_scalar_int_type(&self, type_: Option<Type>, span: Span) -> Option<Type> {
-        self.assert_one_of_types(type_, &ALL_NUMERICAL_TYPES, span)
-    }
-
-    pub(crate) fn assert_field_group_int_type(&self, type_: Option<Type>, span: Span) -> Option<Type> {
-        self.assert_one_of_types(type_, &FIELD_GROUP_INT_TYPES, span)
-    }
-
-    pub(crate) fn assert_field_scalar_int_type(&self, type_: Option<Type>, span: Span) -> Option<Type> {
+    /// Emits an error to the handler if the given type is not a field, scalar, or integer.
+    pub(crate) fn assert_field_scalar_int_type(&self, type_: Option<Type>, span: Span) {
         self.assert_one_of_types(type_, &FIELD_SCALAR_INT_TYPES, span)
     }
 
-    pub(crate) fn assert_field_int_type(&self, type_: Option<Type>, span: Span) -> Option<Type> {
-        self.assert_one_of_types(type_, &FIELD_INT_TYPES, span)
+    /// Emits an error to the handler if the given type is not a field, group, or integer.
+    pub(crate) fn assert_field_group_int_type(&self, type_: Option<Type>, span: Span) {
+        self.assert_one_of_types(type_, &FIELD_GROUP_INT_TYPES, span)
+    }
+
+    /// Emits an error to the handler if the given type is not a field, group, scalar or integer.
+    pub(crate) fn assert_field_group_scalar_int_type(&self, type_: Option<Type>, span: Span) {
+        self.assert_one_of_types(type_, &FIELD_GROUP_SCALAR_INT_TYPES, span)
     }
 }
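The `assert_one_of_types` refactor above swaps a hand-rolled loop with an early return for `Iterator::any`, and stops returning `Option<Type>`. A minimal standalone sketch of the new shape, using placeholder types instead of Leo's `Type` and error `Handler`:

```rust
// Placeholder stand-ins for Leo's `Type` and error handler, for illustration only.
#[derive(Debug, PartialEq)]
enum Ty {
    Field,
    Group,
    Scalar,
}

// Emits an error if `actual` is known and not among `expected` (mirrors the new shape).
fn assert_one_of(actual: Option<Ty>, expected: &[Ty]) {
    if let Some(actual) = actual {
        // The old code looped with an early `return Some(actual)`;
        // `Iterator::any` expresses the same membership test directly.
        if !expected.iter().any(|t| t == &actual) {
            eprintln!("type error: expected one of {:?}, found {:?}", expected, actual);
        }
    }
}

fn main() {
    assert_one_of(Some(Ty::Scalar), &[Ty::Field, Ty::Group]); // reports an error
    assert_one_of(Some(Ty::Field), &[Ty::Field, Ty::Group]); // passes silently
    assert_one_of(None, &[Ty::Field]); // unknown type: nothing to check
}
```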
Binary file not shown.
@@ -375,7 +375,7 @@ input-expression = literal
 
 input-item = identifier ":" input-type "=" input-expression ";"
 
-input-title = "[" identifier "]"
+input-title = "[" ( %s"public" / %s"private" / %s"constant" / %s"const" ) "]"
 
 input-section = input-title *input-item
 
@@ -45,27 +45,32 @@ generated instead. A PR should contain changes to expectations as well as to tes
 
 Here is the list of all possible configuration options for compiler and parser tests.
 
-#### namespace
+### namespace
 
-```
+```yaml
 - Mandatory: yes
 - Namespace: all
-- Values: Compile / Parse
+- Values: ...
 ```
 
-Only two values are supported: `Parse` and `Compile`. The former is meant to be a parser test, the latter
-is a full compiler test.
-
-Besides the `Parse` value,
-there are actually additional possible values for this field:
-`ParseStatement`, `ParseExpression`, and `Token`.
-Each one of them allows testing Leo parser on different levels - lexer tokens or just expressions/statements.
-
-Compiler tests always include complete Leo programs.
+Several values are supported, but they vary depending on the directory you are in.
+
+Parser directory namespaces:
+
+- `Parse` - Test a file to check that it is a valid Leo program.
+- `ParseExpression` - Test a file line by line to check that each line is a valid Leo expression.
+- `ParseStatement` - Test a file, consuming multiple lines until a blank line, to check that it contains a valid Leo statement.
+- `Serialize` - Test a file to check that it can be serialized to JSON.
+- `Input` - Test an input file to check that it is a valid Leo input file.
+- `Token` - Test a file line by line to check that it contains zero or more valid Leo parser tokens.
+
+Compiler directory namespaces:
+
+- `Compiler` - Test a file to check that it is a valid Leo program and that it compiles without errors.
 
 ### expectation
 
-```
+```yaml
 - Mandatory: yes
 - Namespace: all
 - Values: Pass / Fail
@@ -77,7 +82,7 @@ you'll know that something went wrong and the test or the compiler/parser needs
 
 ### input_file (Compile)
 
-```
+```yaml
 - Mandatory: no
 - Namespace: Compile
 - Values: <input file path>, ...
@@ -87,7 +92,7 @@ This setting allows using one or more input files for the Leo program.
 The program will be run with every provided input.
 See this example:
 
-```
+```yaml
 /*
 namespace: Compile
 expectation: Pass
@@ -0,0 +1,82 @@
+# Leo Test Framework
+
+This directory includes the code for the Leo Test Framework.
+
+## How it works
+
+You would first create a Rust test file inside the folder of some part of the compiler, as the test framework tests are run by the Rust test framework.
+
+### Namespaces
+
+Then you would create a `struct` that represents a `Namespace`, where you have to implement the following:
+
+#### Parse Type
+
+Each `namespace` must have a function, `parse_type`, that returns a `ParseType`. There are several kinds of `ParseType`:
+
+- Line - Parses the file line by line.
+- ContinuousLines - Parses lines continuously as one item until an empty line is encountered.
+- Whole - Parses the whole file.
+
+#### Run Test
+
+Each `namespace` must have a function that runs the tests and dictates how you want the tests for that namespace to work. To make running a test possible, you are given information about the test file: its name, content, path, etc. You may return any type of output to be written to an expectation file, as long as it is serializable.
+
+### Runner
+
+Then you would create a `struct` that represents a `Runner`, where you have to implement the following:
+
+#### Resolve Namespace
+
+Each test file needs only one runner, which resolves a given string to the namespace whose tests should be run, e.g.:
+
+```rust
+struct ParseTestRunner;
+
+impl Runner for ParseTestRunner {
+    fn resolve_namespace(&self, name: &str) -> Option<Box<dyn Namespace>> {
+        Some(match name {
+            "Parse" => Box::new(ParseNamespace),
+            "ParseExpression" => Box::new(ParseExpressionNamespace),
+            "ParseStatement" => Box::new(ParseStatementNamespace),
+            "Serialize" => Box::new(SerializeNamespace),
+            "Input" => Box::new(InputNamespace),
+            "Token" => Box::new(TokenNamespace),
+            _ => return None,
+        })
+    }
+}
+```
+
+### Rust Test Function
+
+The last thing necessary is a Rust test function that calls the framework with the runner and the name of the test directory, e.g.:
+
+```rust
+#[test]
+pub fn parser_tests() {
+    // The second argument indicates the directory where the tests (.leo files)
+    // are found (tests/parser).
+    leo_test_framework::run_tests(&ParseTestRunner, "parser");
+}
+```
+
+### Clearing Expectations
+
+Simply remove the corresponding `.out` file in the `tests/expectations` directory; the output will then be regenerated. An easier way to mass-change expectations is discussed in the next section.
+
+### Test Framework Environment Variables
+
+To make several aspects of the test framework easier to work with, there are two environment variables:
+
+- `TEST_FILTER` - runs all tests that contain the specified name.
+- `CLEAR_LEO_TEST_EXPECTATIONS` - if set, clears all current expectations and regenerates them.
+
+To set environment variables, consult the documentation for your shell (bash/powershell/cmd/fish/etc.).
+
+**NOTE**: Don't forget to clear the environment variable after running with that setting, or set a temporary environment variable if your shell supports it.
@@ -22,6 +22,7 @@
 //! To regenerate the tests after a syntax change or failing test, delete the [`tests/expectations/`]
 //! directory and run the [`parser_tests()`] test in [`parser/src/test.rs`].
 
+#![cfg(not(doctest))] // Don't doctest the markdown.
 #![doc = include_str!("../README.md")]
 
 pub mod error;