Merge branch 'master' of github.com:AleoHQ/leo into bug/abnf-parser-bugs

This commit is contained in:
gluaxspeed 2021-07-08 04:07:00 -07:00
commit 01717ee86f
23 changed files with 1104 additions and 303 deletions

Cargo.lock generated

@ -52,9 +52,9 @@ checksum = "15af2628f6890fe2609a3b91bef4c83450512802e59489f9c1cb1fa5df064a61"
[[package]]
name = "assert_cmd"
version = "1.0.5"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a88b6bd5df287567ffdf4ddf4d33060048e1068308e5f62d81c6f9824a045a48"
checksum = "3d20831bd004dda4c7c372c19cdabff369f794a95e955b3f13fe460e3e1ae95f"
dependencies = [
"bstr",
"doc-comment",
@ -488,10 +488,10 @@ dependencies = [
]
[[package]]
name = "difference"
version = "2.0.0"
name = "difflib"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198"
checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8"
[[package]]
name = "digest"
@ -1772,11 +1772,12 @@ checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
[[package]]
name = "predicates"
version = "1.0.8"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f49cfaf7fdaa3bfacc6fa3e7054e65148878354a5cfddcf661df4c851f8021df"
checksum = "c6e46ca79eb4e21e2ec14430340c71250ab69332abf85521c95d3a8bc336aa76"
dependencies = [
"difference",
"difflib",
"itertools 0.10.1",
"predicates-core",
]
@ -2629,18 +2630,18 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.25"
version = "1.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa6f76457f59514c7eeb4e59d891395fab0b2fd1d40723ae737d64153392e9c6"
checksum = "93119e4feac1cbe6c798c34d3a53ea0026b0b1de6a120deef895137c0529bfe2"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.25"
version = "1.0.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a36768c0fbf1bb15eca10defa29526bda730a2376c2ab4393ccfa16fb1a318d"
checksum = "060d69a0afe7796bf42e9e2ff91f5ee691fb15c53d38b4b62a9a53eb23164745"
dependencies = [
"proc-macro2 1.0.27",
"quote 1.0.9",


@ -162,7 +162,7 @@ version = "0.12.1"
version = "0.11.2"
[dev-dependencies.assert_cmd]
version = "1.0.5"
version = "1.0.7"
[dev-dependencies.test_dir]
version = "0.1.0"

ast/src/errors/ast.rs Normal file

@ -0,0 +1,40 @@
// Copyright (C) 2019-2021 Aleo Systems Inc.
// This file is part of the Leo library.
// The Leo library is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// The Leo library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with the Leo library. If not, see <https://www.gnu.org/licenses/>.
use crate::{FormattedError, LeoError, ReducerError, Span};
#[derive(Debug, Error)]
pub enum AstError {
#[error("{}", _0)]
Error(#[from] FormattedError),
#[error("{}", _0)]
IOError(#[from] std::io::Error),
#[error("{}", _0)]
ReducerError(#[from] ReducerError),
#[error("{}", _0)]
SerdeJsonError(#[from] ::serde_json::Error),
}
impl LeoError for AstError {}
impl AstError {
fn _new_from_span(message: String, span: &Span) -> Self {
AstError::Error(FormattedError::new_from_span(message, span))
}
}


@ -34,10 +34,4 @@ impl CombinerError {
Self::new_from_span(message, span)
}
pub fn illegal_compound_array_range(span: &Span) -> Self {
let message = "Illegal compound assignement with array range".to_string();
Self::new_from_span(message, span)
}
}


@ -14,6 +14,9 @@
// You should have received a copy of the GNU General Public License
// along with the Leo library. If not, see <https://www.gnu.org/licenses/>.
pub mod ast;
pub use ast::*;
pub mod canonicalization;
pub use canonicalization::*;


@ -86,7 +86,7 @@ impl Ast {
}
/// Mutates the program ast by performing canonicalization on it.
pub fn canonicalize(&mut self) -> Result<(), ReducerError> {
pub fn canonicalize(&mut self) -> Result<(), AstError> {
self.ast = ReconstructingDirector::new(Canonicalizer::default()).reduce_program(self.as_repr())?;
Ok(())
}
@ -101,12 +101,20 @@ impl Ast {
}
/// Serializes the ast into a JSON string.
pub fn to_json_string(&self) -> Result<String, serde_json::Error> {
serde_json::to_string_pretty(&self.ast)
pub fn to_json_string(&self) -> Result<String, AstError> {
Ok(serde_json::to_string_pretty(&self.ast)?)
}
pub fn to_json_file(&self, mut path: std::path::PathBuf, file_name: &str) -> Result<(), AstError> {
path.push(file_name);
let file = std::fs::File::create(path)?;
let writer = std::io::BufWriter::new(file);
serde_json::to_writer_pretty(writer, &self.ast)?;
Ok(())
}
/// Deserializes the JSON string into an ast.
pub fn from_json_string(json: &str) -> Result<Self, serde_json::Error> {
pub fn from_json_string(json: &str) -> Result<Self, AstError> {
let ast: Program = serde_json::from_str(json)?;
Ok(Self { ast })
}


@ -55,6 +55,14 @@ impl Canonicalizer {
span: span.clone(),
}));
}
AssigneeAccess::ArrayRange(start, stop) => {
left = Box::new(Expression::ArrayRangeAccess(ArrayRangeAccessExpression {
array: left,
left: start.map(Box::new),
right: stop.map(Box::new),
span: span.clone(),
}));
}
AssigneeAccess::Tuple(positive_number, _) => {
left = Box::new(Expression::TupleAccess(TupleAccessExpression {
tuple: left,
@ -69,7 +77,6 @@ impl Canonicalizer {
span: span.clone(),
}));
}
_ => return Err(ReducerError::from(CombinerError::illegal_compound_array_range(&span))),
}
}


@ -15,7 +15,6 @@
// along with the Leo library. If not, see <https://www.gnu.org/licenses/>.
//! Compiles a Leo program from a file path.
use crate::{
constraints::{generate_constraints, generate_test_constraints},
errors::CompilerError,
@ -23,6 +22,8 @@ use crate::{
GroupType,
Output,
OutputFile,
TheoremOptions,
TypeInferencePhase,
};
pub use leo_asg::{new_context, AsgContext as Context, AsgContext};
use leo_asg::{Asg, AsgPass, FormattedError, Program as AsgProgram};
@ -66,6 +67,7 @@ pub struct Compiler<'a, F: PrimeField, G: GroupType<F>> {
context: AsgContext<'a>,
asg: Option<AsgProgram<'a>>,
options: CompilerOptions,
proof_options: TheoremOptions,
_engine: PhantomData<F>,
_group: PhantomData<G>,
}
@ -80,6 +82,7 @@ impl<'a, F: PrimeField, G: GroupType<F>> Compiler<'a, F, G> {
output_directory: PathBuf,
context: AsgContext<'a>,
options: Option<CompilerOptions>,
proof_options: Option<TheoremOptions>,
) -> Self {
Self {
program_name: package_name.clone(),
@ -90,6 +93,7 @@ impl<'a, F: PrimeField, G: GroupType<F>> Compiler<'a, F, G> {
asg: None,
context,
options: options.unwrap_or_default(),
proof_options: proof_options.unwrap_or_default(),
_engine: PhantomData,
_group: PhantomData,
}
@ -108,8 +112,16 @@ impl<'a, F: PrimeField, G: GroupType<F>> Compiler<'a, F, G> {
output_directory: PathBuf,
context: AsgContext<'a>,
options: Option<CompilerOptions>,
proof_options: Option<TheoremOptions>,
) -> Result<Self, CompilerError> {
let mut compiler = Self::new(package_name, main_file_path, output_directory, context, options);
let mut compiler = Self::new(
package_name,
main_file_path,
output_directory,
context,
options,
proof_options,
);
compiler.parse_program()?;
@ -139,8 +151,16 @@ impl<'a, F: PrimeField, G: GroupType<F>> Compiler<'a, F, G> {
state_path: &Path,
context: AsgContext<'a>,
options: Option<CompilerOptions>,
proof_options: Option<TheoremOptions>,
) -> Result<Self, CompilerError> {
let mut compiler = Self::new(package_name, main_file_path, output_directory, context, options);
let mut compiler = Self::new(
package_name,
main_file_path,
output_directory,
context,
options,
proof_options,
);
compiler.parse_input(input_string, input_path, state_string, state_path)?;
@ -218,11 +238,19 @@ impl<'a, F: PrimeField, G: GroupType<F>> Compiler<'a, F, G> {
pub fn parse_program_from_string(&mut self, program_string: &str) -> Result<(), CompilerError> {
// Use the parser to construct the abstract syntax tree (ast).
let mut ast = parse_ast(self.main_file_path.to_str().unwrap_or_default(), program_string)?;
let mut ast: leo_ast::Ast = parse_ast(self.main_file_path.to_str().unwrap_or_default(), program_string)?;
if self.proof_options.initial {
ast.to_json_file(self.output_directory.clone(), "inital_ast.json")?;
}
// Perform compiler optimization via canonicalizing the AST if it's enabled.
if self.options.canonicalization_enabled {
ast.canonicalize()?;
if self.proof_options.canonicalized {
ast.to_json_file(self.output_directory.clone(), "canonicalization_ast.json")?;
}
}
// Store the main program file.
@ -238,6 +266,13 @@ impl<'a, F: PrimeField, G: GroupType<F>> Compiler<'a, F, G> {
&mut leo_imports::ImportParser::new(self.main_file_path.clone()),
)?;
if self.proof_options.type_inferenced {
let new_ast = TypeInferencePhase::default()
.phase_ast(&self.program, &asg.clone().into_repr())
.expect("Failed to produce type inference ast.");
new_ast.to_json_file(self.output_directory.clone(), "type_inferenced_ast.json")?;
}
tracing::debug!("ASG generation complete");
// Store the ASG.


@ -16,7 +16,7 @@
use crate::errors::{ExpressionError, FunctionError, ImportError, StatementError};
use leo_asg::{AsgConvertError, FormattedError};
use leo_ast::{LeoError, ReducerError};
use leo_ast::{AstError, LeoError};
use leo_input::InputParserError;
use leo_parser::SyntaxError;
use leo_state::LocalDataVerificationError;
@ -62,7 +62,7 @@ pub enum CompilerError {
AsgConvertError(#[from] AsgConvertError),
#[error("{}", _0)]
ReducerError(#[from] ReducerError),
AstError(#[from] AstError),
#[error("{}", _0)]
StatementError(#[from] StatementError),


@ -36,3 +36,20 @@ impl Default for CompilerOptions {
}
}
}
#[derive(Clone)]
pub struct TheoremOptions {
pub initial: bool,
pub canonicalized: bool,
pub type_inferenced: bool,
}
impl Default for TheoremOptions {
fn default() -> Self {
Self {
initial: false,
canonicalized: false,
type_inferenced: false,
}
}
}


@ -19,7 +19,7 @@
use std::convert::TryInto;
use crate::{
errors::{ExpressionError, IntegerError, StatementError},
errors::{ExpressionError, StatementError},
program::ConstrainedProgram,
value::ConstrainedValue,
GroupType,
@ -55,9 +55,6 @@ impl<'a, F: PrimeField, G: GroupType<F>> ConstrainedProgram<'a, F, G> {
))
} else {
let target = input.get_mut(index).unwrap();
if let ConstrainedValue::Integer(int) = target {
println!("RTAAI T {}", int);
}
if context.remaining_accesses.is_empty() {
self.enforce_assign_context(cs, &context, target)
} else {
@ -130,6 +127,7 @@ impl<'a, F: PrimeField, G: GroupType<F>> ConstrainedProgram<'a, F, G> {
_ => Err(StatementError::array_assign_interior_index(&context.span)),
}
} else if context.from_range && input_len != 0 {
context.from_range = false;
if let Some(index) = index_resolved.to_usize() {
if index >= input_len {
return Err(StatementError::array_assign_index_bounds(
@ -147,12 +145,63 @@ impl<'a, F: PrimeField, G: GroupType<F>> ConstrainedProgram<'a, F, G> {
self.resolve_target_access(cs, context)
}
} else {
// index is input variable
let span = index.span().cloned().unwrap_or_default();
{
let array_len: u32 = context
.input
.len()
.try_into()
.map_err(|_| ExpressionError::array_length_out_of_bounds(&span))?;
self.array_bounds_check(cs, &&index_resolved, array_len, &span)?;
}
Err(StatementError::from(IntegerError::invalid_integer(
index_resolved.to_string(),
&span,
)))
for (i, item) in context.input.iter_mut().enumerate() {
let namespace_string = format!(
"evaluate dyn array assignment eq {} {}:{}",
i, span.line_start, span.col_start
);
let eq_namespace = cs.ns(|| namespace_string);
let index_bounded = i
.try_into()
.map_err(|_| ExpressionError::array_index_out_of_legal_bounds(&span))?;
let const_index = ConstInt::U32(index_bounded).cast_to(&index_resolved.get_type());
let index_comparison = index_resolved
.evaluate_equal(eq_namespace, &Integer::new(&const_index))
.map_err(|_| ExpressionError::cannot_evaluate("==".to_string(), &span))?;
let mut unique_namespace = cs.ns(|| {
format!(
"select array dyn assignment {} {}:{}",
i, span.line_start, span.col_start
)
});
let temp_item = {
let mut item = item.clone();
let mut new_context = ResolverContext {
input: vec![&mut item],
span: context.span.clone(),
target_value: context.target_value.clone(),
remaining_accesses: context.remaining_accesses,
indicator: context.indicator,
operation: context.operation,
from_range: false,
};
if context.remaining_accesses.is_empty() {
let item = new_context.input.remove(0);
self.enforce_assign_context(&mut unique_namespace, &new_context, item)?;
} else {
self.resolve_target_access(&mut unique_namespace, new_context)?;
}
item
};
let value =
ConstrainedValue::conditionally_select(unique_namespace, &index_comparison, &temp_item, &item)
.map_err(|e| ExpressionError::cannot_enforce("conditional select".to_string(), e, &span))?;
**item = value;
}
Ok(())
}
} else {
Err(StatementError::array_assign_interior_index(&context.span))


@ -32,8 +32,6 @@ impl<'a, F: PrimeField, G: GroupType<F>> ConstrainedProgram<'a, F, G> {
start: Option<&'a Expression<'a>>,
stop: Option<&'a Expression<'a>>,
) -> Result<(), StatementError> {
context.from_range = true;
let start_index = start
.map(|start| self.enforce_index(cs, start, &context.span))
.transpose()?
@ -52,8 +50,9 @@ impl<'a, F: PrimeField, G: GroupType<F>> ConstrainedProgram<'a, F, G> {
.transpose()?;
let start_index = start_index.unwrap_or(0);
if context.input.len() == 1 {
if !context.from_range {
// not a range of a range
context.from_range = true;
match context.input.remove(0) {
ConstrainedValue::Array(old) => {
let stop_index = stop_index.unwrap_or(old.len());

File diff suppressed because it is too large.


@ -16,9 +16,11 @@ function main() {
w += x;
console.assert(w == 33u32);
let y = [1u8, 2u8];
let y = [1u8, 2u8, 3, 4];
y[0] += 3u8;
y[0..3][1] *= 3;
console.assert(y[0] == 4u8);
console.assert(y[1] == 6u8);
let z = (1u8, 2u8);
z.1 += 3u8;


@ -52,7 +52,7 @@ fn new_compiler() -> EdwardsTestCompiler {
let path = PathBuf::from("/test/src/main.leo");
let output_dir = PathBuf::from(TEST_OUTPUT_DIRECTORY);
EdwardsTestCompiler::new(program_name, path, output_dir, make_test_context(), None)
EdwardsTestCompiler::new(program_name, path, output_dir, make_test_context(), None, None)
}
pub(crate) fn parse_program(program_string: &str) -> Result<EdwardsTestCompiler, CompilerError> {


@ -40,7 +40,7 @@ fn new_compiler(path: PathBuf) -> EdwardsTestCompiler {
let program_name = "test".to_string();
let output_dir = PathBuf::from("/output/");
EdwardsTestCompiler::new(program_name, path, output_dir, make_test_context(), None)
EdwardsTestCompiler::new(program_name, path, output_dir, make_test_context(), None, None)
}
pub(crate) fn parse_program(program_string: &str) -> Result<EdwardsTestCompiler, CompilerError> {


@ -18,30 +18,62 @@ DRAFT
# Summary
This proposal aims to improve the import management system in Leo programs to
make program environment more reproducible and predictable. To achieve that
we suggest few changes to Leo CLI and Manifest:
make the program environment more reproducible, predictable, and compatible. To achieve
that we suggest a few changes to the Leo CLI and Manifest:
- add a "dependencies" section to Leo Manifest and add a command to pull those dependencies;
- allow custom names for imports to manually resolve name conflicts;
- store imports as they are called in Leo Manifest;
- add "curve" and "proving system" sections to the Manifest;
- add "include" and "exclude" parameters for "proving system" and "curve";
Later this solution can be improved by adding a lock-file which would lock
imported packages based on both their contents and version.
# Motivation
What problems does it solve? What is the background?
The current design of imports does not provide any guarantees on what's stored
in program imports and published with the program to Aleo Package Manager.
When a dependency is "added," it is stored inside imports folder, and it is possible
to manually edit and/or add packages in this folder.
Current state:
- imports are published with a program to Aleo PM;
- we treat programs as files with no verification of imports (they can be changed locally and published in that state);
- name collisions cannot be resolved; a new import overwrites the existing one;
Also, imports are stored under the package name which makes it impossible to import
two different packages with the same name.
TBD
Another important detail in the scope of this proposal is that, in the future, Leo
programs will have the ability to be run with different proving systems
and curves, possibly creating incompatibility between programs written
for different proving systems or curves. To make a foundation for these features,
imports need to be managed with include/exclude lists for allowed (compatible)
proving systems and curves.
# Design
## Leo Manifest
## Leo Manifest - target section
To lay the foundation for the future of the Leo ecosystem and start integrating
information about program compatibility, we suggest adding two new fields to
the new `[target]` section of the Leo Manifest: `proving_system` and `curve`.
Currently, the Leo compiler only supports `Groth16` for the proving system and `Bls12_377`
for the curve; these are meant to be the default values in the Leo Manifest.
```toml
[project]
name = "first"
version = "0.1.0"
description = "The first package"
license = "MIT"
[target]
curve = "Bls12_377"
proving_system = "Groth16"
```
These fields are meant to be used to determine whether an imported program is
compatible with the original when support for different curves and proving systems
is added.
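As a purely hypothetical illustration (none of the names below exist in the Leo codebase), such a compatibility check could start out as a straightforward comparison of the two targets; the include/exclude lists mentioned in the summary would later generalize this exact-match rule:
```rust
// Hypothetical sketch only: illustrates the kind of check the [target] section
// enables once support for multiple curves and proving systems exists.
#[derive(Debug, PartialEq)]
struct Target {
    curve: String,
    proving_system: String,
}

/// An import is accepted only when its declared target matches the importing program's.
fn is_compatible(program: &Target, import: &Target) -> bool {
    program == import
}
```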
## Leo Manifest - dependencies
Dependencies section:
@ -49,13 +81,27 @@ Dependencies section:
[dependencies]
name = { author = "author", package = "package", version = "version" }
# alternative way of adding dependency record
[dependencies.name]
author = "author"
package = "package"
version = "1.0"
```
TBD
### Parameters description
The `name` field sets the name of the dependency in Leo code. That way we allow
the developer to resolve collisions in import names manually. So, for example,
if a developer is adding the `howard/silly-sudoku` package to their program, they
might define its in-code name as `sudoku` and import it with that name:
```ts
import sudoku;
```
`package`, `author` and `version` are the package name, package author, and
version respectively. They are already used as arguments in the `leo add`
command, so these fields are already familiar to Leo developers.
## Leo CLI
@ -63,21 +109,62 @@ To support the updated Manifest, a new command should be added to the Leo CLI.
```bash
# pull imports
leo pull
leo install
```
Alternatively it can be called `pull`.
```
leo pull
```
## Imports Restructurization
One of the goals of the proposed changes is to allow importing packages with the
same name but different authors. To resolve name conflict we suggest storing
imports as they are named in Leo Manifest file (Leo.toml).
same name but different authors. This has to be solved not only on the
language level but also on the level of storing program imports.
We suggest using the set of all 3 possible program identifiers for the import
folder name: `author-package@version`. Later it can be extended to
include a hash of the version, but having the initial set already solves name
collisions.
<!-- The suggested change is soft. It changes only the way imports are organized
with minimal changes to other parts of the language.
So, updated imports would look like:
We can consider implementing imports/username-package storage, but imports
will have to be resolved on a different level in compiler. -->
```
leo-program
├── Leo.toml
├── README.md
├── imports
│ ├── author1-program@0.1.0
│ │ └── ...
│ ├── author2-program2@1.0.4
│ └── ...
├── inputs
│ └── ...
└── src
└── main.leo
```
This change would also affect the way imports are being processed on the ASG
level, and we'd need to add an imports map as an argument to the Leo compiler.
The Leo Manifest's dependencies section needs to be parsed and passed as
a hashmap to the compiler:
```
first-program => author1-program@0.1.0
second-program => author2-program2@1.0.4
```
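A minimal sketch of producing that map, assuming the `toml` and `serde` crates and the simplified dependency shape shown earlier (illustrative only, not the actual Leo CLI code):
```rust
use std::collections::HashMap;

use serde::Deserialize;

#[derive(Deserialize)]
struct Dependency {
    author: String,
    package: String,
    version: String,
}

#[derive(Deserialize)]
struct Manifest {
    #[serde(default)]
    dependencies: HashMap<String, Dependency>,
}

/// Maps each in-code import name to its `author-package@version` folder name.
fn import_map(manifest_text: &str) -> Result<HashMap<String, String>, toml::de::Error> {
    let manifest: Manifest = toml::from_str(manifest_text)?;
    Ok(manifest
        .dependencies
        .into_iter()
        .map(|(name, dep)| (name, format!("{}-{}@{}", dep.author, dep.package, dep.version)))
        .collect())
}
```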
## Recursive Dependencies
This improvement introduces recursive dependencies. To solve this case preemptively,
the Leo CLI needs to check the dependency tree and throw an error when a recursive
dependency is met. We suggest implementing a simple dependency tree check while fetching
imports: if an imported dependency is already present on a higher level, abort the
execution (see the sketch below).
Later this solution can be improved by building a lock file containing all the
information on program dependencies, and the file itself will have enough data
to track and prevent recursion.
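A sketch of the dependency-tree check described above (function and type names are illustrative): walk the tree depth-first while fetching, keep the current import chain, and abort as soon as a package already on the chain is requested again.
```rust
/// Returns an error if `package` (or anything below it) imports a package
/// that is already on the current import chain.
fn check_no_recursion(
    package: &str,
    chain: &mut Vec<String>,
    deps_of: &dyn Fn(&str) -> Vec<String>, // resolves a package's direct imports
) -> Result<(), String> {
    if chain.iter().any(|p| p.as_str() == package) {
        return Err(format!(
            "recursive dependency: {} -> {}",
            chain.join(" -> "),
            package
        ));
    }
    chain.push(package.to_string());
    for dep in deps_of(package) {
        check_no_recursion(&dep, chain, deps_of)?;
    }
    chain.pop();
    Ok(())
}
```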
# Drawbacks
@ -85,6 +172,8 @@ This change might require the update of already published programs on Aleo PM due to the
Leo Manifest change. However, it is possible to implement it in a backward-compatible
way.
It also introduces the danger of having recursive dependencies; this problem is addressed in the Design section above.
# Effect on Ecosystem
The proposed improvement provides safety inside Leo programs and should not affect


@ -0,0 +1,231 @@
# Leo RFC 004: Integer Type Casts
## Authors
- Max Bruce
- Collin Chin
- Alessandro Coglio
- Eric McCarthy
- Jon Pavlik
- Damir Shamanaev
- Damon Sicore
- Howard Wu
## Status
DRAFT
# Summary
This proposal provides support for casts among integer types in Leo.
The syntax is similar to Rust.
Two possible semantics are discussed:
_value-preserving casts_,
which just serve to change types
but cause errors when values are not representable in the new types;
and _value-changing casts_,
which never cause errors but may change the mathematical values.
# Motivation
Currently the Leo integer types are "siloed":
arithmetic integer operations require operands of the same type
and return results of the same type.
There are no implicit or explicit ways to turn, for example,
a `u8` into a `u16`, even though
every non-negative integer that fits in 8 bits also fits in 16 bits.
However, the ability to convert values between different (integer) types
is a useful feature that is normally found in programming languages.
# Design
## Background
Recall that Leo supports the following _integer types_:
```
u8 u16 u32 u64 u128
i8 i16 i32 i64 i128
```
## Scope
This RFC proposes type casts between any two integer types,
but not between two non-integer types
or between an integer type and a non-integer type.
This RFC does not propose any implicit casts,
not even widening casts (i.e. upcasts)
from a type to another type with the same signedness
and with the same or larger size
(e.g. from `u8` to `u16`).
All the type casts must be explicit.
## Syntax and Static Semantics
The proposed syntax is
```
<expression> as <integer-type>
```
where `<expression>` must have an integer type.
The ABNF grammar is modified as follows:
```
; add this rule:
cast-expression = unary-expression
/ cast-expression %s"as" integer-type
; modify this rule:
exponential-expression = cast-expression
/ cast-expression "**" exponential-expression
```
There is no need to modify the `keyword` rule
because it already includes `as` as one of the keywords.
Note the use of `integer-type` in the `cast-expression` rule;
an alternative is to use `type` there
and check post-parsing that the type is in fact an integer one.
The above grammar rules imply that casts bind
tighter than binary operators and looser than unary operators.
For instance,
```
x + - y as u8
```
is like
```
x + ((- y) as u8)
```
This precedence is the same as in Rust:
see [here](https://doc.rust-lang.org/stable/reference/expressions.html#expression-precedence).
## Dynamic Semantics
When the mathematical integer value of the expression
is representable in the type that the expression is cast to,
there is no question that the cast must succeed
and merely change the type of the Leo value,
but not its mathematical integer value.
This is always the case when the cast is to a type
with the same signedness and with the same or larger size.
This is also the case when
the cast is to a type whose range does not cover the range of the source type
but the value in question is in the intersection of the two ranges.
When the mathematical integer value of the expression
is not representable in the type that the expression is cast to,
there are two possible approaches, discussed below.
### Value-Preserving Casts
The first approach is to deem that situation erroneous.
That is, to require casts to always preserve the mathematical integer values.
In this approach, casts only serve to change types, never values.
When values are to be changed, separate (built-in) functions can be used,
e.g. to mask bits and achieve the same effect as
the value-changing casts discussed below.
From one point of view, this approach seems to match Leo's
treatment of potentially erroneous situations like integer overflows:
the principle is that developers should explicitly use
operations that may overflow if that is their intention,
rather than having those situations possibly occur unexpectedly.
A value-preserving cast to a type
whose range does not cover the original type's range
implicitly expresses a developer expectation that the value
is actually in the intersection of the two types' ranges,
in the same way that the use of integer addition
implicitly expresses the expectation that the addition does not overflow.
Consider this somewhat abstract example:
```
... // some computations on u32 values, which could not be done with u16
let r: u32 = ...; // this is the final result of the u32 operations above
let s: u16 = r as u16; // but r is expected to fit in u16, so we cast it here
```
With value-preserving casts, the expectation mentioned above
is checked by the Leo compiler during proof generation,
in the same way as with integer overflow.
In the example above,
if instead the variable `s` is meant to contain the low 16 bits of `r`,
e.g. in a cryptographic computation,
then the value-preserving cast should be preceded by
an explicit operation to obtain the low 16 bits, making the intent clear:
```
... // some computations on u32 values, which could not be done with u16
let r: u32 = ...; // this is the final result of the u32 operations above
let r_low16: u32 = r & 0xFFFF; // assuming we have bitwise ops and hex literals
let s: u16 = r_low16 as u16; // no value change here
```
### Value-Changing Casts
The second approach is the following:
1. `uN` to `uM` with `N < M`: just change type of value.
2. `uN` to `uM` with `N > M`: take low `M` bits of value.
3. `iN` to `iM` with `N < M`: just change type of value.
4. `iN` to `iM` with `N > M`: take low `M` bits of value.
5. `uN` to `iM` with `N < M`: zero-extend to `M` bits and re-interpret as signed.
6. `uN` to `iM` with `N > M`: take low `M` bits and re-interpret as signed.
7. `uN` to `iN`: re-interpret as signed
8. `iN` to `uM` with `N < M`: sign-extend to `M` bits and re-interpret as unsigned.
9. `iN` to `uM` with `N > M`: take low `M` bits and re-interpret as unsigned.
10. `iN` to `uN`: re-interpret as unsigned
Except for the 1st, 3rd, and 5th cases, the value may change.
This approach is common in other programming languages.
However, it should be noted that other programming languages
typically do not check for overflow in integer operations either
(at least, not for production code).
Presumably, the behavior of type casts in those programming languages
is motivated by efficiency of execution, at least in part.
Since in Leo the input data is available at compile time,
considerations that apply to typical programming languages
do not necessarily apply to Leo.
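For concreteness, Rust's built-in `as` casts follow essentially the value-changing rules listed above; a few checked examples in ordinary Rust (not Leo syntax):
```rust
fn main() {
    // Rule 2: uN to uM with N > M keeps the low M bits.
    assert_eq!(300u16 as u8, 44); // 300 = 0x012C; low 8 bits are 0x2C = 44
    // Rule 7: uN to iN re-interprets the same bits as signed.
    assert_eq!(200u8 as i8, -56); // 200 = 0xC8 = -56 in two's complement
    // Rule 8: iN to uM with N < M sign-extends, then re-interprets as unsigned.
    assert_eq!((-1i8) as u16, 0xFFFF);
    // Rule 10: iN to uN re-interprets as unsigned.
    assert_eq!((-1i8) as u8, 255);
}
```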
Back to the somewhat abstract example in the section on value-preserving casts,
with value-changing casts, the expectation that the final result fits in `u16`
would have to be checked with explicit code:
```
... // some computations on u32 values, which could not be done with u16
let r: u32 = ...; // this is the final result of the u32 operations above
if (r > 0xFFFF) {
... // error
}
let s: u16 = r as u16; // could change value in principle, but does not here
```
However, it would be easy for a developer to neglect to add the checking code,
and thus have the Leo code silently produce an unexpected result.
## Compilation to R1CS
It should be possible to compile Leo casts to the same R1CS constraints
whether we choose the value-preserving or value-changing semantics.
If the R1CS constraints represent Leo integers as bits,
the bits of the new value can be determined from the bits of the old value,
with additional zero or sign extension bits when needed.
This is clearly the value-changing behavior.
With the value-preserving behavior,
all casts for the known inputs are checked,
and thus the value-changing behavior coincides
with the value-preserving behavior if the checks succeed.
Thus, if he behavior of the R1CS constraints is "don't care"
for Leo program inputs that cause errors (such as cast errors),
the compilation strategy for value-changing casts
should be also adequate for value-preserving casts.
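As an illustration of that bit-level view (ordinary Rust over plain booleans, not actual R1CS gadget code), a cast simply truncates or extends the source bit vector, using the sign bit for extension when the source is signed:
```rust
/// Little-endian bits (least significant first). Truncates or extends to
/// `target_width`, zero-extending unsigned sources and sign-extending signed ones.
fn cast_bits(bits: &[bool], source_signed: bool, target_width: usize) -> Vec<bool> {
    let extension = if source_signed {
        *bits.last().unwrap_or(&false) // sign bit of the source
    } else {
        false
    };
    (0..target_width)
        .map(|i| bits.get(i).copied().unwrap_or(extension))
        .collect()
}
```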
# Drawbacks
This proposal does not appear to bring any drawbacks,
other than making the language and compiler inevitably more complex.
But the benefits of supporting type casts justify the extra complexity.
# Effect on Ecosystem
This proposal does not appear to have any direct effects on the ecosystem.
# Alternatives
The 'Design' section above already discusses two alternative semantics.
After we settle on one, the other one could be mentioned in this section.


@ -19,6 +19,7 @@ use leo_compiler::{
compiler::{thread_leaked_context, Compiler},
group::targets::edwards_bls12::EdwardsGroupType,
CompilerOptions,
TheoremOptions,
};
use leo_package::{
inputs::*,
@ -43,6 +44,14 @@ pub struct BuildOptions {
pub disable_code_elimination: bool,
#[structopt(long, help = "Disable all compiler optimizations")]
pub disable_all_optimizations: bool,
#[structopt(long, help = "Writes all theorem input AST files.")]
pub enable_all_theorems: bool,
#[structopt(long, help = "Writes AST files needed for the initial theorem before any changes.")]
pub enable_initial_theorem: bool,
#[structopt(long, help = "Writes AST files needed for canonicalization theorem.")]
pub enable_canonicalized_theorem: bool,
#[structopt(long, help = "Writes AST files needed for type inference theorem.")]
pub enable_type_inferenced_theorem: bool,
}
impl Default for BuildOptions {
@ -51,6 +60,10 @@ impl Default for BuildOptions {
disable_constant_folding: true,
disable_code_elimination: true,
disable_all_optimizations: true,
enable_all_theorems: false,
enable_initial_theorem: false,
enable_canonicalized_theorem: false,
enable_type_inferenced_theorem: false,
}
}
}
@ -73,6 +86,24 @@ impl From<BuildOptions> for CompilerOptions {
}
}
impl From<BuildOptions> for TheoremOptions {
fn from(options: BuildOptions) -> Self {
if options.enable_all_theorems {
TheoremOptions {
initial: true,
canonicalized: true,
type_inferenced: true,
}
} else {
TheoremOptions {
initial: options.enable_initial_theorem,
canonicalized: options.enable_canonicalized_theorem,
type_inferenced: options.enable_type_inferenced_theorem,
}
}
}
}
/// Compile and build program command.
#[derive(StructOpt, Debug)]
#[structopt(setting = structopt::clap::AppSettings::ColoredHelp)]
@ -141,6 +172,7 @@ impl Command for Build {
&state_string,
&state_path,
thread_leaked_context(),
Some(self.compiler_options.clone().into()),
Some(self.compiler_options.into()),
)?;


@ -112,6 +112,7 @@ impl Command for Test {
output_directory.clone(),
thread_leaked_context(),
Some(self.compiler_options.clone().into()),
Some(self.compiler_options.clone().into()),
)?;
let temporary_program = program;


@ -5,19 +5,26 @@ input_file:
- input/complex_access.in
*/
function main (a: [u8; 8], b: [[u8; 3]; 3], c: [(u8, u32); 1]) -> bool {
function main (a: [u8; 8], b: u32, c: [[u8; 3]; 3], d: [(u8, u32); 1], e: [u8; (3, 4)] ) -> bool {
a[0..3][b] = 93;
a[2..6][1] = 87;
a[2..6][1] *= 2;
a[2..3] = [42u8];
a[6..][0] = 43u8;
a[0..1][0..1] = [200];
b[0..2][0] = [1u8; 3];
b[1..][1][1..2][0] = 126;
b[1..][0] = [42, 43, 44];
c[0..2][0] = [1u8; 3];
c[1..][1][1..2][0] = 126;
c[1..][0] = [42, 43, 44];
c[..1][0].1 = 1;
d[..1][0].1 = 1;
e[0..][0] = [22; 4];
e[0..][0][0] = 33;
return
a == [1u8, 2, 42, 87, 5, 6, 43, 8]
&& b == [[1u8, 1, 1], [42, 43, 44], [7, 126, 9]]
&& c == [(0u8, 1u32)];
a == [200u8, 93, 42, 174, 5, 6, 43, 8]
&& c == [[1u8, 1, 1], [42, 43, 44], [7, 126, 9]]
&& d == [(0u8, 1u32)]
&& e == [[33u8, 22, 22, 22], [0, 0, 0, 0], [0, 0, 0, 0]];
}


@ -1,7 +1,9 @@
[main]
a: [u8; 8] = [1u8, 2, 3, 4, 5, 6, 7, 8];
b: [[u8; 3]; 3] = [[1u8, 2, 3], [4, 5, 6], [7, 8, 9]];
c: [(u8, u32); 1] = [(0u8, 0u32)];
b: u32 = 1;
c: [[u8; 3]; 3] = [[1u8, 2, 3], [4, 5, 6], [7, 8, 9]];
d: [(u8, u32); 1] = [(0u8, 0u32)];
e: [u8; (3, 4)] = [0u8; (3, 4)];
[registers]
out: bool = true;


@ -4,11 +4,11 @@ expectation: Pass
outputs:
- circuit:
num_public_variables: 0
num_private_variables: 239
num_constraints: 239
at: 61be39f73914fdb4059c5d5e5776840262ab56dc414aa09106ddfee1d27ba174
bt: 3f2d18346ecde92782f60a29649e187402d8f2059cf934b31f5606c4d4381883
ct: db5fb9e237a13bca372ce7d3f232661fdf07c8ae7ef3d99517438a3532f59a41
num_private_variables: 637
num_constraints: 662
at: 9f1bcfcac007139af57f04172704330d348767a9c51fe8ce15fb60f457fa6337
bt: b6500ddf37d10958fdc014dcb2f54b37a2c684f05b7f0aa4776556331db8c924
ct: a15c712d74487a80767116103c38fa7277066705d0c7f1f9902a6016466706f0
output:
- input_file: input/complex_access.in
output: