Merge branch 'trunk' into list-map-ownership

Folkert de Vries 2022-05-07 11:59:51 +02:00 committed by GitHub
commit 201d09d9bf
71 changed files with 2250 additions and 2560 deletions

View File

@ -78,3 +78,4 @@ Jared Cone <jared.cone@gmail.com>
Sean Hagstrom <sean@seanhagstrom.com>
Kas Buunk <kasbuunk@icloud.com>
Oskar Hahn <mail@oshahn.de>
Nuno Ferreira <nunogcferreira@gmail.com>

View File

@ -1,10 +1,9 @@
use bumpalo::Bump;
use roc_load::{LoadedModule, Threading};
use roc_target::TargetInfo;
use std::path::Path;
use bumpalo::Bump;
use roc_load::LoadedModule;
use roc_target::TargetInfo;
pub fn load_module(src_file: &Path) -> LoadedModule {
pub fn load_module(src_file: &Path, threading: Threading) -> LoadedModule {
let subs_by_module = Default::default();
let arena = Bump::new();
@ -20,6 +19,7 @@ pub fn load_module(src_file: &Path) -> LoadedModule {
subs_by_module,
TargetInfo::default_x86_64(),
roc_reporting::report::RenderTarget::ColorTerminal,
threading,
);
match loaded {

View File

@ -4,7 +4,7 @@ use roc_build::{
program::{self, Problems},
};
use roc_builtins::bitcode;
use roc_load::LoadingProblem;
use roc_load::{LoadingProblem, Threading};
use roc_mono::ir::OptLevel;
use roc_reporting::report::RenderTarget;
use roc_target::TargetInfo;
@ -40,6 +40,7 @@ pub fn build_file<'a>(
surgically_link: bool,
precompiled: bool,
target_valgrind: bool,
threading: Threading,
) -> Result<BuiltFile, LoadingProblem<'a>> {
let compilation_start = SystemTime::now();
let target_info = TargetInfo::from(target);
@ -55,6 +56,7 @@ pub fn build_file<'a>(
target_info,
// TODO: expose this from CLI?
RenderTarget::ColorTerminal,
threading,
)?;
use target_lexicon::Architecture;
@ -366,6 +368,7 @@ pub fn check_file(
target_info,
// TODO: expose this from CLI?
RenderTarget::ColorTerminal,
Threading::Multi,
)?;
let buf = &mut String::with_capacity(1024);

View File

@ -2,29 +2,17 @@ use std::ffi::OsStr;
use std::path::{Path, PathBuf};
use crate::FormatMode;
use bumpalo::collections::Vec;
use bumpalo::Bump;
use roc_error_macros::{internal_error, user_error};
use roc_fmt::def::fmt_def;
use roc_fmt::module::fmt_module;
use roc_fmt::Buf;
use roc_module::called_via::{BinOp, UnaryOp};
use roc_parse::ast::{
AbilityMember, AssignedField, Collection, Expr, Has, HasClause, Pattern, Spaced, StrLiteral,
StrSegment, Tag, TypeAnnotation, TypeDef, TypeHeader, ValueDef, WhenBranch,
};
use roc_parse::header::{
AppHeader, ExposedName, HostedHeader, ImportsEntry, InterfaceHeader, ModuleName, PackageEntry,
PackageName, PlatformHeader, PlatformRequires, To, TypedIdent,
};
use roc_fmt::spaces::RemoveSpaces;
use roc_fmt::{Ast, Buf};
use roc_parse::{
ast::{Def, Module},
ident::UppercaseIdent,
module::{self, module_defs},
parser::{Parser, SyntaxError},
state::State,
};
use roc_region::all::{Loc, Region};
fn flatten_directories(files: std::vec::Vec<PathBuf>) -> std::vec::Vec<PathBuf> {
let mut to_flatten = files;
@ -166,12 +154,6 @@ pub fn format(files: std::vec::Vec<PathBuf>, mode: FormatMode) -> Result<(), Str
Ok(())
}
#[derive(Debug, PartialEq)]
struct Ast<'a> {
module: Module<'a>,
defs: Vec<'a, Loc<Def<'a>>>,
}
fn parse_all<'a>(arena: &'a Bump, src: &'a str) -> Result<Ast<'a>, SyntaxError<'a>> {
let (module, state) = module::parse_header(arena, State::new(src.as_bytes()))
.map_err(|e| SyntaxError::Header(e.problem))?;
@ -189,575 +171,3 @@ fn fmt_all<'a>(arena: &'a Bump, buf: &mut Buf<'a>, ast: &'a Ast) {
buf.fmt_end_of_file();
}
/// RemoveSpaces normalizes the ast to something that we _expect_ to be invariant under formatting.
///
/// Currently this consists of:
/// * Removing newlines
/// * Removing comments
/// * Removing parens in Exprs
///
/// Long term, we actually want this transform to preserve comments (so we can assert they're maintained by formatting)
/// - but there are currently several bugs where they're _not_ preserved.
/// TODO: ensure formatting retains comments
trait RemoveSpaces<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self;
}
impl<'a> RemoveSpaces<'a> for Ast<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
Ast {
module: self.module.remove_spaces(arena),
defs: {
let mut defs = Vec::with_capacity_in(self.defs.len(), arena);
for d in &self.defs {
defs.push(d.remove_spaces(arena))
}
defs
},
}
}
}
impl<'a> RemoveSpaces<'a> for Module<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match self {
Module::Interface { header } => Module::Interface {
header: InterfaceHeader {
name: header.name.remove_spaces(arena),
exposes: header.exposes.remove_spaces(arena),
imports: header.imports.remove_spaces(arena),
before_header: &[],
after_interface_keyword: &[],
before_exposes: &[],
after_exposes: &[],
before_imports: &[],
after_imports: &[],
},
},
Module::App { header } => Module::App {
header: AppHeader {
name: header.name.remove_spaces(arena),
packages: header.packages.remove_spaces(arena),
imports: header.imports.remove_spaces(arena),
provides: header.provides.remove_spaces(arena),
provides_types: header.provides_types.map(|ts| ts.remove_spaces(arena)),
to: header.to.remove_spaces(arena),
before_header: &[],
after_app_keyword: &[],
before_packages: &[],
after_packages: &[],
before_imports: &[],
after_imports: &[],
before_provides: &[],
after_provides: &[],
before_to: &[],
after_to: &[],
},
},
Module::Platform { header } => Module::Platform {
header: PlatformHeader {
name: header.name.remove_spaces(arena),
requires: header.requires.remove_spaces(arena),
exposes: header.exposes.remove_spaces(arena),
packages: header.packages.remove_spaces(arena),
imports: header.imports.remove_spaces(arena),
provides: header.provides.remove_spaces(arena),
before_header: &[],
after_platform_keyword: &[],
before_requires: &[],
after_requires: &[],
before_exposes: &[],
after_exposes: &[],
before_packages: &[],
after_packages: &[],
before_imports: &[],
after_imports: &[],
before_provides: &[],
after_provides: &[],
},
},
Module::Hosted { header } => Module::Hosted {
header: HostedHeader {
name: header.name.remove_spaces(arena),
exposes: header.exposes.remove_spaces(arena),
imports: header.imports.remove_spaces(arena),
generates: header.generates.remove_spaces(arena),
generates_with: header.generates_with.remove_spaces(arena),
before_header: &[],
after_hosted_keyword: &[],
before_exposes: &[],
after_exposes: &[],
before_imports: &[],
after_imports: &[],
before_generates: &[],
after_generates: &[],
before_with: &[],
after_with: &[],
},
},
}
}
}
impl<'a> RemoveSpaces<'a> for &'a str {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
self
}
}
impl<'a, T: RemoveSpaces<'a> + Copy> RemoveSpaces<'a> for Spaced<'a, T> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
Spaced::Item(a) => Spaced::Item(a.remove_spaces(arena)),
Spaced::SpaceBefore(a, _) => a.remove_spaces(arena),
Spaced::SpaceAfter(a, _) => a.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for ExposedName<'a> {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
*self
}
}
impl<'a> RemoveSpaces<'a> for ModuleName<'a> {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
*self
}
}
impl<'a> RemoveSpaces<'a> for PackageName<'a> {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
*self
}
}
impl<'a> RemoveSpaces<'a> for To<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
To::ExistingPackage(a) => To::ExistingPackage(a),
To::NewPackage(a) => To::NewPackage(a.remove_spaces(arena)),
}
}
}
impl<'a> RemoveSpaces<'a> for TypedIdent<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
TypedIdent {
ident: self.ident.remove_spaces(arena),
spaces_before_colon: &[],
ann: self.ann.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for PlatformRequires<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
PlatformRequires {
rigids: self.rigids.remove_spaces(arena),
signature: self.signature.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for UppercaseIdent<'a> {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
*self
}
}
impl<'a> RemoveSpaces<'a> for PackageEntry<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
PackageEntry {
shorthand: self.shorthand,
spaces_after_shorthand: &[],
package_name: self.package_name.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for ImportsEntry<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
ImportsEntry::Module(a, b) => ImportsEntry::Module(a, b.remove_spaces(arena)),
ImportsEntry::Package(a, b, c) => ImportsEntry::Package(a, b, c.remove_spaces(arena)),
}
}
}
impl<'a, T: RemoveSpaces<'a>> RemoveSpaces<'a> for Option<T> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
self.as_ref().map(|a| a.remove_spaces(arena))
}
}
impl<'a, T: RemoveSpaces<'a> + std::fmt::Debug> RemoveSpaces<'a> for Loc<T> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
let res = self.value.remove_spaces(arena);
Loc::at(Region::zero(), res)
}
}
impl<'a, A: RemoveSpaces<'a>, B: RemoveSpaces<'a>> RemoveSpaces<'a> for (A, B) {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
(self.0.remove_spaces(arena), self.1.remove_spaces(arena))
}
}
impl<'a, T: RemoveSpaces<'a>> RemoveSpaces<'a> for Collection<'a, T> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
let mut items = Vec::with_capacity_in(self.items.len(), arena);
for item in self.items {
items.push(item.remove_spaces(arena));
}
Collection::with_items(items.into_bump_slice())
}
}
impl<'a, T: RemoveSpaces<'a> + std::fmt::Debug> RemoveSpaces<'a> for &'a [T] {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
let mut items = Vec::with_capacity_in(self.len(), arena);
for item in *self {
let res = item.remove_spaces(arena);
items.push(res);
}
items.into_bump_slice()
}
}
impl<'a> RemoveSpaces<'a> for UnaryOp {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
*self
}
}
impl<'a> RemoveSpaces<'a> for BinOp {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
*self
}
}
impl<'a, T: RemoveSpaces<'a>> RemoveSpaces<'a> for &'a T {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
arena.alloc((*self).remove_spaces(arena))
}
}
impl<'a> RemoveSpaces<'a> for TypeDef<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
use TypeDef::*;
match *self {
Alias {
header: TypeHeader { name, vars },
ann,
} => Alias {
header: TypeHeader {
name: name.remove_spaces(arena),
vars: vars.remove_spaces(arena),
},
ann: ann.remove_spaces(arena),
},
Opaque {
header: TypeHeader { name, vars },
typ,
} => Opaque {
header: TypeHeader {
name: name.remove_spaces(arena),
vars: vars.remove_spaces(arena),
},
typ: typ.remove_spaces(arena),
},
Ability {
header: TypeHeader { name, vars },
loc_has,
members,
} => Ability {
header: TypeHeader {
name: name.remove_spaces(arena),
vars: vars.remove_spaces(arena),
},
loc_has: loc_has.remove_spaces(arena),
members: members.remove_spaces(arena),
},
}
}
}
impl<'a> RemoveSpaces<'a> for ValueDef<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
use ValueDef::*;
match *self {
Annotation(a, b) => Annotation(a.remove_spaces(arena), b.remove_spaces(arena)),
Body(a, b) => Body(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
),
AnnotatedBody {
ann_pattern,
ann_type,
comment: _,
body_pattern,
body_expr,
} => AnnotatedBody {
ann_pattern: arena.alloc(ann_pattern.remove_spaces(arena)),
ann_type: arena.alloc(ann_type.remove_spaces(arena)),
comment: None,
body_pattern: arena.alloc(body_pattern.remove_spaces(arena)),
body_expr: arena.alloc(body_expr.remove_spaces(arena)),
},
Expect(a) => Expect(arena.alloc(a.remove_spaces(arena))),
}
}
}
impl<'a> RemoveSpaces<'a> for Def<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
Def::Type(def) => Def::Type(def.remove_spaces(arena)),
Def::Value(def) => Def::Value(def.remove_spaces(arena)),
Def::NotYetImplemented(a) => Def::NotYetImplemented(a),
Def::SpaceBefore(a, _) | Def::SpaceAfter(a, _) => a.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for Has<'a> {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
Has::Has
}
}
impl<'a> RemoveSpaces<'a> for AbilityMember<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
AbilityMember {
name: self.name.remove_spaces(arena),
typ: self.typ.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for WhenBranch<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
WhenBranch {
patterns: self.patterns.remove_spaces(arena),
value: self.value.remove_spaces(arena),
guard: self.guard.remove_spaces(arena),
}
}
}
impl<'a, T: RemoveSpaces<'a> + Copy + std::fmt::Debug> RemoveSpaces<'a> for AssignedField<'a, T> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
AssignedField::RequiredValue(a, _, c) => AssignedField::RequiredValue(
a.remove_spaces(arena),
arena.alloc([]),
arena.alloc(c.remove_spaces(arena)),
),
AssignedField::OptionalValue(a, _, c) => AssignedField::OptionalValue(
a.remove_spaces(arena),
arena.alloc([]),
arena.alloc(c.remove_spaces(arena)),
),
AssignedField::LabelOnly(a) => AssignedField::LabelOnly(a.remove_spaces(arena)),
AssignedField::Malformed(a) => AssignedField::Malformed(a),
AssignedField::SpaceBefore(a, _) => a.remove_spaces(arena),
AssignedField::SpaceAfter(a, _) => a.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for StrLiteral<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
StrLiteral::PlainLine(t) => StrLiteral::PlainLine(t),
StrLiteral::Line(t) => StrLiteral::Line(t.remove_spaces(arena)),
StrLiteral::Block(t) => StrLiteral::Block(t.remove_spaces(arena)),
}
}
}
impl<'a> RemoveSpaces<'a> for StrSegment<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
StrSegment::Plaintext(t) => StrSegment::Plaintext(t),
StrSegment::Unicode(t) => StrSegment::Unicode(t.remove_spaces(arena)),
StrSegment::EscapedChar(c) => StrSegment::EscapedChar(c),
StrSegment::Interpolated(t) => StrSegment::Interpolated(t.remove_spaces(arena)),
}
}
}
impl<'a> RemoveSpaces<'a> for Expr<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
Expr::Float(a) => Expr::Float(a),
Expr::Num(a) => Expr::Num(a),
Expr::NonBase10Int {
string,
base,
is_negative,
} => Expr::NonBase10Int {
string,
base,
is_negative,
},
Expr::Str(a) => Expr::Str(a.remove_spaces(arena)),
Expr::Access(a, b) => Expr::Access(arena.alloc(a.remove_spaces(arena)), b),
Expr::AccessorFunction(a) => Expr::AccessorFunction(a),
Expr::List(a) => Expr::List(a.remove_spaces(arena)),
Expr::RecordUpdate { update, fields } => Expr::RecordUpdate {
update: arena.alloc(update.remove_spaces(arena)),
fields: fields.remove_spaces(arena),
},
Expr::Record(a) => Expr::Record(a.remove_spaces(arena)),
Expr::Var { module_name, ident } => Expr::Var { module_name, ident },
Expr::Underscore(a) => Expr::Underscore(a),
Expr::Tag(a) => Expr::Tag(a),
Expr::OpaqueRef(a) => Expr::OpaqueRef(a),
Expr::Closure(a, b) => Expr::Closure(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
),
Expr::Defs(a, b) => {
Expr::Defs(a.remove_spaces(arena), arena.alloc(b.remove_spaces(arena)))
}
Expr::Backpassing(a, b, c) => Expr::Backpassing(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
arena.alloc(c.remove_spaces(arena)),
),
Expr::Expect(a, b) => Expr::Expect(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
),
Expr::Apply(a, b, c) => Expr::Apply(
arena.alloc(a.remove_spaces(arena)),
b.remove_spaces(arena),
c,
),
Expr::BinOps(a, b) => {
Expr::BinOps(a.remove_spaces(arena), arena.alloc(b.remove_spaces(arena)))
}
Expr::UnaryOp(a, b) => {
Expr::UnaryOp(arena.alloc(a.remove_spaces(arena)), b.remove_spaces(arena))
}
Expr::If(a, b) => Expr::If(a.remove_spaces(arena), arena.alloc(b.remove_spaces(arena))),
Expr::When(a, b) => {
Expr::When(arena.alloc(a.remove_spaces(arena)), b.remove_spaces(arena))
}
Expr::ParensAround(a) => {
// The formatter can remove redundant parentheses, so also remove these when normalizing for comparison.
a.remove_spaces(arena)
}
Expr::MalformedIdent(a, b) => Expr::MalformedIdent(a, b),
Expr::MalformedClosure => Expr::MalformedClosure,
Expr::PrecedenceConflict(a) => Expr::PrecedenceConflict(a),
Expr::SpaceBefore(a, _) => a.remove_spaces(arena),
Expr::SpaceAfter(a, _) => a.remove_spaces(arena),
Expr::SingleQuote(a) => Expr::Num(a),
}
}
}
impl<'a> RemoveSpaces<'a> for Pattern<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
Pattern::Identifier(a) => Pattern::Identifier(a),
Pattern::Tag(a) => Pattern::Tag(a),
Pattern::OpaqueRef(a) => Pattern::OpaqueRef(a),
Pattern::Apply(a, b) => Pattern::Apply(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
),
Pattern::RecordDestructure(a) => Pattern::RecordDestructure(a.remove_spaces(arena)),
Pattern::RequiredField(a, b) => {
Pattern::RequiredField(a, arena.alloc(b.remove_spaces(arena)))
}
Pattern::OptionalField(a, b) => {
Pattern::OptionalField(a, arena.alloc(b.remove_spaces(arena)))
}
Pattern::NumLiteral(a) => Pattern::NumLiteral(a),
Pattern::NonBase10Literal {
string,
base,
is_negative,
} => Pattern::NonBase10Literal {
string,
base,
is_negative,
},
Pattern::FloatLiteral(a) => Pattern::FloatLiteral(a),
Pattern::StrLiteral(a) => Pattern::StrLiteral(a),
Pattern::Underscore(a) => Pattern::Underscore(a),
Pattern::Malformed(a) => Pattern::Malformed(a),
Pattern::MalformedIdent(a, b) => Pattern::MalformedIdent(a, b),
Pattern::QualifiedIdentifier { module_name, ident } => {
Pattern::QualifiedIdentifier { module_name, ident }
}
Pattern::SpaceBefore(a, _) => a.remove_spaces(arena),
Pattern::SpaceAfter(a, _) => a.remove_spaces(arena),
Pattern::SingleQuote(a) => Pattern::NumLiteral(a),
}
}
}
impl<'a> RemoveSpaces<'a> for TypeAnnotation<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
TypeAnnotation::Function(a, b) => TypeAnnotation::Function(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
),
TypeAnnotation::Apply(a, b, c) => TypeAnnotation::Apply(a, b, c.remove_spaces(arena)),
TypeAnnotation::BoundVariable(a) => TypeAnnotation::BoundVariable(a),
TypeAnnotation::As(a, _, c) => {
TypeAnnotation::As(arena.alloc(a.remove_spaces(arena)), &[], c)
}
TypeAnnotation::Record { fields, ext } => TypeAnnotation::Record {
fields: fields.remove_spaces(arena),
ext: ext.remove_spaces(arena),
},
TypeAnnotation::TagUnion { ext, tags } => TypeAnnotation::TagUnion {
ext: ext.remove_spaces(arena),
tags: tags.remove_spaces(arena),
},
TypeAnnotation::Inferred => TypeAnnotation::Inferred,
TypeAnnotation::Wildcard => TypeAnnotation::Wildcard,
TypeAnnotation::Where(annot, has_clauses) => TypeAnnotation::Where(
arena.alloc(annot.remove_spaces(arena)),
arena.alloc(has_clauses.remove_spaces(arena)),
),
TypeAnnotation::SpaceBefore(a, _) => a.remove_spaces(arena),
TypeAnnotation::SpaceAfter(a, _) => a.remove_spaces(arena),
TypeAnnotation::Malformed(a) => TypeAnnotation::Malformed(a),
}
}
}
impl<'a> RemoveSpaces<'a> for HasClause<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
HasClause {
var: self.var.remove_spaces(arena),
ability: self.ability.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for Tag<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
Tag::Apply { name, args } => Tag::Apply {
name: name.remove_spaces(arena),
args: args.remove_spaces(arena),
},
Tag::Malformed(a) => Tag::Malformed(a),
Tag::SpaceBefore(a, _) => a.remove_spaces(arena),
Tag::SpaceAfter(a, _) => a.remove_spaces(arena),
}
}
}

View File

@ -7,7 +7,7 @@ use clap::Command;
use clap::{Arg, ArgMatches};
use roc_build::link::LinkType;
use roc_error_macros::user_error;
use roc_load::LoadingProblem;
use roc_load::{LoadingProblem, Threading};
use roc_mono::ir::OptLevel;
use std::env;
use std::io;
@ -321,6 +321,7 @@ pub fn build(
surgically_link,
precompiled,
target_valgrind,
Threading::Multi,
);
match res_binary_path {

View File

@ -106,6 +106,9 @@ comptime {
num.exportToIntCheckingMax(FROM, TO, ROC_BUILTINS ++ "." ++ NUM ++ ".int_to_" ++ @typeName(TO) ++ "_checking_max.");
num.exportToIntCheckingMaxAndMin(FROM, TO, ROC_BUILTINS ++ "." ++ NUM ++ ".int_to_" ++ @typeName(TO) ++ "_checking_max_and_min.");
}
num.exportRoundF32(FROM, ROC_BUILTINS ++ "." ++ NUM ++ ".round_f32.");
num.exportRoundF64(FROM, ROC_BUILTINS ++ "." ++ NUM ++ ".round_f64.");
}
inline for (FLOATS) |T| {
@ -114,7 +117,6 @@ comptime {
num.exportAtan(T, ROC_BUILTINS ++ "." ++ NUM ++ ".atan.");
num.exportIsFinite(T, ROC_BUILTINS ++ "." ++ NUM ++ ".is_finite.");
num.exportRound(T, ROC_BUILTINS ++ "." ++ NUM ++ ".round.");
}
}

View File

@ -90,10 +90,19 @@ pub fn exportAtan(comptime T: type, comptime name: []const u8) void {
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportRound(comptime T: type, comptime name: []const u8) void {
pub fn exportRoundF32(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: T) callconv(.C) i64 {
return @floatToInt(i64, (@round(input)));
fn func(input: f32) callconv(.C) T {
return @floatToInt(T, (@round(input)));
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportRoundF64(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(input: f64) callconv(.C) T {
return @floatToInt(T, (@round(input)));
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });

View File

@ -1,210 +0,0 @@
interface Dict
exposes
[
Dict,
contains,
difference,
empty,
get,
keys,
insert,
intersection,
len,
remove,
single,
union,
values,
walk
]
imports []
## A [dictionary](https://en.wikipedia.org/wiki/Associative_array) that lets you associate keys with values.
##
## ### Inserting
##
## The most basic way to use a dictionary is to start with an empty one and then:
## 1. Call [Dict.insert] passing a key and a value, to associate that key with that value in the dictionary.
## 2. Later, call [Dict.get] passing the same key as before, and it will return the value you stored.
##
## Here's an example of a dictionary which uses a city's name as the key, and its population as the associated value.
##
## populationByCity =
## Dict.empty
## |> Dict.insert "London" 8_961_989
## |> Dict.insert "Philadelphia" 1_603_797
## |> Dict.insert "Shanghai" 24_870_895
## |> Dict.insert "Delhi" 16_787_941
## |> Dict.insert "Amsterdam" 872_680
##
## ### Converting to a [List]
##
## We can call [Dict.toList] on `populationByCity` to turn it into a list of key-value pairs:
##
## Dict.toList populationByCity == [
## { k: "London", v: 8961989 },
## { k: "Philadelphia", v: 1603797 },
## { k: "Shanghai", v: 24870895 },
## { k: "Delhi", v: 16787941 },
## { k: "Amsterdam", v: 872680 },
## ]
##
## We can use the similar [Dict.keyList] and [Dict.values] functions to get only the keys or only the values,
## instead of getting these `{ k, v }` records that contain both.
##
## You may notice that these lists have the same order as the original insertion order. This will be true if
## all you ever do is [insert] and [get] operations on the dictionary, but [remove] operations can change this order.
## Let's see how that looks.
##
## ### Removing
##
## We can remove an element from the dictionary, like so:
##
## populationByCity
## |> Dict.remove "Philadelphia"
## |> Dict.toList
## ==
## [
## { k: "London", v: 8961989 },
## { k: "Amsterdam", v: 872680 },
## { k: "Shanghai", v: 24870895 },
## { k: "Delhi", v: 16787941 },
## ]
##
## Notice that the order changed! Philadelphia has not only been removed from the list, but Amsterdam - the last
## entry we inserted - has been moved into the spot where Philadelphia was previously. This is exactly what
## [Dict.remove] does: it removes an element and moves the most recent insertion into the vacated spot.
##
## This move is done as a performance optimization, and it lets [remove] have
## [constant time complexity](https://en.wikipedia.org/wiki/Time_complexity#Constant_time). If you need a removal
## operation which preserves ordering, [Dict.removeShift] will remove the element and then shift everything after it
## over one spot. Be aware that this shifting requires copying every single entry after the removed element, though,
## so it can be massively more costly than [remove]! This makes [remove] the recommended default choice;
## [removeShift] should only be used if maintaining original insertion order is absolutely necessary.
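This swap-removal strategy is the same idea as Rust's `Vec::swap_remove`. Below is a minimal Rust sketch (not Roc's implementation, and using a plain vector rather than a Dict) contrasting it with the order-preserving `Vec::remove`, which mirrors what [removeShift] describes:

```
fn main() {
    let mut cities = vec!["London", "Philadelphia", "Shanghai", "Delhi", "Amsterdam"];

    // Constant-time removal: the last element ("Amsterdam") is moved into
    // the vacated slot, so the original insertion order is not preserved.
    cities.swap_remove(1);
    assert_eq!(cities, ["London", "Amsterdam", "Shanghai", "Delhi"]);

    // Order-preserving removal: every element after the removed one is
    // shifted over by one slot, which costs linear time.
    let mut cities = vec!["London", "Philadelphia", "Shanghai", "Delhi", "Amsterdam"];
    cities.remove(1);
    assert_eq!(cities, ["London", "Shanghai", "Delhi", "Amsterdam"]);
}
```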
##
## ### Equality
##
## When comparing two dictionaries for equality, they are `==` only if both their contents and their
## orderings match. This preserves the property that if `dict1 == dict2`, you should be able to rely on
## `fn dict1 == fn dict2` also being `True`, even if `fn` relies on the dictionary's ordering (for example, if
## `fn` is `Dict.toList` or calls it internally.)
##
## The [Dict.hasSameContents] function gives an alternative to `==` which ignores ordering
## and returns `True` if both dictionaries have the same keys and associated values.
Dict k v := [ Dict k v ] # TODO k should require a hashing and equating constraint
## An empty dictionary.
empty : Dict * *
size : Dict * * -> Nat
isEmpty : Dict * * -> Bool
## Returns a [List] of the dictionary's key/value pairs.
##
## See [walk] to walk over the key/value pairs without creating an intermediate data structure.
toList : Dict k v -> List { k, v }
## Returns a [List] of the dictionary's keys.
##
## See [keySet] to get a [Set] of keys instead, or [walkKeys] to walk over the keys without creating
## an intermediate data structure.
keyList : Dict key * -> List key
## Returns a [Set] of the dictionary's keys.
##
## See [keyList] to get a [List] of keys instead, or [walkKeys] to walk over the keys without creating
## an intermediate data structure.
keySet : Dict key * -> Set key
## Returns a [List] of the dictionary's values.
##
## See [walkValues] to walk over the values without creating an intermediate data structure.
values : Dict * value -> List value
walk : Dict k v, state, (state, k, v -> state) -> state
walkKeys : Dict key *, state, (state, key -> state) -> state
walkValues : Dict * value, state, (state, value -> state) -> state
## Convert each key and value in the #Dict to something new, by calling a conversion
## function on each of them. Then return a new #Dict of the converted keys and values.
##
## >>> Dict.map {{ 3.14 => "pi", 1.0 => "one" }} \{ key, value } -> { key:
##
## >>> Dict.map {[ "", "a", "bc" ]} Str.isEmpty
##
## `map` functions like this are common in Roc, and they all work similarly.
## See for example [List.map], [Result.map], and `Set.map`.
map :
Dict beforeKey beforeVal,
({ k: beforeKey, v: beforeVal } -> { k: afterKey, v: afterVal })
-> Dict afterKey afterVal
# DESIGN NOTES: The reason for panicking when given NaN is that:
# * If we allowed NaN in, Dict.insert would no longer be idempotent.
# * If we allowed NaN but overrode its semantics to make it feel like "NaN == NaN" we'd need isNaN checks in all hashing operations as well as all equality checks (during collision detection), not just insert. This would be much worse for performance than panicking on insert, which only requires one extra conditional on insert.
# * It's obviously invalid; the whole point of NaN is that an error occurred. Giving a runtime error notifies you when this problem happens. Giving it only on insert is the best for performance, because it means you aren't paying for isNaN checks on lookups as well.
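As a quick illustration of why *NaN* keys are problematic (a Rust sketch, not Roc code): IEEE 754 defines NaN as unequal to itself, so any equality-based lookup for a NaN key can never match the entry that was inserted.

```
fn main() {
    let nan = f64::NAN;

    // NaN compares unequal to everything, including itself, so an
    // equality-based lookup for a NaN key could never find the entry.
    assert!(nan != nan);
    assert!(!(nan == nan));
}
```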
# TODO: removed `'` from signature because parser does not support it yet
# Original signature: insert : Dict 'key val, 'key, val -> Dict 'key val
## Make sure never to insert a key of *NaN* into a [Dict]! Because *NaN* is
## defined to be unequal to *NaN*, inserting a *NaN* key results in an entry
## that can never be retrieved or removed from the [Dict].
insert : Dict key val, key, val -> Dict key val
## Removes a key from the dictionary in [constant time](https://en.wikipedia.org/wiki/Time_complexity#Constant_time), without preserving insertion order.
##
## Since the internal [List] which determines the order of operations like [toList] and [walk] cannot have gaps in it,
## whenever an element is removed from the middle of that list, something must be done to eliminate the resulting gap.
##
## * [removeShift] eliminates the gap by shifting over every element after the removed one. This takes [linear time](https://en.wikipedia.org/wiki/Time_complexity#Linear_time),
## and preserves the original ordering.
## * [remove] eliminates the gap by replacing the removed element with the one at the end of the list - that is, the most recent insertion. This takes [constant time](https://en.wikipedia.org/wiki/Time_complexity#Constant_time), but does not preserve the original ordering.
##
## For example, suppose we have a `populationByCity` with these contents:
##
## Dict.toList populationByCity == [
## { k: "London", v: 8961989 },
## { k: "Philadelphia", v: 1603797 },
## { k: "Shanghai", v: 24870895 },
## { k: "Delhi", v: 16787941 },
## { k: "Amsterdam", v: 872680 },
## ]
##
## Using `Dict.remove "Philadelphia"` on this will replace the `"Philadelphia"` entry with the most recent insertion,
## which is `"Amsterdam"` in this case.
##
## populationByCity
## |> Dict.remove "Philadelphia"
## |> Dict.toList
## ==
## [
## { k: "London", v: 8961989 },
## { k: "Amsterdam", v: 872680 },
## { k: "Shanghai", v: 24870895 },
## { k: "Delhi", v: 16787941 },
## ]
##
## Both [remove] and [removeShift] leave the dictionary with the same contents; they only differ in ordering and in
## performance. Since ordering only affects operations like [toList] and [walk], [remove] is the better default
## choice because it has much better performance characteristics; [removeShift] should only be used when it's
## absolutely necessary for operations like [toList] and [walk] to preserve the exact original insertion order.
remove : Dict k v, k -> Dict k v
## Removes a key from the dictionary in [linear time](https://en.wikipedia.org/wiki/Time_complexity#Linear_time), while preserving insertion order.
##
## It's better to use [remove] than this by default, since [remove] has [constant time complexity](https://en.wikipedia.org/wiki/Time_complexity#Constant_time),
## which commonly leads [removeShift] to take many times as long to run as [remove] does. However, [remove] does not
## preserve insertion order, so the slower [removeShift] exists only for use cases where it's absolutely necessary for
## ordering-sensitive functions like [toList] and [walk] to preserve the exact original insertion order.
##
## See the [remove] documentation for more details about the differences between [remove] and [removeShift].
removeShift : Dict k v, k -> Dict k v
## Returns whether both dictionaries have the same keys, and the same values associated with those keys.
## This is different from `==` in that it disregards the ordering of the keys and values.
hasSameContents : Dict k v, Dict k v -> Bool

View File

@ -1,705 +0,0 @@
interface List
exposes
[
List,
append,
concat,
contains,
drop,
dropAt,
dropLast,
first,
get,
isEmpty,
join,
keepErrs,
keepIf,
keepOks,
last,
len,
map,
map2,
map3,
map4,
mapJoin,
mapOrDrop,
mapWithIndex,
prepend,
product,
range,
repeat,
reverse,
set,
single,
sortWith,
split,
sublist,
sum,
swap,
walk,
walkBackwards,
walkUntil
]
imports []
## Types
## A sequential list of values.
##
## >>> [ 1, 2, 3 ] # a list of numbers
## >>> [ "a", "b", "c" ] # a list of strings
## >>> [ [ 1.1 ], [], [ 2.2, 3.3 ] ] # a list of lists of numbers
##
## The list `[ 1, "a" ]` gives an error, because each element in a list must have
## the same type. If you want to put a mix of [I64] and [Str] values into a list, try this:
##
## ```
## mixedList : List [ IntElem I64, StrElem Str ]*
## mixedList = [ IntElem 1, IntElem 2, StrElem "a", StrElem "b" ]
## ```
##
## The maximum size of a [List] is limited by the amount of heap memory available
## to the current process. If there is not enough memory available, attempting to
## create the list could crash. (On Linux, where [overcommit](https://www.etalabs.net/overcommit.html)
## is normally enabled, not having enough memory could result in the list appearing
## to be created just fine, but then crashing later.)
##
## > The theoretical maximum length for a list created in Roc is half of
## > `Num.maxNat`. Attempting to create a list bigger than that
## > in Roc code will always fail, although in practice it is likely to fail
## > at much smaller lengths due to insufficient memory being available.
##
## ## Performance Details
##
## Under the hood, a list is a record containing a `len : Nat` field as well
## as a pointer to a reference count and a flat array of bytes. Unique lists
## store a capacity #Nat instead of a reference count.
##
## ## Shared Lists
##
## Shared lists are [reference counted](https://en.wikipedia.org/wiki/Reference_counting).
##
## Each time a given list gets referenced, its reference count ("refcount" for short)
## gets incremented. Each time a list goes out of scope, its refcount gets
## decremented. Once a refcount has been decremented more times than it has been
## incremented, we know nothing is referencing it anymore, and the list's memory
## will be immediately freed.
##
## Let's look at an example.
##
## ratings = [ 5, 4, 3 ]
##
## { foo: ratings, bar: ratings }
##
## The first line binds the name `ratings` to the list `[ 5, 4, 3 ]`. The list
## begins with a refcount of 1, because so far only `ratings` is referencing it.
##
## The second line alters this refcount. `{ foo: ratings` references
## the `ratings` list, which will result in its refcount getting incremented
## from 1 to 2. Similarly, `bar: ratings }` also references the `ratings` list,
## which will result in its refcount getting incremented from 2 to 3.
##
## Let's turn this example into a function.
##
## getRatings = \first ->
## ratings = [ first, 4, 3 ]
##
## { foo: ratings, bar: ratings }
##
## getRatings 5
##
## At the end of the `getRatings` function, when the record gets returned,
## the original `ratings =` binding has gone out of scope and is no longer
## accessible. (Trying to reference `ratings` outside the scope of the
## `getRatings` function would be an error!)
##
## Since `ratings` represented a way to reference the list, and that way is no
## longer accessible, the list's refcount gets decremented when `ratings` goes
## out of scope. It will decrease from 3 back down to 2.
##
## Putting these together, when we call `getRatings 5`, what we get back is
## a record with two fields, `foo`, and `bar`, each of which refers to the same
## list, and that list has a refcount of 2.
##
## Let's change the last line to be `(getRatings 5).bar` instead of `getRatings 5`:
##
## getRatings = \first ->
## ratings = [ first, 4, 3 ]
##
## { foo: ratings, bar: ratings }
##
## (getRatings 5).bar
##
## Now, when this expression returns, only the `bar` field of the record will
## be returned. This will mean that the `foo` field becomes inaccessible, causing
## the list's refcount to get decremented from 2 to 1. At this point, the list is back
## where it started: there is only 1 reference to it.
##
## Finally let's suppose the final line were changed to this:
##
## List.first (getRatings 5).bar
##
## This call to [List.first] means that even the list in the `bar` field has become
## inaccessible. As such, this line will cause the list's refcount to get
## decremented all the way to 0. At that point, nothing is referencing the list
## anymore, and its memory will get freed.
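The same bookkeeping can be sketched in Rust with `Rc`, whose `Rc::strong_count` exposes the refcount directly. This is only an analogy: Roc inserts the increments and decrements automatically, whereas `Rc` does them in `clone` and `drop`.

```
use std::rc::Rc;

fn main() {
    // `ratings` starts with one reference, like the Roc example above.
    let ratings = Rc::new(vec![5, 4, 3]);
    assert_eq!(Rc::strong_count(&ratings), 1);

    // Storing the list in two record fields adds two more references.
    let record = (Rc::clone(&ratings), Rc::clone(&ratings));
    assert_eq!(Rc::strong_count(&ratings), 3);

    // When the original binding goes out of scope, the count drops by one.
    drop(ratings);
    assert_eq!(Rc::strong_count(&record.0), 2);

    // Keeping only one field drops the count to 1; dropping that last
    // reference would free the memory.
    let bar = record.1;
    drop(record.0);
    assert_eq!(Rc::strong_count(&bar), 1);
}
```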
##
## Things are different if this is a list of lists instead of a list of numbers.
## Let's look at a simpler example using [List.first] - first with a list of numbers,
## and then with a list of lists, to see how they differ.
##
## Here's the example using a list of numbers.
##
## nums = [ 1, 2, 3, 4, 5, 6, 7 ]
##
## first = List.first nums
## last = List.last nums
##
## first
##
## It makes a list, calls [List.first] and [List.last] on it, and then returns `first`.
##
## Here's the equivalent code with a list of lists:
##
## lists = [ [ 1 ], [ 2, 3 ], [], [ 4, 5, 6, 7 ] ]
##
## first = List.first lists
## last = List.last lists
##
## first
##
## TODO explain how in the former example, when we go to free `nums` at the end,
## we can free it immediately because there are no other refcounts. However,
## in the case of `lists`, we have to iterate through the list and decrement
## the refcounts of each of its contained lists - because they, too, have
## refcounts! Importantly, because the first element had its refcount incremented
## because the function returned `first`, that element will actually end up
## *not* getting freed at the end - but all the others will be.
##
## In the `lists` example, `lists = [ ... ]` also creates a list with an initial
## refcount of 1. Separately, it also creates several other lists - each with
## their own refcounts - to go inside that list. (The empty list at the end
## does not use heap memory, and thus has no refcount.)
##
## At the end, we once again call [List.first] on the list, but this time
##
## * Copying small lists (64 elements or fewer) is typically slightly faster than copying small persistent data structures. This is because, at small sizes, persistent data structures tend to be thin wrappers around flat arrays anyway. They don't have any copying advantage until crossing a certain minimum size threshold.
## * Even when copying is faster, other list operations may still be slightly slower with persistent data structures. For example, even if it were a persistent data structure, [List.map], [List.walk], and [List.keepIf] would all need to traverse every element in the list and build up the result from scratch. These operations are all linear-time regardless of the underlying representation.
## * Roc's compiler optimizes many list operations into in-place mutations behind the scenes, depending on how the list is being used. For example, [List.map], [List.keepIf], and [List.set] can all be optimized to perform in-place mutations.
## * If possible, it is usually best for performance to use large lists in a way where the optimizer can turn them into in-place mutations. If this is not possible, a persistent data structure might be faster - but this is a rare enough scenario that it would not be good for the average Roc program's performance if this were the way [List] worked by default. Instead, you can look outside Roc's standard modules for an implementation of a persistent data structure - likely built using [List] under the hood!
List elem := [ List elem ]
## Initialize
## A list with a single element in it.
##
## This is useful in pipelines, like so:
##
## websites =
## Str.concat domain ".com"
## |> List.single
##
single : elem -> List elem
## An empty list.
empty : List *
## Returns a list with the given length, where every element is the given value.
##
##
repeat : elem, Nat -> List elem
## Returns a list of all the integers between one number and another,
## including both of the given numbers.
##
## >>> List.range 2 8
range : Int a, Int a -> List (Int a)
## Transform
## Returns the list with its elements reversed.
##
## >>> List.reverse [ 1, 2, 3 ]
reverse : List elem -> List elem
## Sorts a list using a function which specifies how two elements are ordered.
##
## When sorting by numeric values, it's more efficient to use [sortAsc] or
## [sortDesc] instead.
sort : List elem, (elem, elem -> [ Lt, Eq, Gt ]) -> List elem
## Sorts a list in ascending order (lowest to highest), using a function which
## specifies a way to represent each element as a number.
##
## This is more efficient than [sort] because it skips
## calculating the `[ Lt, Eq, Gt ]` value and uses the number directly instead.
##
## To sort in descending order (highest to lowest), use [List.sortDesc] instead.
sortAsc : List elem, (elem -> Num *) -> List elem
## Sorts a list in descending order (highest to lowest), using a function which
## specifies a way to represent each element as a number.
##
## This is more efficient than [sort] because it skips
## calculating the `[ Lt, Eq, Gt ]` value and uses the number directly instead.
##
## To sort in ascending order (lowest to highest), use [List.sortAsc] instead.
sortDesc : List elem, (elem -> Num *) -> List elem
## Convert each element in the list to something new, by calling a conversion
## function on each of them. Then return a new list of the converted values.
##
## > List.map [ 1, 2, 3 ] (\num -> num + 1)
##
## > List.map [ "", "a", "bc" ] Str.isEmpty
##
## `map` functions like this are common in Roc, and they all work similarly.
## See for example `Set.map`, `Dict.map`, and [Result.map].
map : List before, (before -> after) -> List after
## Run a transformation function on the first element of each list,
## and use that as the first element in the returned list.
## Repeat until a list runs out of elements.
##
## Some languages have a function named `zip`, which does something similar to
## calling [List.map2] passing two lists and `Pair`:
##
## >>> zipped = List.map2 [ "a", "b", "c" ] [ 1, 2, 3 ] Pair
map2 : List a, List b, (a, b -> c) -> List c
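For comparison, a small Rust sketch (not Roc code): zipping two iterators produces the same positional pairing that `List.map2` with `Pair` describes, stopping as soon as either input runs out.

```
fn main() {
    // Pair up elements positionally, stopping when either input is exhausted,
    // just like a map2-style function called with a pairing constructor.
    let zipped: Vec<(char, i32)> = ['a', 'b', 'c'].into_iter().zip([1, 2, 3]).collect();
    assert_eq!(zipped, [('a', 1), ('b', 2), ('c', 3)]);
}
```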
## Run a transformation function on the first element of each list,
## and use that as the first element in the returned list.
## Repeat until a list runs out of elements.
map3 : List a, List b, List c, (a, b, c -> d) -> List d
## Run a transformation function on the first element of each list,
## and use that as the first element in the returned list.
## Repeat until a list runs out of elements.
map4 : List a, List b, List c, List d, (a, b, c, d -> e) -> List e
## This works like [List.map], except it also passes the index
## of the element to the conversion function.
mapWithIndex : List before, (before, Nat -> after) -> List after
## This works like [List.map], except at any time you can return `Err` to
## cancel the entire operation immediately, and return that #Err.
mapOrCancel : List before, (before -> Result after err) -> Result (List after) err
## Like [List.map], except the transformation function specifies whether to
## `Keep` or `Drop` each element from the final [List].
##
## You may know a similar function named `filterMap` in other languages.
mapOrDrop : List before, (before -> [ Keep after, Drop ]) -> List after
## Like [List.map], except the transformation function wraps the return value
## in a list. At the end, all the lists get joined together into one list.
##
## You may know a similar function named `concatMap` in other languages.
mapJoin : List before, (before -> List after) -> List after
## This works like [List.map], except only the transformed values that are
## wrapped in `Ok` are kept. Any that are wrapped in `Err` are dropped.
##
## >>> List.mapOks [ [ "a", "b" ], [], [], [ "c", "d", "e" ] ] List.last
##
## >>> fn = \str -> if Str.isEmpty str then Err StrWasEmpty else Ok (Str.len str)
## >>>
## >>> List.mapOks [ "", "a", "bc", "", "d", "ef", "" ]
mapOks : List before, (before -> Result after *) -> List after
## Returns a list with the element at the given index having been transformed by
## the given function.
##
## For a version of this which gives you more control over when to perform
## the transformation, see `List.updater`
##
## ## Performance notes
##
## In particular when updating nested collections, this is potentially much more
## efficient than using [List.get] to obtain the element, transforming it,
## and then putting it back in the same place.
update : List elem, Nat, (elem -> elem) -> List elem
## A more flexible version of `List.update`, which returns an "updater" function
## that lets you delay performing the update until later.
updater : List elem, Nat -> { elem, new : (elem -> List elem) }
## If all the elements in the list are #Ok, return a new list containing the
## contents of those #Ok tags. If any elements are #Err, return #Err.
allOks : List (Result ok err) -> Result (List ok) err
## Add a single element to the end of a list.
##
## >>> List.append [ 1, 2, 3 ] 4
##
## >>> [ 0, 1, 2 ]
## >>> |> List.append 3
append : List elem, elem -> List elem
## Add a single element to the beginning of a list.
##
## >>> List.prepend [ 1, 2, 3 ] 0
##
## >>> [ 2, 3, 4 ]
## >>> |> List.prepend 1
prepend : List elem, elem -> List elem
## Put two lists together.
##
## >>> List.concat [ 1, 2, 3 ] [ 4, 5 ]
##
## >>> [ 0, 1, 2 ]
## >>> |> List.concat [ 3, 4 ]
concat : List elem, List elem -> List elem
## Join the given lists together into one list.
##
## >>> List.join [ [ 1, 2, 3 ], [ 4, 5 ], [], [ 6, 7 ] ]
##
## >>> List.join [ [], [] ]
##
## >>> List.join []
join : List (List elem) -> List elem
## Like [List.join], but only keeps elements tagged with `Ok`. Elements
## tagged with `Err` are dropped.
##
## This can be useful after using an operation that returns a #Result
## on each element of a list, for example [List.first]:
##
## >>> [ [ 1, 2, 3 ], [], [], [ 4, 5 ] ]
## >>> |> List.map List.first
## >>> |> List.joinOks
##
## Eventually, `oks` type signature will be `List [Ok elem]* -> List elem`.
## The implementation for that is a lot trickier than `List (Result elem *)`
## so we're sticking with `Result` for now.
oks : List (Result elem *) -> List elem
## Filter
## Run the given function on each element of a list, and return all the
## elements for which the function returned `True`.
##
## >>> List.keepIf [ 1, 2, 3, 4 ] (\num -> num > 2)
##
## ## Performance Details
##
## [List.keepIf] always returns a list that takes up exactly the same amount
## of memory as the original, even if its length decreases. This is because it
## can't know in advance exactly how much space it will need, and if it guesses a
## length that's too low, it would have to re-allocate.
##
## (If you want to do an operation like this which reduces the memory footprint
## of the resulting list, you can do two passes over the list with [List.walk] - one
## to calculate the precise new size, and another to populate the new list.)
##
## If given a unique list, [List.keepIf] will mutate it in place to assemble the appropriate list.
## If that happens, this function will not allocate any new memory on the heap.
## If all elements in the list end up being kept, Roc will return the original
## list unaltered.
##
keepIf : List elem, (elem -> Bool) -> List elem
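The closest Rust analogue is `Vec::retain`, which also filters in place by reusing the existing allocation rather than building a new list. A minimal sketch of the idea (not Roc's implementation):

```
fn main() {
    let mut nums = vec![1, 2, 3, 4];

    // Keep only the elements the predicate accepts, reusing the existing
    // allocation instead of building a brand-new list.
    nums.retain(|&num| num > 2);
    assert_eq!(nums, [3, 4]);
}
```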
## Run the given function on each element of a list, and return all the
## elements for which the function returned `False`.
##
## >>> List.dropIf [ 1, 2, 3, 4 ] (\num -> num > 2)
##
## ## Performance Details
##
## `List.dropIf` has the same performance characteristics as [List.keepIf].
## See its documentation for details on those characteristics!
dropIf : List elem, (elem -> Bool) -> List elem
## Access
## Returns the first element in the list, or `ListWasEmpty` if it was empty.
first : List elem -> Result elem [ ListWasEmpty ]*
## Returns the last element in the list, or `ListWasEmpty` if it was empty.
last : List elem -> Result elem [ ListWasEmpty ]*
get : List elem, Nat -> Result elem [ OutOfBounds ]*
max : List (Num a) -> Result (Num a) [ ListWasEmpty ]*
min : List (Num a) -> Result (Num a) [ ListWasEmpty ]*
## Modify
## Replaces the element at the given index with a replacement.
##
## >>> List.set [ "a", "b", "c" ] 1 "B"
##
## If the given index is outside the bounds of the list, returns the original
## list unmodified.
##
## To drop the element at a given index, instead of replacing it, see [List.dropAt].
set : List elem, Nat, elem -> List elem
## Drops n elements from the beginning of the list.
drop : List elem, Nat -> List elem
## Drops the element at the given index from the list.
##
## This has no effect if the given index is outside the bounds of the list.
##
## To replace the element at a given index, instead of dropping it, see [List.set].
dropAt : List elem, Nat -> List elem
## Adds a new element to the end of the list.
##
## >>> List.append [ "a", "b" ] "c"
##
## ## Performance Details
##
## When given a Unique list, this adds the new element in-place if possible.
## This is only possible if the list has enough capacity. Otherwise, it will
## have to *clone and grow*. See the section on [capacity](#capacity) in this
## module's documentation.
append : List elem, elem -> List elem
## Adds a new element to the beginning of the list.
##
## >>> List.prepend [ "b", "c" ] "a"
##
## ## Performance Details
##
## This always clones the entire list, even when given a Unique list. That means
## it runs about as fast as `List.addLast` when both are given a Shared list.
##
## If you have a Unique list instead, [List.append] will run much faster than
## [List.prepend] except in the specific case where the list has no excess capacity,
## and needs to *clone and grow*. In that uncommon case, both [List.append] and
## [List.prepend] will run at about the same speed, since [List.prepend] always
## has to clone and grow.
##
##          | Unique list                    | Shared list    |
## ---------+--------------------------------+----------------+
##  append  | in-place given enough capacity | clone and grow |
##  prepend | clone and grow                 | clone and grow |
prepend : List elem, elem -> List elem
## Remove the last element from the list.
##
## Returns both the removed element as well as the new list (with the removed
## element missing), or `Err ListWasEmpty` if the list was empty.
##
## Here's one way you can use this:
##
## when List.pop list is
## Ok { others, last } -> ...
## Err ListWasEmpty -> ...
##
## ## Performance Details
##
## Calling `List.pop` on a Unique list runs extremely fast. It's essentially
## the same as a [List.last] except it also returns the [List] it was given,
## with its length decreased by 1.
##
## In contrast, calling `List.pop` on a Shared list creates a new list, then
## copies over every element in the original list except the last one. This
## takes much longer.
dropLast : List elem -> Result { others : List elem, last : elem } [ ListWasEmpty ]*
##
## Here's one way you can use this:
##
## when List.pop list is
## Ok { others, last } -> ...
## Err ListWasEmpty -> ...
##
## ## Performance Details
##
## When calling either `List.dropFirst` or `List.dropLast` on a Unique list, `List.dropLast`
## runs *much* faster. This is because for `List.dropLast`, removing the last element
## in-place is as easy as reducing the length of the list by 1. In contrast,
## removing the first element from the list involves copying every other element
## in the list into the index before it - which is massively more costly.
##
## In the case of a Shared list,
##
##            | Unique list                       | Shared list                       |
## -----------+-----------------------------------+-----------------------------------+
##  dropFirst | [List.first] + clone rest of list | [List.first] + clone rest of list |
##  dropLast  | [List.last] + length change       | [List.last] + clone rest of list  |
dropFirst : List elem -> Result { first: elem, others : List elem } [ ListWasEmpty ]*
## Returns the given number of elements from the beginning of the list.
##
## >>> List.takeFirst 4 [ 1, 2, 3, 4, 5, 6, 7, 8 ]
##
## If there are fewer elements in the list than the requested number,
## returns the entire list.
##
## >>> List.takeFirst 5 [ 1, 2 ]
##
## To *remove* elements from the beginning of the list, use `List.takeLast`.
##
## To remove elements from both the beginning and end of the list,
## use `List.sublist`.
##
## To split the list into two lists, use `List.split`.
##
## ## Performance Details
##
## When given a Unique list, this runs extremely fast. It sets the list's length
## to the given length value, and frees the leftover elements. This runs very
## slightly faster than `List.takeLast`.
##
## In fact, `List.takeFirst 1 list` runs faster than `List.first list` when given
## a Unique list, because [List.first] returns the first element as well -
## which introduces a conditional bounds check as well as a memory load.
takeFirst : List elem, Nat -> List elem
## Returns the given number of elements from the end of the list.
##
## >>> List.takeLast 4 [ 1, 2, 3, 4, 5, 6, 7, 8 ]
##
## If there are fewer elements in the list than the requested number,
## returns the entire list.
##
## >>> List.takeLast 5 [ 1, 2 ]
##
## To *remove* elements from the end of the list, use `List.takeFirst`.
##
## To remove elements from both the beginning and end of the list,
## use `List.sublist`.
##
## To split the list into two lists, use `List.split`.
##
## ## Performance Details
##
## When given a Unique list, this runs extremely fast. It moves the list's
## pointer to the index at the given length value, updates its length,
## and frees the leftover elements. This runs very nearly as fast as
## `List.takeFirst` on a Unique list.
##
## In fact, `List.takeLast 1 list` runs faster than `List.first list` when given
## a Unique list, because [List.first] returns the first element as well -
## which introduces a conditional bounds check as well as a memory load.
takeLast : List elem, Nat -> List elem
## Deconstruct
## Splits the list into two lists, around the given index.
##
## The returned lists are labeled `before` and `others`. The `before` list will
## contain all the elements whose index in the original list was **less than**
## the given index, and the `others` list will be all the others. (This
## means if you give an index of 0, the `before` list will be empty and the
## `others` list will have the same elements as the original list.)
split : List elem, Nat -> { before: List elem, others: List elem }
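A minimal Rust sketch of the same splitting behaviour using `split_at` on a slice (an analogy, not Roc code; note that `split_at` panics on an out-of-range index, which is an assumption of this sketch rather than documented `List.split` behaviour):

```
fn main() {
    let list = [1, 2, 3, 4, 5];

    // Everything with an index less than 2 goes in `before`;
    // the rest goes in `others`.
    let (before, others) = list.split_at(2);
    assert_eq!(before, [1, 2]);
    assert_eq!(others, [3, 4, 5]);

    // An index of 0 leaves `before` empty and `others` equal to the whole list.
    let (before, others) = list.split_at(0);
    assert!(before.is_empty());
    assert_eq!(others, [1, 2, 3, 4, 5]);
}
```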
## Returns a subsection of the given list, beginning at the `start` index and
## including a total of `len` elements.
##
## If `start` is outside the bounds of the given list, returns the empty list.
##
## >>> List.sublist { start: 4, len: 0 } [ 1, 2, 3 ]
##
## If more elements are requested than exist in the list, returns as many as it can.
##
## >>> List.sublist { start: 2, len: 10 } [ 1, 2, 3, 4, 5 ]
##
## > If you want a sublist which goes all the way to the end of the list, no
## > matter how long the list is, `List.takeLast` can do that more efficiently.
##
## Some languages have a function called **`slice`** which works similarly to this.
sublist : List elem, { start : Nat, len : Nat } -> List elem
## Build a value using each element in the list.
##
## Starting with a given `state` value, this walks through each element in the
## list from first to last, running a given `step` function on that element
## which updates the `state`. It returns the final `state` at the end.
##
## You can use it in a pipeline:
##
## [ 2, 4, 8 ]
## |> List.walk { start: 0, step: Num.add }
##
## This returns 14 because:
## * `state` starts at 0 (because of `start: 0`)
## * Each `step` runs `Num.add state elem`, and the return value becomes the new `state`.
##
## Here is a table of how `state` changes as [List.walk] walks over the elements
## `[ 2, 4, 8 ]` using #Num.add as its `step` function to determine the next `state`.
##
## `state` | `elem` | `step state elem` (`Num.add state elem`)
## --------+--------+-----------------------------------------
## 0 | |
## 0 | 2 | 2
## 2 | 4 | 6
## 6 | 8 | 14
##
## So `state` goes through these changes:
## 1. `0` (because of `start: 0`)
## 2. `2` (because of `Num.add state elem` with `state` = 0 and `elem` = 2)
## 3. `6`, and then finally `14`, just as in the table above.
##
## [ 1, 2, 3 ]
## |> List.walk { start: 0, step: Num.sub }
##
## This returns -6 because `Num.sub state elem` runs `0 - 1`, then `-1 - 2`, then `-3 - 3`, which is `-6`.
##
## Note that in other languages, `walk` is sometimes called `reduce`,
## `fold`, `foldLeft`, or `foldl`.
walk : List elem, state, (state, elem -> state) -> state
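The same computations written with Rust's iterator `fold`, which plays the role of `List.walk` here (a sketch, not Roc code):

```
fn main() {
    // 0 + 2 + 4 + 8 = 14, matching the walkthrough table above.
    let sum = [2, 4, 8].iter().fold(0, |state, elem| state + elem);
    assert_eq!(sum, 14);

    // 0 - 1 - 2 - 3 = -6, matching the Num.sub example.
    let diff = [1, 2, 3].iter().fold(0, |state, elem| state - elem);
    assert_eq!(diff, -6);
}
```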
## Note that in other languages, `walkBackwards` is sometimes called `reduceRight`,
## `fold`, `foldRight`, or `foldr`.
walkBackwards : List elem, state, (state, elem -> state) -> state
## Same as [List.walk], except you can stop walking early.
##
## ## Performance Details
##
## Compared to [List.walk], this can potentially visit fewer elements (which can
## improve performance) at the cost of making each step take longer.
## However, the added cost to each step is extremely small, and can easily
## be outweighed if it results in skipping even a small number of elements.
##
## As such, it is typically better for performance to use this over [List.walk]
## if returning `Done` earlier than the last element is expected to be common.
walkUntil : List elem, state, (state, elem -> [ Continue state, Done state ]) -> state
## Same as [List.walkBackwards], except you can stop walking early.
walkBackwardsUntil : List elem, state, (state, elem -> [ Continue state, Done state ]) -> state
## Check
## Returns the length of the list - the number of elements it contains.
##
## One [List] can store up to 2,147,483,647 elements (just over 2 billion), which
## is exactly the highest valid #I32 value. This means the #Nat this function
## returns can always be safely converted to an #I32 without losing any data.
len : List * -> Nat
isEmpty : List * -> Bool
contains : List elem, elem -> Bool
startsWith : List elem, List elem -> Bool
endsWith : List elem, List elem -> Bool
## Run the given predicate on each element of the list, returning `True` if
## any of the elements satisfy it.
any : List elem, (elem -> Bool) -> Bool
## Run the given predicate on each element of the list, returning `True` if
## all of the elements satisfy it.
all : List elem, (elem -> Bool) -> Bool
## Returns the first element of the list satisfying a predicate function.
## If no satisfying element is found, an `Err NotFound` is returned.
find : List elem, (elem -> Bool) -> Result elem [ NotFound ]*
## Apply a function that returns a Result on a list, only successful
## Results are kept and returned unwrapped.
keepOks : List before, (before -> Result after *) -> List after
## Apply a function that returns a Result on a list, only unsuccessful
## Results are kept and returned unwrapped.
keepErrs : List before, (before -> Result * after) -> List after

View File

@ -1,67 +0,0 @@
interface Result
exposes
[
Result,
after,
isOk,
isErr,
map,
mapErr,
withDefault
]
imports []
## The result of an operation that could fail: either the operation went
## okay, or else there was an error of some sort.
Result ok err : [ Ok ok, Err err ]
## Return True if the result indicates a success, else return False
##
## >>> Result.isOk (Ok 5)
isOk : Result * * -> Bool
## Return True if the result indicates a failure, else return False
##
## >>> Result.isErr (Err "uh oh")
isErr : Result * * -> Bool
## If the result is `Ok`, return the value it holds. Otherwise, return
## the given default value.
##
## >>> Result.withDefault (Ok 7) 42
##
## >>> Result.withDefault (Err "uh oh") 42
withDefault : Result ok err, ok -> ok
## If the result is `Ok`, transform the entire result by running a conversion
## function on the value the `Ok` holds. Then return that new result.
##
## (If the result is `Err`, this has no effect. Use `afterErr` to transform an `Err`.)
##
## >>> Result.after (Ok -1) \num -> if num < 0 then Err "negative!" else Ok -num
##
## >>> Result.after (Err "yipes!") \num -> if num < 0 then Err "negative!" else Ok -num
after : Result before err, (before -> Result after err) -> Result after err
## If the result is `Ok`, transform the value it holds by running a conversion
## function on it. Then return a new `Ok` holding the transformed value.
##
## (If the result is `Err`, this has no effect. Use [mapErr] to transform an `Err`.)
##
## >>> Result.map (Ok 12) Num.negate
##
## >>> Result.map (Err "yipes!") Num.negate
##
## `map` functions like this are common in Roc, and they all work similarly.
## See for example [List.map], `Set.map`, and `Dict.map`.
map : Result before err, (before -> after) -> Result after err
## If the result is `Err`, transform the value it holds by running a conversion
## function on it. Then return a new `Err` holding the transformed value.
##
## (If the result is `Ok`, this has no effect. Use [map] to transform an `Ok`.)
##
## >>> Result.mapErr (Err "yipes!") Str.isEmpty
##
## >>> Result.mapErr (Ok 12) Str.isEmpty
mapErr : Result ok before, (before -> after) -> Result ok after

View File

@ -1,59 +0,0 @@
interface Set
exposes
[
Set,
contains,
difference,
empty,
fromList,
insert,
intersection,
len,
remove,
single,
toList,
union,
walk
]
imports []
## A Set is an unordered collection of unique elements.
Set elem := [ Set elem ]
## An empty set.
empty : Set *
## Check
isEmpty : Set * -> Bool
len : Set * -> Nat
## Modify
# TODO: removed `'` from signature because parser does not support it yet
# Original signature: `add : Set 'elem, 'elem -> Set 'elem`
## Make sure never to add a *NaN* to a [Set]! Because *NaN* is defined to be
## unequal to *NaN*, adding a *NaN* results in an entry that can never be
## retrieved or removed from the [Set].
add : Set elem, elem -> Set elem
## Drops the given element from the set.
# TODO: removed `'` from signature because parser does not support it yet
# Original signature: `drop : Set 'elem, 'elem -> Set 'elem`
drop : Set elem, elem -> Set elem
## Transform
## Convert each element in the set to something new, by calling a conversion
## function on each of them. Then return a new set of the converted values.
##
## >>> Set.map {: -1, 1, 3 :} Num.negate
##
## >>> Set.map {: "", "a", "bc" :} Str.isEmpty
##
## `map` functions like this are common in Roc, and they all work similarly.
## See for example [List.map], `Dict.map`, and [Result.map].
# TODO: removed `'` from signature because parser does not support it yet
# Original signature: `map : Set 'elem, ('before -> 'after) -> Set 'after`
map : Set elem, (before -> after) -> Set after

View File

@ -1,470 +0,0 @@
interface Str
exposes
[
Str,
append,
concat,
countGraphemes,
endsWith,
fromUtf8,
isEmpty,
joinWith,
split,
startsWith,
startsWithCodePt,
toUtf8,
Utf8Problem,
Utf8ByteProblem
]
imports []
## # Types
##
## Dealing with text is a deep topic, so by design, Roc's `Str` module sticks
## to the basics.
##
## _For more advanced use cases like working with raw [code points](https://unicode.org/glossary/#code_point),
## see the [roc/unicode](roc/unicode) package. For locale-specific text
## functions (including uppercasing strings, as capitalization rules vary by locale;
## in English, `"i"` capitalizes to `"I"`, but [in Turkish](https://en.wikipedia.org/wiki/Dotted_and_dotless_I#In_computing),
## the same `"i"` capitalizes to `"İ"` - as well as sorting strings, which also varies
## by locale; `"ö"` is sorted differently in German and Swedish) see the [roc/locale](roc/locale) package._
##
## ### Unicode
##
## Unicode can represent text values which span multiple languages, symbols, and emoji.
## Here are some valid Roc strings:
##
## "Roc!"
## "鹏"
## "🕊"
##
## Every Unicode string is a sequence of [extended grapheme clusters](http://www.unicode.org/glossary/#extended_grapheme_cluster).
## An extended grapheme cluster represents what a person reading a string might
## call a "character" - like "A" or "ö" or "👩‍👩‍👦‍👦".
## Because the term "character" means different things in different areas of
## programming, and "extended grapheme cluster" is a mouthful, in Roc we use the
## term "grapheme" as a shorthand for the more precise "extended grapheme cluster."
##
## You can get the number of graphemes in a string by calling [Str.countGraphemes] on it:
##
## Str.countGraphemes "Roc!"
## Str.countGraphemes "折り紙"
## Str.countGraphemes "🕊"
##
## > The `countGraphemes` function walks through the entire string to get its answer,
## > so if you want to check whether a string is empty, you'll get much better performance
## > by calling `Str.isEmpty myStr` instead of `Str.countGraphemes myStr == 0`.
##
## ### Escape sequences
##
## If you put a `\` in a Roc string literal, it begins an *escape sequence*.
## An escape sequence is a convenient way to insert certain strings into other strings.
## For example, suppose you write this Roc string:
##
## "I took the one less traveled by,\nAnd that has made all the difference."
##
## The `"\n"` in the middle will insert a line break into this string. There are
## other ways of getting a line break in there, but `"\n"` is the most common.
##
## Another way you could insert a newline is by writing `\u{0x0A}` instead of `\n`.
## That would result in the same string, because the `\u` escape sequence inserts
## [Unicode code points](https://unicode.org/glossary/#code_point) directly into
## the string. The Unicode code point 10 is a newline, and 10 is `0A` in hexadecimal.
## `0x0A` is a Roc hexadecimal literal, and `\u` escape sequences are always
## followed by a hexadecimal literal inside `{` and `}` like this.
##
## As another example, `"R\u{0x6F}c"` is the same string as `"Roc"`, because
## `"\u{0x6F}"` corresponds to the Unicode code point for lowercase `o`. If you
## want to [spice things up a bit](https://en.wikipedia.org/wiki/Metal_umlaut),
## you can write `"R\u{0xF6}c"` as an alternative way to get the string `"Röc"`.
##
## Roc strings also support these escape sequences:
##
## * `\\` - an actual backslash (writing a single `\` always begins an escape sequence!)
## * `\"` - an actual quotation mark (writing a `"` without a `\` ends the string)
## * `\r` - [carriage return](https://en.wikipedia.org/wiki/Carriage_Return)
## * `\t` - [horizontal tab](https://en.wikipedia.org/wiki/Tab_key#Tab_characters)
## * `\v` - [vertical tab](https://en.wikipedia.org/wiki/Tab_key#Tab_characters)
##
## You can also use escape sequences to insert named strings into other strings, like so:
##
## name = "Lee"
## city = "Roctown"
##
## greeting = "Hello there, \(name)! Welcome to \(city)."
##
## Here, `greeting` will become the string `"Hello there, Lee! Welcome to Roctown."`.
## This is known as [string interpolation](https://en.wikipedia.org/wiki/String_interpolation),
## and you can use it as many times as you like inside a string. The name
## between the parentheses must refer to a `Str` value that is currently in
## scope, and it must be a name - it can't be an arbitrary expression like a function call.
##
## ### Encoding
##
## Roc strings are not coupled to any particular
## [encoding](https://en.wikipedia.org/wiki/Character_encoding). As it happens,
## they are currently encoded in UTF-8, but this module is intentionally designed
## not to rely on that implementation detail so that a future release of Roc can
## potentially change it without breaking existing Roc applications. (UTF-8
## seems pretty great today, but so did UTF-16 at an earlier point in history.)
##
## This module has functions that can convert a [Str] to a [List] of raw [code unit](https://unicode.org/glossary/#code_unit)
## integers (not to be confused with the [code points](https://unicode.org/glossary/#code_point)
## mentioned earlier) in a particular encoding. If you need encoding-specific functions,
## you should take a look at the [roc/unicode](roc/unicode) package.
## It has many more tools than this module does!
## A [Unicode](https://unicode.org) text value.
Str := [ Str ]
## Convert
## Convert a [Float] to a decimal string, rounding off to the given number of decimal places.
##
## If you want to keep all the digits, use [Str.num] instead.
decimal : Float *, Nat -> Str
## Convert a [Num] to a string.
num : Num * -> Str
## Split a string around a separator.
##
## >>> Str.split "1,2,3" ","
##
## Passing `""` for the separator is not useful; it returns the original string
## wrapped in a list.
##
## >>> Str.split "1,2,3" ""
##
## To split a string into its individual graphemes, use `Str.graphemes`
split : Str, Str -> List Str
## Split a string around newlines.
##
## On strings that use `"\n"` for their line endings, this gives the same answer
## as passing `"\n"` to [Str.split]. However, on strings that use `"\r\n"` (such
## as [in Windows files](https://en.wikipedia.org/wiki/Newline#History)), this
## will consume the entire `"\r\n"` instead of just the `"\n"`.
##
## >>> Str.lines "Hello, World!\nNice to meet you!"
##
## >>> Str.lines "Hello, World!\r\nNice to meet you!"
##
## To split a string using a custom separator, use [Str.split]. For more advanced
## string splitting, use a #Parser.
lines : Str -> List Str
## Check
## Returns `True` if the string is empty, and `False` otherwise.
##
## >>> Str.isEmpty "hi!"
##
## >>> Str.isEmpty ""
isEmpty : Str -> Bool
startsWith : Str, Str -> Bool
## If the string begins with a [Unicode code point](http://www.unicode.org/glossary/#code_point)
## equal to the given [U32], return `True`. Otherwise return `False`.
##
## If the given [Str] is empty, or if the given [U32] is not a valid
## code point, this will return `False`.
##
## **Performance Note:** This runs slightly faster than [Str.startsWith], so
## if you want to check whether a string begins with something that's representable
## in a single code point, you can use (for example) `Str.startsWithCodePt '鹏'`
## instead of `Str.startsWith "鹏"`. ('鹏' evaluates to the [U32]
## value `40527`.) This will not work for graphemes which take up multiple code
## points, however; `Str.startsWithCodePt '👩‍👩‍👦‍👦'` would be a compiler error
## because 👩‍👩‍👦‍👦 takes up multiple code points and cannot be represented as a
## single [U32]. You'd need to use `Str.startsWith "👩‍👩‍👦‍👦"` instead.
startsWithCodePt : Str, U32 -> Bool
endsWith : Str, Str -> Bool
contains : Str, Str -> Bool
anyGraphemes : Str, (Str -> Bool) -> Bool
allGraphemes : Str, (Str -> Bool) -> Bool
## Combine
## Combine a list of strings into a single string.
##
## >>> Str.join [ "a", "bc", "def" ]
join : List Str -> Str
## Combine a list of strings into a single string, with a separator
## string in between each.
##
## >>> Str.joinWith [ "one", "two", "three" ] ", "
joinWith : List Str, Str -> Str
## Add to the start of a string until it has at least the given number of
## graphemes.
##
## >>> Str.padGraphemesStart "0" 5 "36"
##
## >>> Str.padGraphemesStart "0" 1 "36"
##
## >>> Str.padGraphemesStart "0" 5 "12345"
##
## >>> Str.padGraphemesStart "✈️" 5 "👩‍👩‍👦‍👦👩‍👩‍👦‍👦👩‍👩‍👦‍👦"
padGraphemesStart : Str, Nat, Str -> Str
## Add to the end of a string until it has at least the given number of
## graphemes.
##
## >>> Str.padGraphemesEnd "0" 5 "36"
##
## >>> Str.padGraphemesEnd "0" 1 "36"
##
## >>> Str.padGraphemesEnd "0" 5 "12345"
##
## >>> Str.padGraphemesEnd "✈️" 5 "👩‍👩‍👦‍👦👩‍👩‍👦‍👦👩‍👩‍👦‍👦"
padGraphemesEnd : Str, Nat, Str -> Str
## Graphemes
## Split a string into its individual graphemes.
##
## >>> Str.graphemes "1,2,3"
##
## >>> Str.graphemes "👍👍👍"
##
graphemes : Str -> List Str
## Count the number of [extended grapheme clusters](http://www.unicode.org/glossary/#extended_grapheme_cluster)
## in the string.
##
## Str.countGraphemes "Roc!" # 4
## Str.countGraphemes "七巧板" # 3
## Str.countGraphemes "🕊" # 1
countGraphemes : Str -> Nat
## Reverse the order of the string's individual graphemes.
##
## >>> Str.reverseGraphemes "1-2-3"
##
## >>> Str.reverseGraphemes "🐦✈️👩‍👩‍👦‍👦"
##
## >>> Str.reverseGraphemes "Crème Brûlée"
reverseGraphemes : Str -> Str
## Returns `True` if the two strings are equal when ignoring case.
##
## >>> Str.isCaseInsensitiveEq "hi" "Hi"
isCaseInsensitiveEq : Str, Str -> Bool
isCaseInsensitiveNeq : Str, Str -> Bool
walkGraphemes : Str, { start: state, step: (state, Str -> state) } -> state
walkGraphemesUntil : Str, { start: state, step: (state, Str -> [ Continue state, Done state ]) } -> state
walkGraphemesBackwards : Str, { start: state, step: (state, Str -> state) } -> state
walkGraphemesBackwardsUntil : Str, { start: state, step: (state, Str -> [ Continue state, Done state ]) } -> state
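A brief sketch of `walkGraphemes` with the record-style configuration shown above (`countA` is a hypothetical helper; the result comment is illustrative):

    countA : Str -> Nat
    countA = \str ->
        step = \count, grapheme -> if grapheme == "a" then count + 1 else count

        Str.walkGraphemes str { start: 0, step: step }

    countA "banana" # 3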
## Returns `True` if the string begins with an uppercase letter.
##
## >>> Str.isCapitalized "Hi"
##
## >>> Str.isCapitalized " Hi"
##
## >>> Str.isCapitalized "hi"
##
## >>> Str.isCapitalized "Česká"
##
## >>> Str.isCapitalized "Э"
##
## >>> Str.isCapitalized "東京"
##
## >>> Str.isCapitalized "🐦"
##
## >>> Str.isCapitalized ""
##
## Since the rules for how to capitalize a string vary by locale
## (for example, in English, `"i"` capitalizes to `"I"`, but
## [in Turkish](https://en.wikipedia.org/wiki/Dotted_and_dotless_I#In_computing),
## the same `"i"` capitalizes to `"İ"`), see the [roc/locale](roc/locale)
## package for functions which capitalize strings.
isCapitalized : Str -> Bool
## Returns `True` if the string consists entirely of uppercase letters.
##
## >>> Str.isAllUppercase "hi"
##
## >>> Str.isAllUppercase "Hi"
##
## >>> Str.isAllUppercase "HI"
##
## >>> Str.isAllUppercase " Hi"
##
## >>> Str.isAllUppercase "Česká"
##
## >>> Str.isAllUppercase "Э"
##
## >>> Str.isAllUppercase "東京"
##
## >>> Str.isAllUppercase "🐦"
##
## >>> Str.isAllUppercase ""
isAllUppercase : Str -> Bool
## Returns `True` if the string consists entirely of lowercase letters.
##
## >>> Str.isAllLowercase "hi"
##
## >>> Str.isAllLowercase "Hi"
##
## >>> Str.isAllLowercase "HI"
##
## >>> Str.isAllLowercase " Hi"
##
## >>> Str.isAllLowercase "Česká"
##
## >>> Str.isAllLowercase "Э"
##
## >>> Str.isAllLowercase "東京"
##
## >>> Str.isAllLowercase "🐦"
##
## >>> Str.isAllLowercase ""
isAllLowercase : Str -> Bool
## Return the string with any blank spaces removed from both the beginning
## as well as the end.
trim : Str -> Str
## If the given [U32] is a valid [Unicode Scalar Value](http://www.unicode.org/glossary/#unicode_scalar_value),
## return a [Str] containing only that scalar.
fromScalar : U32 -> Result Str [ BadScalar ]*
fromCodePts : List U32 -> Result Str [ BadCodePt U32 ]*
fromUtf8 : List U8 -> Result Str [ BadUtf8 ]*
## Create a [Str] from bytes encoded as [UTF-16LE](https://en.wikipedia.org/wiki/UTF-16#Byte-order_encoding_schemes).
# fromUtf16Le : List U8 -> Result Str [ BadUtf16Le Endi ]*
# ## Create a [Str] from bytes encoded as [UTF-16BE](https://en.wikipedia.org/wiki/UTF-16#Byte-order_encoding_schemes).
# fromUtf16Be : List U8 -> Result Str [ BadUtf16Be Endi ]*
# ## Create a [Str] from bytes encoded as UTF-16 with a [Byte Order Mark](https://en.wikipedia.org/wiki/Byte_order_mark).
# fromUtf16Bom : List U8 -> Result Str [ BadUtf16 Endi, NoBom ]*
# ## Create a [Str] from bytes encoded as [UTF-32LE](https://web.archive.org/web/20120322145307/http://mail.apps.ietf.org/ietf/charsets/msg01095.html)
# fromUtf32Le : List U8 -> Result Str [ BadUtf32Le Endi ]*
# ## Create a [Str] from bytes encoded as [UTF-32BE](https://web.archive.org/web/20120322145307/http://mail.apps.ietf.org/ietf/charsets/msg01095.html)
# fromUtf32Be : List U8 -> Result Str [ BadUtf32Be Endi ]*
# ## Create a [Str] from bytes encoded as UTF-32 with a [Byte Order Mark](https://en.wikipedia.org/wiki/Byte_order_mark).
# fromUtf32Bom : List U8 -> Result Str [ BadUtf32 Endi, NoBom ]*
# ## Convert from UTF-8, substituting the replacement character ("�") for any
# ## invalid sequences encountered.
# fromUtf8Sub : List U8 -> Str
# fromUtf16Sub : List U8, Endi -> Str
# fromUtf16BomSub : List U8 -> Result Str [ NoBom ]*
## Return a [List] of the string's [U8] UTF-8 [code units](https://unicode.org/glossary/#code_unit).
## (To split the string into a [List] of smaller [Str] values instead of [U8] values,
## see [Str.split] and `Str.graphemes`.)
##
## >>> Str.toUtf8 "👩‍👩‍👦‍👦"
##
## >>> Str.toUtf8 "Roc"
##
## >>> Str.toUtf8 "鹏"
##
## >>> Str.toUtf8 "🐦"
##
## For a more flexible function that walks through each of these [U8] code units
## without creating a [List], see `Str.walkUtf8` and `Str.walkRevUtf8`.
toUtf8 : Str -> List U8
toUtf16Be : Str -> List U8
toUtf16Le : Str -> List U8
# toUtf16Bom : Str, Endi -> List U8
toUtf32Be : Str -> List U8
toUtf32Le : Str -> List U8
# toUtf32Bom : Str, Endi -> List U8
# Parsing
## If the bytes begin with a valid [extended grapheme cluster](http://www.unicode.org/glossary/#extended_grapheme_cluster)
## encoded as [UTF-8](https://en.wikipedia.org/wiki/UTF-8), return it along with the number of bytes it took up.
##
## If the bytes do not begin with a valid grapheme, for example because the list was
## empty or began with an invalid grapheme, return `Err`.
parseUtf8Grapheme : List U8 -> Result { grapheme : Str, bytesParsed: Nat } [ InvalidGrapheme ]*
## If the bytes begin with a valid [Unicode code point](http://www.unicode.org/glossary/#code_point)
## encoded as [UTF-8](https://en.wikipedia.org/wiki/UTF-8), return it along with the number of bytes it took up.
##
## If the string does not begin with a valid code point, for example because the list was
## empty or began with an invalid code point, return an `Err`.
parseUtf8CodePt : List U8 -> Result { codePt : U32, bytesParsed: Nat } [ InvalidCodePt ]*
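As a rough sketch of how this could be looped over a byte list, assuming the signature above and the `List`/`Result` helpers documented elsewhere in this commit (`codePtsFromUtf8` is a hypothetical helper):

    codePtsFromUtf8 : List U8 -> Result (List U32) [ InvalidCodePt ]*
    codePtsFromUtf8 = \bytes ->
        if List.isEmpty bytes then
            Ok []
        else
            when Str.parseUtf8CodePt bytes is
                Err e ->
                    Err e

                Ok { codePt, bytesParsed } ->
                    # decode the remaining bytes, then put this code point at the front
                    rest = List.drop bytes bytesParsed

                    Result.map (codePtsFromUtf8 rest) \codePts ->
                        List.prepend codePts codePt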
## If the string represents a valid [U8] number, return that number.
##
## For more advanced options, see [parseU8].
toU8 : Str -> Result U8 [ InvalidU8 ]*
toI8 : Str -> Result I8 [ InvalidI8 ]*
toU16 : Str -> Result U16 [ InvalidU16 ]*
toI16 : Str -> Result I16 [ InvalidI16 ]*
toU32 : Str -> Result U32 [ InvalidU32 ]*
toI32 : Str -> Result I32 [ InvalidI32 ]*
toU64 : Str -> Result U64 [ InvalidU64 ]*
toI64 : Str -> Result I64 [ InvalidI64 ]*
toU128 : Str -> Result U128 [ InvalidU128 ]*
toI128 : Str -> Result I128 [ InvalidI128 ]*
toF64 : Str -> Result F64 [ InvalidF64 ]*
toF32 : Str -> Result F32 [ InvalidF32 ]*
toDec : Str -> Result Dec [ InvalidDec ]*
## If the string represents a valid number, return that number.
##
## The exact number type to look for will be inferred from usage.
## In the example below, the usage of I64 in the type signature will require that type instead of (Num *).
##
## >>> strToI64 : Str -> Result I64 [ InvalidNumStr ]*
## >>> strToI64 = \inputStr ->
## >>> Str.toNum inputStr
##
## If the string is exactly `"NaN"`, `"∞"`, or `"-∞"`, they will be accepted
## only when converting to [F64] or [F32] numbers, and will be translated accordingly.
##
## This never accepts numbers with underscores or commas in them. For more
## advanced options, see [parseNum].
toNum : Str -> Result (Num *) [ InvalidNumStr ]*
## If the string begins with an [Int] or a [finite](Num.isFinite) [Frac], return
## that number along with the rest of the string after it.
##
## The exact number type to look for will be inferred from usage.
## In the example below, the usage of Float64 in the type signature will require that type instead of (Num *).
##
## >>> parseFloat64 : Str -> Result { val: Float64, rest: Str } [ InvalidNumStr ]*
## >>> parseFloat64 = \input ->
## >>>     Str.parseNum input {}
##
## If the string begins with `"NaN"`, `"∞"`, or `"-∞"` (which do not represent
## [finite](Num.isFinite) numbers), they will be accepted only when parsing
## [F64] or [F32] numbers, and translated accordingly.
# parseNum : Str, NumParseConfig -> Result { val : Num *, rest : Str } [ InvalidNumStr ]*
## Notes:
## * You can allow a decimal mark for integers; they'll only parse if the numbers after it are all 0.
## * For `wholeSep`, `Required` has a payload for how many digits (e.g. "required every 3 digits")
## * For `wholeSep`, `Allowed` allows the separator to appear anywhere.
# NumParseConfig :
# {
# base ? [ Decimal, Hexadecimal, Octal, Binary ],
# notation ? [ Standard, Scientific, Any ],
# decimalMark ? [ Allowed Str, Required Str, Disallowed ],
# decimalDigits ? [ Any, AtLeast U16, Exactly U16 ],
# wholeDigits ? [ Any, AtLeast U16, Exactly U16 ],
# leadingZeroes ? [ Allowed, Disallowed ],
# trailingZeroes ? [ Allowed, Disallowed ],
# wholeSep ? { mark : Str, policy : [ Allowed, Required U64 ] }
# }
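Purely to illustrate the commented-out `NumParseConfig` sketch above (nothing here is implemented; the field names come from that sketch), a config for parsing underscore-grouped decimal integers might look like:

    # groupedDecimal =
    #     {
    #         base: Decimal,
    #         notation: Standard,
    #         decimalMark: Disallowed,
    #         wholeSep: { mark: "_", policy: Allowed }
    #     }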

View File

@ -20,6 +20,57 @@ interface Dict
Bool.{ Bool }
]
## A [dictionary](https://en.wikipedia.org/wiki/Associative_array) that lets you associate keys with values.
##
## ### Inserting
##
## The most basic way to use a dictionary is to start with an empty one and then:
## 1. Call [Dict.insert] passing a key and a value, to associate that key with that value in the dictionary.
## 2. Later, call [Dict.get] passing the same key as before, and it will return the value you stored.
##
## Here's an example of a dictionary which uses a city's name as the key, and its population as the associated value.
##
## populationByCity =
## Dict.empty
## |> Dict.insert "London" 8_961_989
## |> Dict.insert "Philadelphia" 1_603_797
## |> Dict.insert "Shanghai" 24_870_895
## |> Dict.insert "Delhi" 16_787_941
## |> Dict.insert "Amsterdam" 872_680
##
## ### Accessing keys or values
##
## We can use [Dict.keys] and [Dict.values] functions to get only the keys or only the values.
##
## You may notice that these lists have the same order as the original insertion order. This will be true if
## all you ever do is [insert] and [get] operations on the dictionary, but [remove] operations can change this order.
## Let's see how that looks.
##
## ### Removing
##
## We can remove an element from the dictionary, like so:
##
## populationByCity
## |> Dict.remove "Philadelphia"
## |> Dict.keys
## ==
## [ "London", "Amsterdam", "Shanghai", "Delhi" ]
##
## Notice that the order changed! Not only has Philadelphia been removed, but Amsterdam - the last
## entry we inserted - has been moved into the spot where Philadelphia was previously. This is exactly what
## [Dict.remove] does: it removes an element and moves the most recent insertion into the vacated spot.
##
## This move is done as a performance optimization, and it lets [remove] have
## [constant time complexity](https://en.wikipedia.org/wiki/Time_complexity#Constant_time).
##
## ### Equality
##
## When comparing two dictionaries for equality, they are `==` only if both their contents and their
## orderings match. This preserves the property that if `dict1 == dict2`, you should be able to rely on
## `fn dict1 == fn dict2` also being `True`, even if `fn` relies on the dictionary's ordering.
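A small sketch tying the sections above together, using the `populationByCity` dictionary from the insertion example (the result comments are illustrative and assume no removals have happened):

    Dict.get populationByCity "London" # Ok 8_961_989
    Dict.get populationByCity "Paris"  # Err KeyNotFound

    Dict.keys populationByCity
    # [ "London", "Philadelphia", "Shanghai", "Delhi", "Amsterdam" ]

    Dict.values populationByCity
    # [ 8_961_989, 1_603_797, 24_870_895, 16_787_941, 872_680 ]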
## An empty dictionary.
empty : Dict k v
single : k, v -> Dict k v
get : Dict k v, k -> Result v [ KeyNotFound ]*
@ -28,7 +79,11 @@ insert : Dict k v, k, v -> Dict k v
len : Dict k v -> Nat
remove : Dict k v, k -> Dict k v
contains : Dict k v, k -> Bool
## Returns a [List] of the dictionary's keys.
keys : Dict k v -> List k
## Returns a [List] of the dictionary's values.
values : Dict k v -> List v
union : Dict k v, Dict k v -> Dict k v
intersection : Dict k v, Dict k v -> Dict k v

View File

@ -56,6 +56,149 @@ interface List
Bool.{ Bool }
]
## Types
## A sequential list of values.
##
## >>> [ 1, 2, 3 ] # a list of numbers
## >>> [ "a", "b", "c" ] # a list of strings
## >>> [ [ 1.1 ], [], [ 2.2, 3.3 ] ] # a list of lists of numbers
##
## The maximum size of a [List] is limited by the amount of heap memory available
## to the current process. If there is not enough memory available, attempting to
## create the list could crash. (On Linux, where [overcommit](https://www.etalabs.net/overcommit.html)
## is normally enabled, not having enough memory could result in the list appearing
## to be created just fine, but then crashing later.)
##
## > The theoretical maximum length for a list created in Roc is half of
## > `Num.maxNat`. Attempting to create a list bigger than that
## > in Roc code will always fail, although in practice it is likely to fail
## > at much smaller lengths due to insufficient memory being available.
##
## ## Performance Details
##
## Under the hood, a list is a record containing a `len : Nat` field as well
## as a pointer to a reference count and a flat array of bytes. Unique lists
## store a capacity #Nat instead of a reference count.
##
## ## Shared Lists
##
## Shared lists are [reference counted](https://en.wikipedia.org/wiki/Reference_counting).
##
## Each time a given list gets referenced, its reference count ("refcount" for short)
## gets incremented. Each time a list goes out of scope, its refcount gets
## decremented. Once a refcount has been decremented more times than it has been
## incremented, we know nothing is referencing it anymore, and the list's memory
## will be immediately freed.
##
## Let's look at an example.
##
## ratings = [ 5, 4, 3 ]
##
## { foo: ratings, bar: ratings }
##
## The first line binds the name `ratings` to the list `[ 5, 4, 3 ]`. The list
## begins with a refcount of 1, because so far only `ratings` is referencing it.
##
## The second line alters this refcount. `{ foo: ratings` references
## the `ratings` list, which will result in its refcount getting incremented
## from 1 to 2. Similarly, `bar: ratings }` also references the `ratings` list,
## which will result in its refcount getting incremented from 2 to 3.
##
## Let's turn this example into a function.
##
## getRatings = \first ->
## ratings = [ first, 4, 3 ]
##
## { foo: ratings, bar: ratings }
##
## getRatings 5
##
## At the end of the `getRatings` function, when the record gets returned,
## the original `ratings =` binding has gone out of scope and is no longer
## accessible. (Trying to reference `ratings` outside the scope of the
## `getRatings` function would be an error!)
##
## Since `ratings` represented a way to reference the list, and that way is no
## longer accessible, the list's refcount gets decremented when `ratings` goes
## out of scope. It will decrease from 3 back down to 2.
##
## Putting these together, when we call `getRatings 5`, what we get back is
## a record with two fields, `foo` and `bar`, each of which refers to the same
## list, and that list has a refcount of 2.
##
## Let's change the last line to be `(getRatings 5).bar` instead of `getRatings 5`:
##
## getRatings = \first ->
## ratings = [ first, 4, 3 ]
##
## { foo: ratings, bar: ratings }
##
## (getRatings 5).bar
##
## Now, when this expression returns, only the `bar` field of the record will
## be returned. This will mean that the `foo` field becomes inaccessible, causing
## the list's refcount to get decremented from 2 to 1. At this point, the list is back
## where it started: there is only 1 reference to it.
##
## Finally let's suppose the final line were changed to this:
##
## List.first (getRatings 5).bar
##
## This call to [List.first] means that even the list in the `bar` field has become
## inaccessible. As such, this line will cause the list's refcount to get
## decremented all the way to 0. At that point, nothing is referencing the list
## anymore, and its memory will get freed.
##
## Things are different if this is a list of lists instead of a list of numbers.
## Let's look at a simpler example using [List.first] - first with a list of numbers,
## and then with a list of lists, to see how they differ.
##
## Here's the example using a list of numbers.
##
## nums = [ 1, 2, 3, 4, 5, 6, 7 ]
##
## first = List.first nums
## last = List.last nums
##
## first
##
## It makes a list, calls [List.first] and [List.last] on it, and then returns `first`.
##
## Here's the equivalent code with a list of lists:
##
## lists = [ [ 1 ], [ 2, 3 ], [], [ 4, 5, 6, 7 ] ]
##
## first = List.first lists
## last = List.last lists
##
## first
##
## TODO explain how in the former example, when we go to free `nums` at the end,
## we can free it immediately because there are no other refcounts. However,
## in the case of `lists`, we have to iterate through the list and decrement
## the refcounts of each of its contained lists - because they, too, have
## refcounts! Importantly, because the first element had its refcount incremented
## because the function returned `first`, that element will actually end up
## *not* getting freed at the end - but all the others will be.
##
## In the `lists` example, `lists = [ ... ]` also creates a list with an initial
## refcount of 1. Separately, it also creates several other lists - each with
## their own refcounts - to go inside that list. (The empty list at the end
## does not use heap memory, and thus has no refcount.)
##
## At the end, we once again call [List.first] on the list, but this time the
## element we get back is itself a list, so its refcount gets incremented before
## it is returned - which is why it will not be freed along with the others.
##
## * Copying small lists (64 elements or fewer) is typically slightly faster than copying small persistent data structures. This is because, at small sizes, persistent data structures tend to be thin wrappers around flat arrays anyway. They don't have any copying advantage until crossing a certain minimum size threshold.
## * Even when copying is faster, other list operations may still be slightly slower with persistent data structures. For example, even if it were a persistent data structure, [List.map], [List.walk], and [List.keepIf] would all need to traverse every element in the list and build up the result from scratch. These operations are all linear in the length of the list no matter how it is represented, so a persistent structure would not make them faster.
## * Roc's compiler optimizes many list operations into in-place mutations behind the scenes, depending on how the list is being used. For example, [List.map], [List.keepIf], and [List.set] can all be optimized to perform in-place mutations.
## * If possible, it is usually best for performance to use large lists in a way where the optimizer can turn them into in-place mutations. If this is not possible, a persistent data structure might be faster - but this is a rare enough scenario that it would not be good for the average Roc program's performance if this were the way [List] worked by default. Instead, you can look outside Roc's standard modules for an implementation of a persistent data structure - likely built using [List] under the hood!
## Check if the list is empty.
##
## >>> List.isEmpty [ 1, 2, 3 ]
##
## >>> List.isEmpty []
isEmpty : List a -> Bool
isEmpty = \list ->
List.len list == 0
@ -63,22 +206,134 @@ isEmpty = \list ->
get : List a, Nat -> Result a [ OutOfBounds ]*
replace : List a, Nat, a -> { list : List a, value : a }
## Replaces the element at the given index with a replacement.
##
## >>> List.set [ "a", "b", "c" ] 1 "B"
##
## If the given index is outside the bounds of the list, returns the original
## list unmodified.
##
## To drop the element at a given index, instead of replacing it, see [List.dropAt].
set : List a, Nat, a -> List a
set = \list, index, value ->
(List.replace list index value).list
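For reference, a brief sketch of [List.replace] itself, which `set` builds on (the result comments are illustrative):

    { list, value } = List.replace [ "a", "b", "c" ] 1 "B"
    # list  == [ "a", "B", "c" ]
    # value == "b"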
## Add a single element to the end of a list.
##
## >>> List.append [ 1, 2, 3 ] 4
##
## >>> [ 0, 1, 2 ]
## >>> |> List.append 3
append : List a, a -> List a
## Add a single element to the beginning of a list.
##
## >>> List.prepend [ 1, 2, 3 ] 0
##
## >>> [ 2, 3, 4 ]
## >>> |> List.prepend 1
prepend : List a, a -> List a
## Returns the length of the list - the number of elements it contains.
##
## One [List] can store up to 2,147,483,647 elements (just over 2 billion), which
## is exactly the highest valid #I32 value. This means the #Nat this function
## returns can always be safely converted to an #I32 without losing any data.
len : List a -> Nat
## Put two lists together.
##
## >>> List.concat [ 1, 2, 3 ] [ 4, 5 ]
##
## >>> [ 0, 1, 2 ]
## >>> |> List.concat [ 3, 4 ]
concat : List a, List a -> List a
## Returns the last element in the list, or `ListWasEmpty` if it was empty.
last : List a -> Result a [ ListWasEmpty ]*
## A list with a single element in it.
##
## This is useful in pipelines, like so:
##
## websites =
## Str.concat domain ".com"
## |> List.single
##
single : a -> List a
## Returns a list with the given length, where every element is the given value.
##
##
repeat : a, Nat -> List a
## Returns the list with its elements reversed.
##
## >>> List.reverse [ 1, 2, 3 ]
reverse : List a -> List a
## Join the given lists together into one list.
##
## >>> List.join [ [ 1, 2, 3 ], [ 4, 5 ], [], [ 6, 7 ] ]
##
## >>> List.join [ [], [] ]
##
## >>> List.join []
join : List (List a) -> List a
contains : List a, a -> Bool
## Build a value using each element in the list.
##
## Starting with a given `state` value, this walks through each element in the
## list from first to last, running a given `step` function on that element
## which updates the `state`. It returns the final `state` at the end.
##
## You can use it in a pipeline:
##
## [ 2, 4, 8 ]
## |> List.walk 0 Num.add
##
## This returns 14 because:
## * `state` starts at 0 (because we pass `0` as the initial `state`)
## * Each `step` runs `Num.add state elem`, and the return value becomes the new `state`.
##
## Here is a table of how `state` changes as [List.walk] walks over the elements
## `[ 2, 4, 8 ]` using #Num.add as its `step` function to determine the next `state`.
##
## `state` | `elem` | `step state elem` (`Num.add state elem`)
## --------+--------+-----------------------------------------
## 0 | |
## 0 | 2 | 2
## 2 | 4 | 6
## 6 | 8 | 14
##
## So `state` goes through these changes:
## 1. `0` (the initial `state`)
## 2. `2` (because of `Num.add state elem` with `state` = 0 and `elem` = 2)
## 3. `6` (with `state` = 2 and `elem` = 4)
## 4. `14` (with `state` = 6 and `elem` = 8)
##
## [ 1, 2, 3 ]
## |> List.walk 0 Num.sub
##
## This returns -6 because each `step` runs `Num.sub state elem`, so the
## walk computes `((0 - 1) - 2) - 3`.
##
## Note that in other languages, `walk` is sometimes called `reduce`,
## `fold`, `foldLeft`, or `foldl`.
walk : List elem, state, (state, elem -> state) -> state
## Note that in other languages, `walkBackwards` is sometimes called `reduceRight`,
## `fold`, `foldRight`, or `foldr`.
walkBackwards : List elem, state, (state, elem -> state) -> state
## Same as [List.walk], except you can stop walking early.
##
## ## Performance Details
##
## Compared to [List.walk], this can potentially visit fewer elements (which can
## improve performance) at the cost of making each step take longer.
## However, the added cost to each step is extremely small, and can easily
## be outweighed if it results in skipping even a small number of elements.
##
## As such, it is typically better for performance to use this over [List.walk]
## if returning `Done` earlier than the last element is expected to be common.
walkUntil : List elem, state, (state, elem -> [ Continue state, Stop state ]) -> state
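A minimal sketch, assuming the signature above (`containsZero` is a hypothetical helper; the walk stops at the first zero it sees):

    containsZero : List I64 -> Bool
    containsZero = \numbers ->
        List.walkUntil numbers False \_, n ->
            if n == 0 then Stop True else Continue False

    containsZero [ 3, 0, 7 ] # True - the 7 is never visited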
sum : List (Num a) -> Num a
@ -89,40 +344,201 @@ product : List (Num a) -> Num a
product = \list ->
List.walk list 1 Num.mul
## Run the given predicate on each element of the list, returning `True` if
## any of the elements satisfy it.
any : List a, (a -> Bool) -> Bool
## Run the given predicate on each element of the list, returning `True` if
## all of the elements satisfy it.
all : List a, (a -> Bool) -> Bool
## Run the given function on each element of a list, and return all the
## elements for which the function returned `True`.
##
## >>> List.keepIf [ 1, 2, 3, 4 ] (\num -> num > 2)
##
## ## Performance Details
##
## [List.keepIf] always returns a list that takes up exactly the same amount
## of memory as the original, even if its length decreases. This is because it
## can't know in advance exactly how much space it will need, and if it guesses a
## length that's too low, it would have to re-allocate.
##
## (If you want to do an operation like this which reduces the memory footprint
## of the resulting list, you can do two passes over the list with [List.walk] - one
## to calculate the precise new size, and another to populate the new list.)
##
## If given a unique list, [List.keepIf] will mutate it in place to assemble the appropriate list.
## If that happens, this function will not allocate any new memory on the heap.
## If all elements in the list end up being kept, Roc will return the original
## list unaltered.
##
keepIf : List a, (a -> Bool) -> List a
## Run the given function on each element of a list, and return all the
## elements for which the function returned `False`.
##
## >>> List.dropIf [ 1, 2, 3, 4 ] (\num -> num > 2)
##
## ## Performance Details
##
## `List.dropIf` has the same performance characteristics as [List.keepIf].
## See its documentation for details on those characteristics!
dropIf : List a, (a -> Bool) -> List a
dropIf = \list, predicate ->
List.keepIf list (\e -> Bool.not (predicate e))
## This works like [List.map], except only the transformed values that are
## wrapped in `Ok` are kept. Any that are wrapped in `Err` are dropped.
##
## >>> List.keepOks [ [ "a", "b" ], [], [], [ "c", "d", "e" ] ] List.last
##
## >>> fn = \str -> if Str.isEmpty str then Err StrWasEmpty else Ok (Str.len str)
## >>>
## >>> List.keepOks [ "", "a", "bc", "", "d", "ef", "" ] fn
keepOks : List before, (before -> Result after *) -> List after
## This works like [List.map], except only the transformed values that are
## wrapped in `Err` are kept. Any that are wrapped in `Ok` are dropped.
##
## >>> List.keepErrs [ [ "a", "b" ], [], [], [ "c", "d", "e" ] ] List.last
##
## >>> fn = \str -> if Str.isEmpty str then Err StrWasEmpty else Ok (Str.len str)
## >>>
## >>> List.keepErrs [ "", "a", "bc", "", "d", "ef", "" ] fn
keepErrs : List before, (before -> Result * after) -> List after
## Convert each element in the list to something new, by calling a conversion
## function on each of them. Then return a new list of the converted values.
##
## >>> List.map [ 1, 2, 3 ] (\num -> num + 1)
##
## >>> List.map [ "", "a", "bc" ] Str.isEmpty
map : List a, (a -> b) -> List b
## Run a transformation function on the first element of each list,
## and use that as the first element in the returned list.
## Repeat until a list runs out of elements.
##
## Some languages have a function named `zip`, which does something similar to
## calling [List.map2] passing two lists and `Pair`:
##
## >>> zipped = List.map2 [ "a", "b", "c" ] [ 1, 2, 3 ] Pair
map2 : List a, List b, (a, b -> c) -> List c
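Completing the `zipped` example above as a rough sketch (the result comments are illustrative):

    zipped = List.map2 [ "a", "b", "c" ] [ 1, 2, 3 ] Pair
    # [ Pair "a" 1, Pair "b" 2, Pair "c" 3 ]

    List.map2 [ 1, 2, 3 ] [ 10, 20, 30, 40 ] Num.add
    # [ 11, 22, 33 ] - the extra 40 is ignored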
## Run a transformation function on the first element of each list,
## and use that as the first element in the returned list.
## Repeat until a list runs out of elements.
map3 : List a, List b, List c, (a, b, c -> d) -> List d
## Run a transformation function on the first element of each list,
## and use that as the first element in the returned list.
## Repeat until a list runs out of elements.
map4 : List a, List b, List c, List d, (a, b, c, d -> e) -> List e
## This works like [List.map], except it also passes the index
## of the element to the conversion function.
mapWithIndex : List a, (a, Nat -> b) -> List b
## Returns a list of all the integers between one and another,
## including both of the given numbers.
##
## >>> List.range 2 8
range : Int a, Int a -> List (Int a)
sortWith : List a, (a, a -> [ LT, EQ, GT ] ) -> List a
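A hedged sketch of [List.sortWith] ordering records by one field (the `byAge` helper and result are illustrative):

    byAge = \a, b -> Num.compare a.age b.age

    List.sortWith [ { age: 30 }, { age: 12 }, { age: 25 } ] byAge
    # [ { age: 12 }, { age: 25 }, { age: 30 } ]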
## Sorts a list in ascending order (lowest to highest), using a function which
## specifies a way to represent each element as a number.
##
## To sort in descending order (highest to lowest), use [List.sortDesc] instead.
sortAsc : List (Num a) -> List (Num a)
sortAsc = \list -> List.sortWith list Num.compare
## Sorts a list in descending order (highest to lowest), using a function which
## specifies a way to represent each element as a number.
##
## To sort in ascending order (lowest to highest), use [List.sortAsc] instead.
sortDesc : List (Num a) -> List (Num a)
sortDesc = \list -> List.sortWith list (\a, b -> Num.compare b a)
swap : List a, Nat, Nat -> List a
## Returns the first element in the list, or `ListWasEmpty` if it was empty.
first : List a -> Result a [ ListWasEmpty ]*
## Remove the first element from the list.
##
## Returns the new list (with the removed element missing).
dropFirst : List elem -> List elem
## Remove the last element from the list.
##
## Returns the new list (with the removed element missing).
dropLast : List elem -> List elem
## Returns the given number of elements from the beginning of the list.
##
## >>> List.takeFirst [ 1, 2, 3, 4, 5, 6, 7, 8 ] 4
##
## If there are fewer elements in the list than the requested number,
## returns the entire list.
##
## >>> List.takeFirst [ 1, 2 ] 5
##
## To *remove* elements from the beginning of the list, use `List.takeLast`.
##
## To remove elements from both the beginning and end of the list,
## use `List.sublist`.
##
## To split the list into two lists, use `List.split`.
##
## ## Performance Details
##
## When given a Unique list, this runs extremely fast. It sets the list's length
## to the given length value, and frees the leftover elements. This runs very
## slightly faster than `List.takeLast`.
##
## In fact, `List.takeFirst list 1` runs faster than `List.first list` when given
## a Unique list, because [List.first] returns the first element as well -
## which introduces a conditional bounds check as well as a memory load.
takeFirst : List elem, Nat -> List elem
## Returns the given number of elements from the end of the list.
##
## >>> List.takeLast [ 1, 2, 3, 4, 5, 6, 7, 8 ] 4
##
## If there are fewer elements in the list than the requested number,
## returns the entire list.
##
## >>> List.takeLast [ 1, 2 ] 5
##
## To *remove* elements from the end of the list, use `List.takeFirst`.
##
## To remove elements from both the beginning and end of the list,
## use `List.sublist`.
##
## To split the list into two lists, use `List.split`.
##
## ## Performance Details
##
## When given a Unique list, this runs extremely fast. It moves the list's
## pointer to the index at the given length value, updates its length,
## and frees the leftover elements. This runs very nearly as fast as
## `List.takeFirst` on a Unique list.
##
## In fact, `List.takeLast list 1` runs faster than `List.first list` when given
## a Unique list, because [List.first] returns the first element as well -
## which introduces a conditional bounds check as well as a memory load.
takeLast : List elem, Nat -> List elem
## Drops n elements from the beginning of the list.
drop : List elem, Nat -> List elem
## Drops the element at the given index from the list.
##
## This has no effect if the given index is outside the bounds of the list.
##
## To replace the element at a given index, instead of dropping it, see [List.set].
dropAt : List elem, Nat -> List elem
min : List (Num a) -> Result (Num a) [ ListWasEmpty ]*
@ -163,11 +579,41 @@ maxHelp = \list, initial ->
else
bestSoFar
## Like [List.map], except the transformation function wraps the return value
## in a list. At the end, all the lists get joined together into one list.
##
## You may know a similar function named `concatMap` in other languages.
joinMap : List a, (a -> List b) -> List b
joinMap = \list, mapper ->
List.walk list [] (\state, elem -> List.concat state (mapper elem))
## Returns the first element of the list satisfying a predicate function.
## If no satisfying element is found, an `Err NotFound` is returned.
find : List elem, (elem -> Bool) -> Result elem [ NotFound ]*
## Returns a subsection of the given list, beginning at the `start` index and
## including a total of `len` elements.
##
## If `start` is outside the bounds of the given list, returns the empty list.
##
## >>> List.sublist { start: 4, len: 0 } [ 1, 2, 3 ]
##
## If more elements are requested than exist in the list, returns as many as it can.
##
## >>> List.sublist { start: 2, len: 10 } [ 1, 2, 3, 4, 5 ]
##
## > If you want a sublist which goes all the way to the end of the list, no
## > matter how long the list is, `List.takeLast` can do that more efficiently.
##
## Some languages have a function called **`slice`** which works similarly to this.
sublist : List elem, { start : Nat, len : Nat } -> List elem
intersperse : List elem, elem -> List elem
## Splits the list into two lists, around the given index.
##
## The returned lists are labeled `before` and `others`. The `before` list will
## contain all the elements whose index in the original list was **less than**
## the given index, and the `others` list will be all the others. (This
## means if you give an index of 0, the `before` list will be empty and the
## `others` list will have the same elements as the original list.)
split : List elem, Nat -> { before: List elem, others: List elem }
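A small sketch, assuming the signature above (result comments are illustrative):

    List.split [ 1, 2, 3, 4, 5 ] 2
    # { before: [ 1, 2 ], others: [ 3, 4, 5 ] }

    List.split [ 1, 2, 3, 4, 5 ] 0
    # { before: [], others: [ 1, 2, 3, 4, 5 ] }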

View File

@ -2,38 +2,79 @@ interface Result
exposes [ Result, isOk, isErr, map, mapErr, after, withDefault ]
imports [ Bool.{ Bool } ]
## The result of an operation that could fail: either the operation went
## okay, or else there was an error of some sort.
Result ok err : [ Ok ok, Err err ]
## Return True if the result indicates a success, else return False
##
## >>> Result.isOk (Ok 5)
isOk : Result ok err -> Bool
isOk = \result ->
when result is
Ok _ -> True
Err _ -> False
## Return True if the result indicates a failure, else return False
##
## >>> Result.isErr (Err "uh oh")
isErr : Result ok err -> Bool
isErr = \result ->
when result is
Ok _ -> False
Err _ -> True
## If the result is `Ok`, return the value it holds. Otherwise, return
## the given default value.
##
## >>> Result.withDefault (Ok 7) 42
##
## >>> Result.withDefault (Err "uh oh") 42
withDefault : Result ok err, ok -> ok
withDefault = \result, default ->
when result is
Ok value -> value
Err _ -> default
## If the result is `Ok`, transform the value it holds by running a conversion
## function on it. Then return a new `Ok` holding the transformed value.
##
## (If the result is `Err`, this has no effect. Use [mapErr] to transform an `Err`.)
##
## >>> Result.map (Ok 12) Num.negate
##
## >>> Result.map (Err "yipes!") Num.negate
##
## `map` functions like this are common in Roc, and they all work similarly.
## See for example [List.map], `Set.map`, and `Dict.map`.
map : Result a err, (a -> b) -> Result b err
map = \result, transform ->
when result is
Ok v -> Ok (transform v)
Err e -> Err e
## If the result is `Err`, transform the value it holds by running a conversion
## function on it. Then return a new `Err` holding the transformed value.
##
## (If the result is `Ok`, this has no effect. Use [map] to transform an `Ok`.)
##
## >>> Result.mapErr (Err "yipes!") Str.isEmpty
##
## >>> Result.mapErr (Ok 12) Str.isEmpty
mapErr : Result ok a, (a -> b) -> Result ok b
mapErr = \result, transform ->
when result is
Ok v -> Ok v
Err e -> Err (transform e)
## If the result is `Ok`, transform the entire result by running a conversion
## function on the value the `Ok` holds. Then return that new result.
##
## (If the result is `Err`, this has no effect. Use `afterErr` to transform an `Err`.)
##
## >>> Result.after (Ok -1) \num -> if num < 0 then Err "negative!" else Ok -num
##
## >>> Result.after (Err "yipes!") \num -> if num < 0 then Err "negative!" else Ok -num
after : Result a err, (a -> Result b err) -> Result b err
after = \result, transform ->
when result is

View File

@ -16,10 +16,17 @@ interface Set
]
imports [ List, Bool.{ Bool }, Dict.{ values } ]
## An empty set.
empty : Set k
single : k -> Set k
## Make sure never to insert a *NaN* to a [Set]! Because *NaN* is defined to be
## unequal to *NaN*, adding a *NaN* results in an entry that can never be
## retrieved or removed from the [Set].
insert : Set k, k -> Set k
len : Set k -> Nat
## Drops the given element from the set.
remove : Set k, k -> Set k
contains : Set k, k -> Bool
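A brief sketch of the Set operations above (result comments are illustrative):

    colors =
        Set.empty
            |> Set.insert "red"
            |> Set.insert "green"
            |> Set.insert "red"

    Set.len colors            # 2 - inserting "red" twice stores it only once
    Set.contains colors "red" # True

    Set.remove colors "red" |> Set.contains "red" # False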

View File

@ -36,6 +36,81 @@ interface Str
]
imports [ Bool.{ Bool }, Result.{ Result } ]
## # Types
##
## Dealing with text is a deep topic, so by design, Roc's `Str` module sticks
## to the basics.
##
## ### Unicode
##
## Unicode can represent text values which span multiple languages, symbols, and emoji.
## Here are some valid Roc strings:
##
## "Roc!"
## "鹏"
## "🕊"
##
## Every Unicode string is a sequence of [extended grapheme clusters](http://www.unicode.org/glossary/#extended_grapheme_cluster).
## An extended grapheme cluster represents what a person reading a string might
## call a "character" - like "A" or "ö" or "👩‍👩‍👦‍👦".
## Because the term "character" means different things in different areas of
## programming, and "extended grapheme cluster" is a mouthful, in Roc we use the
## term "grapheme" as a shorthand for the more precise "extended grapheme cluster."
##
## You can get the number of graphemes in a string by calling [Str.countGraphemes] on it:
##
## Str.countGraphemes "Roc!"
## Str.countGraphemes "折り紙"
## Str.countGraphemes "🕊"
##
## > The `countGraphemes` function walks through the entire string to get its answer,
## > so if you want to check whether a string is empty, you'll get much better performance
## > by calling `Str.isEmpty myStr` instead of `Str.countGraphemes myStr == 0`.
##
## ### Escape sequences
##
## If you put a `\` in a Roc string literal, it begins an *escape sequence*.
## An escape sequence is a convenient way to insert certain strings into other strings.
## For example, suppose you write this Roc string:
##
## "I took the one less traveled by,\nAnd that has made all the difference."
##
## The `"\n"` in the middle will insert a line break into this string. There are
## other ways of getting a line break in there, but `"\n"` is the most common.
##
## Another way you could insert a newline is by writing `\u{0x0A}` instead of `\n`.
## That would result in the same string, because the `\u` escape sequence inserts
## [Unicode code points](https://unicode.org/glossary/#code_point) directly into
## the string. The Unicode code point 10 is a newline, and 10 is `0A` in hexadecimal.
## `0x0A` is a Roc hexadecimal literal, and `\u` escape sequences are always
## followed by a hexadecimal literal inside `{` and `}` like this.
##
## As another example, `"R\u{0x6F}c"` is the same string as `"Roc"`, because
## `"\u{0x6F}"` corresponds to the Unicode code point for lowercase `o`. If you
## want to [spice things up a bit](https://en.wikipedia.org/wiki/Metal_umlaut),
## you can write `"R\u{0xF6}c"` as an alternative way to get the string `"Röc"`.
##
## Roc strings also support these escape sequences:
##
## * `\\` - an actual backslash (writing a single `\` always begins an escape sequence!)
## * `\"` - an actual quotation mark (writing a `"` without a `\` ends the string)
## * `\r` - [carriage return](https://en.wikipedia.org/wiki/Carriage_Return)
## * `\t` - [horizontal tab](https://en.wikipedia.org/wiki/Tab_key#Tab_characters)
## * `\v` - [vertical tab](https://en.wikipedia.org/wiki/Tab_key#Tab_characters)
##
## You can also use escape sequences to insert named strings into other strings, like so:
##
## name = "Lee"
## city = "Roctown"
##
## greeting = "Hello there, \(name)! Welcome to \(city)."
##
## Here, `greeting` will become the string `"Hello there, Lee! Welcome to Roctown."`.
## This is known as [string interpolation](https://en.wikipedia.org/wiki/String_interpolation),
## and you can use it as many times as you like inside a string. The name
## between the parentheses must refer to a `Str` value that is currently in
## scope, and it must be a name - it can't be an arbitrary expression like a function call.
Utf8ByteProblem :
@ -50,15 +125,68 @@ Utf8ByteProblem :
Utf8Problem : { byteIndex : Nat, problem : Utf8ByteProblem }
## Returns `True` if the string is empty, and `False` otherwise.
##
## >>> Str.isEmpty "hi!"
##
## >>> Str.isEmpty ""
isEmpty : Str -> Bool
concat : Str, Str -> Str
## Combine a list of strings into a single string, with a separator
## string in between each.
##
## >>> Str.joinWith [ "one", "two", "three" ] ", "
joinWith : List Str, Str -> Str
## Split a string around a separator.
##
## >>> Str.split "1,2,3" ","
##
## Passing `""` for the separator is not useful; it returns the original string
## wrapped in a list.
##
## >>> Str.split "1,2,3" ""
##
## To split a string into its individual graphemes, use `Str.graphemes`
split : Str, Str -> List Str
repeat : Str, Nat -> Str
## Count the number of [extended grapheme clusters](http://www.unicode.org/glossary/#extended_grapheme_cluster)
## in the string.
##
## Str.countGraphemes "Roc!" # 4
## Str.countGraphemes "七巧板" # 3
## Str.countGraphemes "🕊" # 1
countGraphemes : Str -> Nat
## If the string begins with a [Unicode code point](http://www.unicode.org/glossary/#code_point)
## equal to the given [U32], return `True`. Otherwise return `False`.
##
## If the given [Str] is empty, or if the given [U32] is not a valid
## code point, this will return `False`.
##
## **Performance Note:** This runs slightly faster than [Str.startsWith], so
## if you want to check whether a string begins with something that's representable
## in a single code point, you can use (for example) `Str.startsWithCodePt '鹏'`
## instead of `Str.startsWith "鹏"`. ('鹏' evaluates to the [U32]
## value `40527`.) This will not work for graphemes which take up multiple code
## points, however; `Str.startsWithCodePt '👩‍👩‍👦‍👦'` would be a compiler error
## because 👩‍👩‍👦‍👦 takes up multiple code points and cannot be represented as a
## single [U32]. You'd need to use `Str.startsWith "👩‍👩‍👦‍👦"` instead.
startsWithCodePt : Str, U32 -> Bool
## Return a [List] of the string's [U8] UTF-8 [code units](https://unicode.org/glossary/#code_unit).
## (To split the string into a [List] of smaller [Str] values instead of [U8] values,
## see [Str.split].)
##
## >>> Str.toUtf8 "👩‍👩‍👦‍👦"
##
## >>> Str.toUtf8 "Roc"
##
## >>> Str.toUtf8 "鹏"
##
## >>> Str.toUtf8 "🐦"
toUtf8 : Str -> List U8
# fromUtf8 : List U8 -> Result Str [ BadUtf8 Utf8Problem ]*
@ -70,6 +198,8 @@ fromUtf8Range : List U8, { start : Nat, count : Nat } -> Result Str [ BadUtf8 Ut
startsWith : Str, Str -> Bool
endsWith : Str, Str -> Bool
## Return the string with any blank spaces removed from both the beginning
## as well as the end.
trim : Str -> Str
trimLeft : Str -> Str
trimRight : Str -> Str

View File

@ -269,7 +269,9 @@ pub const NUM_ATAN: IntrinsicName = float_intrinsic!("roc_builtins.num.atan");
pub const NUM_IS_FINITE: IntrinsicName = float_intrinsic!("roc_builtins.num.is_finite");
pub const NUM_POW_INT: IntrinsicName = int_intrinsic!("roc_builtins.num.pow_int");
pub const NUM_DIV_CEIL: IntrinsicName = int_intrinsic!("roc_builtins.num.div_ceil");
pub const NUM_ROUND: IntrinsicName = float_intrinsic!("roc_builtins.num.round");
pub const NUM_ROUND_F32: IntrinsicName = int_intrinsic!("roc_builtins.num.round_f32");
pub const NUM_ROUND_F64: IntrinsicName = int_intrinsic!("roc_builtins.num.round_f64");
pub const NUM_BYTES_TO_U16: &str = "roc_builtins.num.bytes_to_u16";
pub const NUM_BYTES_TO_U32: &str = "roc_builtins.num.bytes_to_u32";

View File

@ -54,7 +54,7 @@ pub struct Annotation {
pub(crate) struct CanDefs {
defs: Vec<Option<Def>>,
def_ordering: DefOrdering,
pub(crate) abilities_in_scope: Vec<Symbol>,
aliases: VecMap<Symbol, Alias>,
}
@ -192,17 +192,12 @@ fn sort_type_defs_before_introduction(
}
// find the strongly connected components and their relations
let nodes: Vec<_> = (0..capacity as u32).collect();
let mut output = Vec::with_capacity(capacity);
for group in matrix.strongly_connected_components(&nodes).groups() {
for index in group.iter_ones() {
output.push(symbols[index])
}
}
output
matrix
.strongly_connected_components_all()
.groups()
.flat_map(|group| group.iter_ones())
.map(|index| symbols[index])
.collect()
}
#[inline(always)]
@ -523,6 +518,7 @@ pub(crate) fn canonicalize_defs<'a>(
CanDefs {
defs,
def_ordering,
abilities_in_scope,
// The result needs a thread-safe `SendMap`
aliases,
},
@ -766,8 +762,11 @@ pub(crate) fn sort_can_defs(
mut defs,
def_ordering,
aliases,
abilities_in_scope,
} = defs;
output.abilities_in_scope = abilities_in_scope;
for (symbol, alias) in aliases.into_iter() {
output.aliases.insert(symbol, alias);
}
@ -786,14 +785,10 @@ pub(crate) fn sort_can_defs(
};
}
let nodes: Vec<_> = (0..defs.len() as u32).collect();
// We first perform SCC based on any reference, both variable usage and calls
// considering both value definitions and function bodies. This will spot any
// recursive relations between any 2 definitions.
let sccs = def_ordering
.references
.strongly_connected_components(&nodes);
let sccs = def_ordering.references.strongly_connected_components_all();
let mut declarations = Vec::new();
@ -834,10 +829,9 @@ pub(crate) fn sort_can_defs(
// boom = \{} -> boom {}
//
// In general we cannot spot faulty recursion (halting problem) so this is our best attempt
let nodes: Vec<_> = group.iter_ones().map(|v| v as u32).collect();
let direct_sccs = def_ordering
.direct_references
.strongly_connected_components(&nodes);
.strongly_connected_components_subset(group);
let declaration = if direct_sccs.groups().count() == 1 {
// all defs are part of the same direct cycle, that is invalid!
@ -1567,8 +1561,7 @@ fn correct_mutual_recursive_type_alias<'a>(
let mut solved_aliases = bitvec::vec::BitVec::<usize>::repeat(false, capacity);
let group: Vec<_> = (0u32..capacity as u32).collect();
let sccs = matrix.strongly_connected_components(&group);
let sccs = matrix.strongly_connected_components_all();
// scratchpad to store aliases that are modified in the current iteration.
// Only used when there is more than one alias in a group. See below why

View File

@ -30,6 +30,7 @@ pub struct Output {
pub introduced_variables: IntroducedVariables,
pub aliases: VecMap<Symbol, Alias>,
pub non_closures: VecSet<Symbol>,
pub abilities_in_scope: Vec<Symbol>,
}
impl Output {

View File

@ -1,4 +1,5 @@
use crate::abilities::AbilitiesStore;
use crate::annotation::canonicalize_annotation;
use crate::def::{canonicalize_defs, sort_can_defs, Declaration, Def};
use crate::effect_module::HostedGeneratedFunctions;
use crate::env::Env;
@ -11,7 +12,7 @@ use roc_collections::{MutMap, SendMap, VecSet};
use roc_module::ident::Ident;
use roc_module::ident::Lowercase;
use roc_module::symbol::{IdentIds, IdentIdsByModule, ModuleId, ModuleIds, Symbol};
use roc_parse::ast;
use roc_parse::ast::{self, TypeAnnotation};
use roc_parse::header::HeaderFor;
use roc_parse::pattern::PatternType;
use roc_problem::can::{Problem, RuntimeError};
@ -49,6 +50,7 @@ pub struct ModuleOutput {
pub problems: Vec<Problem>,
pub referenced_values: VecSet<Symbol>,
pub referenced_types: VecSet<Symbol>,
pub symbols_from_requires: Vec<(Loc<Symbol>, Loc<Type>)>,
pub scope: Scope,
}
@ -156,16 +158,17 @@ fn has_no_implementation(expr: &Expr) -> bool {
// TODO trim these down
#[allow(clippy::too_many_arguments)]
pub fn canonicalize_module_defs<'a>(
arena: &Bump,
arena: &'a Bump,
loc_defs: &'a [Loc<ast::Def<'a>>],
header_for: &roc_parse::header::HeaderFor,
home: ModuleId,
module_ids: &ModuleIds,
module_ids: &'a ModuleIds,
exposed_ident_ids: IdentIds,
dep_idents: &'a IdentIdsByModule,
aliases: MutMap<Symbol, Alias>,
exposed_imports: MutMap<Ident, (Symbol, Region)>,
exposed_symbols: &VecSet<Symbol>,
symbols_from_requires: &[(Loc<Symbol>, Loc<TypeAnnotation<'a>>)],
var_store: &mut VarStore,
) -> Result<ModuleOutput, RuntimeError> {
let mut can_exposed_imports = MutMap::default();
@ -349,9 +352,37 @@ pub fn canonicalize_module_defs<'a>(
};
match sort_can_defs(&mut env, defs, new_output) {
(Ok(mut declarations), output) => {
(Ok(mut declarations), mut output) => {
use crate::def::Declaration::*;
let symbols_from_requires = symbols_from_requires
.iter()
.map(|(symbol, loc_ann)| {
let ann = canonicalize_annotation(
&mut env,
&mut scope,
&loc_ann.value,
loc_ann.region,
var_store,
&output.abilities_in_scope,
);
ann.add_to(
&mut output.aliases,
&mut output.references,
&mut output.introduced_variables,
);
(
*symbol,
Loc {
value: ann.typ,
region: loc_ann.region,
},
)
})
.collect();
if let GeneratedInfo::Hosted {
effect_symbol,
generated_functions,
@ -545,6 +576,7 @@ pub fn canonicalize_module_defs<'a>(
referenced_types,
exposed_imports: can_exposed_imports,
problems: env.problems,
symbols_from_requires,
lookups,
};

View File

@ -129,8 +129,14 @@ impl ReferenceMatrix {
TopologicalSort::Groups { groups }
}
/// Get the strongly-connected components of the set of input nodes.
pub fn strongly_connected_components(&self, nodes: &[u32]) -> Sccs {
/// Get the strongly-connected components of all nodes in the matrix
pub fn strongly_connected_components_all(&self) -> Sccs {
let bitvec = BitVec::repeat(true, self.length);
self.strongly_connected_components_subset(&bitvec)
}
/// Get the strongly-connected components of a set of input nodes.
pub fn strongly_connected_components_subset(&self, nodes: &BitSlice) -> Sccs {
let mut params = Params::new(self.length, nodes);
'outer: loop {
@ -176,15 +182,15 @@ struct Params {
p: Vec<u32>,
s: Vec<u32>,
scc: Sccs,
scca: Vec<u32>,
scca: BitVec,
}
impl Params {
fn new(length: usize, group: &[u32]) -> Self {
fn new(length: usize, group: &BitSlice) -> Self {
let mut preorders = vec![Preorder::Removed; length];
for value in group {
preorders[*value as usize] = Preorder::Empty;
for index in group.iter_ones() {
preorders[index] = Preorder::Empty;
}
Self {
@ -196,7 +202,7 @@ impl Params {
matrix: ReferenceMatrix::new(length),
components: 0,
},
scca: Vec::new(),
scca: BitVec::repeat(false, length),
}
}
}
@ -210,7 +216,7 @@ fn recurse_onto(length: usize, bitvec: &BitVec, v: usize, params: &mut Params) {
params.p.push(v as u32);
for w in bitvec[v * length..][..length].iter_ones() {
if !params.scca.contains(&(w as u32)) {
if !params.scca[w] {
match params.preorders[w] {
Preorder::Filled(pw) => loop {
let index = *params.p.last().unwrap();
@ -241,7 +247,7 @@ fn recurse_onto(length: usize, bitvec: &BitVec, v: usize, params: &mut Params) {
.scc
.matrix
.set_row_col(params.scc.components, node as usize, true);
params.scca.push(node);
params.scca.set(node as usize, true);
params.preorders[node as usize] = Preorder::Removed;
if node as usize == v {
break;
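A minimal sketch (not part of the diff) of how the two new SCC entry points relate, using only the ReferenceMatrix/Sccs API shown above (`strongly_connected_components_all`, `strongly_connected_components_subset`, `groups`, `iter_ones`); the helper names are hypothetical:

    use bitvec::slice::BitSlice;

    // Run the SCC computation over every node in the matrix...
    fn collect_sccs(matrix: &ReferenceMatrix) -> Vec<Vec<usize>> {
        matrix
            .strongly_connected_components_all()
            .groups()
            .map(|group| group.iter_ones().collect())
            .collect()
    }

    // ...or only over the nodes whose bits are set in `subset`, which is how
    // sort_can_defs narrows the direct-reference check to a single cycle group.
    fn collect_sccs_of(matrix: &ReferenceMatrix, subset: &BitSlice) -> Vec<Vec<usize>> {
        matrix
            .strongly_connected_components_subset(subset)
            .groups()
            .map(|group| group.iter_ones().collect())
            .collect()
    }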

View File

@ -13,6 +13,13 @@ impl<K, V> Default for VecMap<K, V> {
}
}
impl<K, V> VecMap<K, V> {
pub fn len(&self) -> usize {
debug_assert_eq!(self.keys.len(), self.values.len());
self.keys.len()
}
}
impl<K: PartialEq, V> VecMap<K, V> {
pub fn with_capacity(capacity: usize) -> Self {
Self {
@ -21,11 +28,6 @@ impl<K: PartialEq, V> VecMap<K, V> {
}
}
pub fn len(&self) -> usize {
debug_assert_eq!(self.keys.len(), self.values.len());
self.keys.len()
}
pub fn is_empty(&self) -> bool {
debug_assert_eq!(self.keys.len(), self.values.len());
self.keys.is_empty()
@ -58,15 +60,9 @@ impl<K: PartialEq, V> VecMap<K, V> {
self.keys.contains(key)
}
pub fn remove(&mut self, key: &K) {
match self.keys.iter().position(|x| x == key) {
None => {
// just do nothing
}
Some(index) => {
self.swap_remove(index);
}
}
pub fn remove(&mut self, key: &K) -> Option<(K, V)> {
let index = self.keys.iter().position(|x| x == key)?;
Some(self.swap_remove(index))
}
pub fn get(&self, key: &K) -> Option<&V> {
@ -83,7 +79,7 @@ impl<K: PartialEq, V> VecMap<K, V> {
}
}
pub fn get_or_insert(&mut self, key: K, default_value: impl Fn() -> V) -> &mut V {
pub fn get_or_insert(&mut self, key: K, default_value: impl FnOnce() -> V) -> &mut V {
match self.keys.iter().position(|x| x == &key) {
Some(index) => &mut self.values[index],
None => {
@ -97,15 +93,15 @@ impl<K: PartialEq, V> VecMap<K, V> {
}
}
pub fn iter(&self) -> impl Iterator<Item = (&K, &V)> {
pub fn iter(&self) -> impl ExactSizeIterator<Item = (&K, &V)> {
self.keys.iter().zip(self.values.iter())
}
pub fn keys(&self) -> impl Iterator<Item = &K> {
pub fn keys(&self) -> impl ExactSizeIterator<Item = &K> {
self.keys.iter()
}
pub fn values(&self) -> impl Iterator<Item = &V> {
pub fn values(&self) -> impl ExactSizeIterator<Item = &V> {
self.values.iter()
}
@ -159,6 +155,7 @@ impl<K, V> IntoIterator for VecMap<K, V> {
fn into_iter(self) -> Self::IntoIter {
IntoIter {
len: self.len(),
keys: self.keys.into_iter(),
values: self.values.into_iter(),
}
@ -166,6 +163,7 @@ impl<K, V> IntoIterator for VecMap<K, V> {
}
pub struct IntoIter<K, V> {
len: usize,
keys: std::vec::IntoIter<K>,
values: std::vec::IntoIter<V>,
}
@ -180,3 +178,9 @@ impl<K, V> Iterator for IntoIter<K, V> {
}
}
}
impl<K, V> ExactSizeIterator for IntoIter<K, V> {
fn len(&self) -> usize {
self.len
}
}
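A small usage sketch (not part of the diff) of the updated `VecMap::remove`, which now hands back the removed key/value pair instead of discarding it; `insert` and `Default` are the existing VecMap API:

    use roc_collections::VecMap;

    fn remove_returns_the_pair() {
        let mut aliases: VecMap<&str, u32> = VecMap::default();
        aliases.insert("a", 1);
        // The first removal yields the pair, the second finds nothing.
        assert_eq!(aliases.remove(&"a"), Some(("a", 1)));
        assert_eq!(aliases.remove(&"a"), None);
    }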

View File

@ -1300,7 +1300,7 @@ pub fn constrain_decls(
constraint
}
fn constrain_def_pattern(
pub fn constrain_def_pattern(
constraints: &mut Constraints,
env: &Env,
loc_pattern: &Loc<Pattern>,
@ -1706,7 +1706,7 @@ fn constrain_def(
}
}
fn constrain_def_make_constraint(
pub fn constrain_def_make_constraint(
constraints: &mut Constraints,
new_rigid_variables: Vec<Variable>,
new_infer_variables: Vec<Variable>,

View File

@ -3,13 +3,16 @@ use roc_can::abilities::AbilitiesStore;
use roc_can::constraint::{Constraint, Constraints};
use roc_can::def::Declaration;
use roc_can::expected::Expected;
use roc_can::pattern::Pattern;
use roc_collections::all::MutMap;
use roc_error_macros::internal_error;
use roc_module::symbol::{ModuleId, Symbol};
use roc_region::all::{Loc, Region};
use roc_types::solved_types::{FreeVars, SolvedType};
use roc_types::subs::{VarStore, Variable};
use roc_types::types::{Category, Type};
use roc_types::types::{AnnotationSource, Category, Type};
use crate::expr::{constrain_def_make_constraint, constrain_def_pattern, Env};
/// The types of all exposed values/functions of a collection of modules
#[derive(Clone, Debug, Default)]
@ -94,12 +97,14 @@ pub enum ExposedModuleTypes {
pub fn constrain_module(
constraints: &mut Constraints,
symbols_from_requires: Vec<(Loc<Symbol>, Loc<Type>)>,
abilities_store: &AbilitiesStore,
declarations: &[Declaration],
home: ModuleId,
) -> Constraint {
let constraint = crate::expr::constrain_decls(constraints, home, declarations);
let constraint =
constrain_symbols_from_requires(constraints, symbols_from_requires, home, constraint);
let constraint = frontload_ability_constraints(constraints, abilities_store, constraint);
// The module constraint should always save the environment at the end.
@ -108,6 +113,60 @@ pub fn constrain_module(
constraint
}
fn constrain_symbols_from_requires(
constraints: &mut Constraints,
symbols_from_requires: Vec<(Loc<Symbol>, Loc<Type>)>,
home: ModuleId,
constraint: Constraint,
) -> Constraint {
symbols_from_requires
.into_iter()
.fold(constraint, |constraint, (loc_symbol, loc_type)| {
if loc_symbol.value.module_id() == home {
// 1. Required symbols can only be specified in package modules
// 2. Required symbols come from app modules
// But, if we are running e.g. `roc check` on a package module, there is no app
// module, and we will have instead put the required symbols in the package module
// namespace. If this is the case, we want to introduce the symbols as if they had
// the types they are annotated with.
let rigids = Default::default();
let env = Env { home, rigids };
let pattern = Loc::at_zero(roc_can::pattern::Pattern::Identifier(loc_symbol.value));
let def_pattern_state =
constrain_def_pattern(constraints, &env, &pattern, loc_type.value);
constrain_def_make_constraint(
constraints,
// No new rigids or flex vars because they are represented in the type
// annotation.
vec![],
vec![],
Constraint::True,
constraint,
def_pattern_state,
)
} else {
// Otherwise, this symbol comes from an app module - we want to check that the type
// provided by the app is in fact what the package module requires.
let arity = loc_type.value.arity();
let provided_eq_requires_constr = constraints.lookup(
loc_symbol.value,
Expected::FromAnnotation(
loc_symbol.map(|&s| Pattern::Identifier(s)),
arity,
AnnotationSource::RequiredSymbol {
region: loc_type.region,
},
loc_type.value,
),
loc_type.region,
);
constraints.and_constraint([provided_eq_requires_constr, constraint])
}
})
}
pub fn frontload_ability_constraints(
constraints: &mut Constraints,
abilities_store: &AbilitiesStore,

View File

@ -10,6 +10,14 @@ pub mod pattern;
pub mod spaces;
use bumpalo::{collections::String, Bump};
use roc_parse::ast::{Def, Module};
use roc_region::all::Loc;
#[derive(Debug, PartialEq)]
pub struct Ast<'a> {
pub module: Module<'a>,
pub defs: bumpalo::collections::vec::Vec<'a, Loc<Def<'a>>>,
}
#[derive(Debug)]
pub struct Buf<'a> {

View File

@ -11,7 +11,7 @@ use roc_parse::header::{
use roc_parse::ident::UppercaseIdent;
use roc_region::all::Loc;
pub fn fmt_module<'a, 'buf>(buf: &mut Buf<'buf>, module: &'a Module<'a>) {
pub fn fmt_module<'a>(buf: &mut Buf<'_>, module: &'a Module<'a>) {
match module {
Module::Interface { header } => {
fmt_interface_header(buf, header);

View File

@ -1,6 +1,21 @@
use roc_parse::ast::CommentOrNewline;
use bumpalo::collections::vec::Vec;
use bumpalo::Bump;
use roc_module::called_via::{BinOp, UnaryOp};
use roc_parse::{
ast::{
AbilityMember, AssignedField, Collection, CommentOrNewline, Def, Expr, Has, HasClause,
Module, Pattern, Spaced, StrLiteral, StrSegment, Tag, TypeAnnotation, TypeDef, TypeHeader,
ValueDef, WhenBranch,
},
header::{
AppHeader, ExposedName, HostedHeader, ImportsEntry, InterfaceHeader, ModuleName,
PackageEntry, PackageName, PlatformHeader, PlatformRequires, To, TypedIdent,
},
ident::UppercaseIdent,
};
use roc_region::all::{Loc, Region};
use crate::Buf;
use crate::{Ast, Buf};
/// The number of spaces to indent.
pub const INDENT: u16 = 4;
@ -149,3 +164,575 @@ fn fmt_docs<'buf>(buf: &mut Buf<'buf>, docs: &str) {
}
buf.push_str(docs);
}
/// RemoveSpaces normalizes the ast to something that we _expect_ to be invariant under formatting.
///
/// Currently this consists of:
/// * Removing newlines
/// * Removing comments
/// * Removing parens in Exprs
///
/// Long term, we actually want this transform to preserve comments (so we can assert they're maintained by formatting)
/// - but there are currently several bugs where they're _not_ preserved.
/// TODO: ensure formatting retains comments
pub trait RemoveSpaces<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self;
}
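A minimal sketch (not part of the diff) of how this trait is meant to be used by the formatter round-trip tests later in this commit: normalize the pre-format and post-format ASTs and compare them. The Debug-string comparison mirrors the workaround those tests use for the known PartialEq issue; `Bump` and `Ast` are the imports already in this module:

    fn asts_match_after_normalizing<'a>(arena: &'a Bump, before: &Ast<'a>, after: &Ast<'a>) -> bool {
        // Compare Debug output rather than PartialEq directly, matching the tests below.
        format!("{:?}", before.remove_spaces(arena)) == format!("{:?}", after.remove_spaces(arena))
    }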
impl<'a> RemoveSpaces<'a> for Ast<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
Ast {
module: self.module.remove_spaces(arena),
defs: {
let mut defs = Vec::with_capacity_in(self.defs.len(), arena);
for d in &self.defs {
defs.push(d.remove_spaces(arena))
}
defs
},
}
}
}
impl<'a> RemoveSpaces<'a> for Module<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match self {
Module::Interface { header } => Module::Interface {
header: InterfaceHeader {
name: header.name.remove_spaces(arena),
exposes: header.exposes.remove_spaces(arena),
imports: header.imports.remove_spaces(arena),
before_header: &[],
after_interface_keyword: &[],
before_exposes: &[],
after_exposes: &[],
before_imports: &[],
after_imports: &[],
},
},
Module::App { header } => Module::App {
header: AppHeader {
name: header.name.remove_spaces(arena),
packages: header.packages.remove_spaces(arena),
imports: header.imports.remove_spaces(arena),
provides: header.provides.remove_spaces(arena),
provides_types: header.provides_types.map(|ts| ts.remove_spaces(arena)),
to: header.to.remove_spaces(arena),
before_header: &[],
after_app_keyword: &[],
before_packages: &[],
after_packages: &[],
before_imports: &[],
after_imports: &[],
before_provides: &[],
after_provides: &[],
before_to: &[],
after_to: &[],
},
},
Module::Platform { header } => Module::Platform {
header: PlatformHeader {
name: header.name.remove_spaces(arena),
requires: header.requires.remove_spaces(arena),
exposes: header.exposes.remove_spaces(arena),
packages: header.packages.remove_spaces(arena),
imports: header.imports.remove_spaces(arena),
provides: header.provides.remove_spaces(arena),
before_header: &[],
after_platform_keyword: &[],
before_requires: &[],
after_requires: &[],
before_exposes: &[],
after_exposes: &[],
before_packages: &[],
after_packages: &[],
before_imports: &[],
after_imports: &[],
before_provides: &[],
after_provides: &[],
},
},
Module::Hosted { header } => Module::Hosted {
header: HostedHeader {
name: header.name.remove_spaces(arena),
exposes: header.exposes.remove_spaces(arena),
imports: header.imports.remove_spaces(arena),
generates: header.generates.remove_spaces(arena),
generates_with: header.generates_with.remove_spaces(arena),
before_header: &[],
after_hosted_keyword: &[],
before_exposes: &[],
after_exposes: &[],
before_imports: &[],
after_imports: &[],
before_generates: &[],
after_generates: &[],
before_with: &[],
after_with: &[],
},
},
}
}
}
impl<'a> RemoveSpaces<'a> for &'a str {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
self
}
}
impl<'a, T: RemoveSpaces<'a> + Copy> RemoveSpaces<'a> for Spaced<'a, T> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
Spaced::Item(a) => Spaced::Item(a.remove_spaces(arena)),
Spaced::SpaceBefore(a, _) => a.remove_spaces(arena),
Spaced::SpaceAfter(a, _) => a.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for ExposedName<'a> {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
*self
}
}
impl<'a> RemoveSpaces<'a> for ModuleName<'a> {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
*self
}
}
impl<'a> RemoveSpaces<'a> for PackageName<'a> {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
*self
}
}
impl<'a> RemoveSpaces<'a> for To<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
To::ExistingPackage(a) => To::ExistingPackage(a),
To::NewPackage(a) => To::NewPackage(a.remove_spaces(arena)),
}
}
}
impl<'a> RemoveSpaces<'a> for TypedIdent<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
TypedIdent {
ident: self.ident.remove_spaces(arena),
spaces_before_colon: &[],
ann: self.ann.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for PlatformRequires<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
PlatformRequires {
rigids: self.rigids.remove_spaces(arena),
signature: self.signature.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for UppercaseIdent<'a> {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
*self
}
}
impl<'a> RemoveSpaces<'a> for PackageEntry<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
PackageEntry {
shorthand: self.shorthand,
spaces_after_shorthand: &[],
package_name: self.package_name.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for ImportsEntry<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
ImportsEntry::Module(a, b) => ImportsEntry::Module(a, b.remove_spaces(arena)),
ImportsEntry::Package(a, b, c) => ImportsEntry::Package(a, b, c.remove_spaces(arena)),
}
}
}
impl<'a, T: RemoveSpaces<'a>> RemoveSpaces<'a> for Option<T> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
self.as_ref().map(|a| a.remove_spaces(arena))
}
}
impl<'a, T: RemoveSpaces<'a> + std::fmt::Debug> RemoveSpaces<'a> for Loc<T> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
let res = self.value.remove_spaces(arena);
Loc::at(Region::zero(), res)
}
}
impl<'a, A: RemoveSpaces<'a>, B: RemoveSpaces<'a>> RemoveSpaces<'a> for (A, B) {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
(self.0.remove_spaces(arena), self.1.remove_spaces(arena))
}
}
impl<'a, T: RemoveSpaces<'a>> RemoveSpaces<'a> for Collection<'a, T> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
let mut items = Vec::with_capacity_in(self.items.len(), arena);
for item in self.items {
items.push(item.remove_spaces(arena));
}
Collection::with_items(items.into_bump_slice())
}
}
impl<'a, T: RemoveSpaces<'a> + std::fmt::Debug> RemoveSpaces<'a> for &'a [T] {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
let mut items = Vec::with_capacity_in(self.len(), arena);
for item in *self {
let res = item.remove_spaces(arena);
items.push(res);
}
items.into_bump_slice()
}
}
impl<'a> RemoveSpaces<'a> for UnaryOp {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
*self
}
}
impl<'a> RemoveSpaces<'a> for BinOp {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
*self
}
}
impl<'a, T: RemoveSpaces<'a>> RemoveSpaces<'a> for &'a T {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
arena.alloc((*self).remove_spaces(arena))
}
}
impl<'a> RemoveSpaces<'a> for TypeDef<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
use TypeDef::*;
match *self {
Alias {
header: TypeHeader { name, vars },
ann,
} => Alias {
header: TypeHeader {
name: name.remove_spaces(arena),
vars: vars.remove_spaces(arena),
},
ann: ann.remove_spaces(arena),
},
Opaque {
header: TypeHeader { name, vars },
typ,
} => Opaque {
header: TypeHeader {
name: name.remove_spaces(arena),
vars: vars.remove_spaces(arena),
},
typ: typ.remove_spaces(arena),
},
Ability {
header: TypeHeader { name, vars },
loc_has,
members,
} => Ability {
header: TypeHeader {
name: name.remove_spaces(arena),
vars: vars.remove_spaces(arena),
},
loc_has: loc_has.remove_spaces(arena),
members: members.remove_spaces(arena),
},
}
}
}
impl<'a> RemoveSpaces<'a> for ValueDef<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
use ValueDef::*;
match *self {
Annotation(a, b) => Annotation(a.remove_spaces(arena), b.remove_spaces(arena)),
Body(a, b) => Body(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
),
AnnotatedBody {
ann_pattern,
ann_type,
comment: _,
body_pattern,
body_expr,
} => AnnotatedBody {
ann_pattern: arena.alloc(ann_pattern.remove_spaces(arena)),
ann_type: arena.alloc(ann_type.remove_spaces(arena)),
comment: None,
body_pattern: arena.alloc(body_pattern.remove_spaces(arena)),
body_expr: arena.alloc(body_expr.remove_spaces(arena)),
},
Expect(a) => Expect(arena.alloc(a.remove_spaces(arena))),
}
}
}
impl<'a> RemoveSpaces<'a> for Def<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
Def::Type(def) => Def::Type(def.remove_spaces(arena)),
Def::Value(def) => Def::Value(def.remove_spaces(arena)),
Def::NotYetImplemented(a) => Def::NotYetImplemented(a),
Def::SpaceBefore(a, _) | Def::SpaceAfter(a, _) => a.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for Has<'a> {
fn remove_spaces(&self, _arena: &'a Bump) -> Self {
Has::Has
}
}
impl<'a> RemoveSpaces<'a> for AbilityMember<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
AbilityMember {
name: self.name.remove_spaces(arena),
typ: self.typ.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for WhenBranch<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
WhenBranch {
patterns: self.patterns.remove_spaces(arena),
value: self.value.remove_spaces(arena),
guard: self.guard.remove_spaces(arena),
}
}
}
impl<'a, T: RemoveSpaces<'a> + Copy + std::fmt::Debug> RemoveSpaces<'a> for AssignedField<'a, T> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
AssignedField::RequiredValue(a, _, c) => AssignedField::RequiredValue(
a.remove_spaces(arena),
arena.alloc([]),
arena.alloc(c.remove_spaces(arena)),
),
AssignedField::OptionalValue(a, _, c) => AssignedField::OptionalValue(
a.remove_spaces(arena),
arena.alloc([]),
arena.alloc(c.remove_spaces(arena)),
),
AssignedField::LabelOnly(a) => AssignedField::LabelOnly(a.remove_spaces(arena)),
AssignedField::Malformed(a) => AssignedField::Malformed(a),
AssignedField::SpaceBefore(a, _) => a.remove_spaces(arena),
AssignedField::SpaceAfter(a, _) => a.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for StrLiteral<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
StrLiteral::PlainLine(t) => StrLiteral::PlainLine(t),
StrLiteral::Line(t) => StrLiteral::Line(t.remove_spaces(arena)),
StrLiteral::Block(t) => StrLiteral::Block(t.remove_spaces(arena)),
}
}
}
impl<'a> RemoveSpaces<'a> for StrSegment<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
StrSegment::Plaintext(t) => StrSegment::Plaintext(t),
StrSegment::Unicode(t) => StrSegment::Unicode(t.remove_spaces(arena)),
StrSegment::EscapedChar(c) => StrSegment::EscapedChar(c),
StrSegment::Interpolated(t) => StrSegment::Interpolated(t.remove_spaces(arena)),
}
}
}
impl<'a> RemoveSpaces<'a> for Expr<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
Expr::Float(a) => Expr::Float(a),
Expr::Num(a) => Expr::Num(a),
Expr::NonBase10Int {
string,
base,
is_negative,
} => Expr::NonBase10Int {
string,
base,
is_negative,
},
Expr::Str(a) => Expr::Str(a.remove_spaces(arena)),
Expr::Access(a, b) => Expr::Access(arena.alloc(a.remove_spaces(arena)), b),
Expr::AccessorFunction(a) => Expr::AccessorFunction(a),
Expr::List(a) => Expr::List(a.remove_spaces(arena)),
Expr::RecordUpdate { update, fields } => Expr::RecordUpdate {
update: arena.alloc(update.remove_spaces(arena)),
fields: fields.remove_spaces(arena),
},
Expr::Record(a) => Expr::Record(a.remove_spaces(arena)),
Expr::Var { module_name, ident } => Expr::Var { module_name, ident },
Expr::Underscore(a) => Expr::Underscore(a),
Expr::Tag(a) => Expr::Tag(a),
Expr::OpaqueRef(a) => Expr::OpaqueRef(a),
Expr::Closure(a, b) => Expr::Closure(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
),
Expr::Defs(a, b) => {
Expr::Defs(a.remove_spaces(arena), arena.alloc(b.remove_spaces(arena)))
}
Expr::Backpassing(a, b, c) => Expr::Backpassing(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
arena.alloc(c.remove_spaces(arena)),
),
Expr::Expect(a, b) => Expr::Expect(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
),
Expr::Apply(a, b, c) => Expr::Apply(
arena.alloc(a.remove_spaces(arena)),
b.remove_spaces(arena),
c,
),
Expr::BinOps(a, b) => {
Expr::BinOps(a.remove_spaces(arena), arena.alloc(b.remove_spaces(arena)))
}
Expr::UnaryOp(a, b) => {
Expr::UnaryOp(arena.alloc(a.remove_spaces(arena)), b.remove_spaces(arena))
}
Expr::If(a, b) => Expr::If(a.remove_spaces(arena), arena.alloc(b.remove_spaces(arena))),
Expr::When(a, b) => {
Expr::When(arena.alloc(a.remove_spaces(arena)), b.remove_spaces(arena))
}
Expr::ParensAround(a) => {
// The formatter can remove redundant parentheses, so also remove these when normalizing for comparison.
a.remove_spaces(arena)
}
Expr::MalformedIdent(a, b) => Expr::MalformedIdent(a, b),
Expr::MalformedClosure => Expr::MalformedClosure,
Expr::PrecedenceConflict(a) => Expr::PrecedenceConflict(a),
Expr::SpaceBefore(a, _) => a.remove_spaces(arena),
Expr::SpaceAfter(a, _) => a.remove_spaces(arena),
Expr::SingleQuote(a) => Expr::Num(a),
}
}
}
impl<'a> RemoveSpaces<'a> for Pattern<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
Pattern::Identifier(a) => Pattern::Identifier(a),
Pattern::Tag(a) => Pattern::Tag(a),
Pattern::OpaqueRef(a) => Pattern::OpaqueRef(a),
Pattern::Apply(a, b) => Pattern::Apply(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
),
Pattern::RecordDestructure(a) => Pattern::RecordDestructure(a.remove_spaces(arena)),
Pattern::RequiredField(a, b) => {
Pattern::RequiredField(a, arena.alloc(b.remove_spaces(arena)))
}
Pattern::OptionalField(a, b) => {
Pattern::OptionalField(a, arena.alloc(b.remove_spaces(arena)))
}
Pattern::NumLiteral(a) => Pattern::NumLiteral(a),
Pattern::NonBase10Literal {
string,
base,
is_negative,
} => Pattern::NonBase10Literal {
string,
base,
is_negative,
},
Pattern::FloatLiteral(a) => Pattern::FloatLiteral(a),
Pattern::StrLiteral(a) => Pattern::StrLiteral(a),
Pattern::Underscore(a) => Pattern::Underscore(a),
Pattern::Malformed(a) => Pattern::Malformed(a),
Pattern::MalformedIdent(a, b) => Pattern::MalformedIdent(a, b),
Pattern::QualifiedIdentifier { module_name, ident } => {
Pattern::QualifiedIdentifier { module_name, ident }
}
Pattern::SpaceBefore(a, _) => a.remove_spaces(arena),
Pattern::SpaceAfter(a, _) => a.remove_spaces(arena),
Pattern::SingleQuote(a) => Pattern::NumLiteral(a),
}
}
}
impl<'a> RemoveSpaces<'a> for TypeAnnotation<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
TypeAnnotation::Function(a, b) => TypeAnnotation::Function(
arena.alloc(a.remove_spaces(arena)),
arena.alloc(b.remove_spaces(arena)),
),
TypeAnnotation::Apply(a, b, c) => TypeAnnotation::Apply(a, b, c.remove_spaces(arena)),
TypeAnnotation::BoundVariable(a) => TypeAnnotation::BoundVariable(a),
TypeAnnotation::As(a, _, c) => {
TypeAnnotation::As(arena.alloc(a.remove_spaces(arena)), &[], c)
}
TypeAnnotation::Record { fields, ext } => TypeAnnotation::Record {
fields: fields.remove_spaces(arena),
ext: ext.remove_spaces(arena),
},
TypeAnnotation::TagUnion { ext, tags } => TypeAnnotation::TagUnion {
ext: ext.remove_spaces(arena),
tags: tags.remove_spaces(arena),
},
TypeAnnotation::Inferred => TypeAnnotation::Inferred,
TypeAnnotation::Wildcard => TypeAnnotation::Wildcard,
TypeAnnotation::Where(annot, has_clauses) => TypeAnnotation::Where(
arena.alloc(annot.remove_spaces(arena)),
arena.alloc(has_clauses.remove_spaces(arena)),
),
TypeAnnotation::SpaceBefore(a, _) => a.remove_spaces(arena),
TypeAnnotation::SpaceAfter(a, _) => a.remove_spaces(arena),
TypeAnnotation::Malformed(a) => TypeAnnotation::Malformed(a),
}
}
}
impl<'a> RemoveSpaces<'a> for HasClause<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
HasClause {
var: self.var.remove_spaces(arena),
ability: self.ability.remove_spaces(arena),
}
}
}
impl<'a> RemoveSpaces<'a> for Tag<'a> {
fn remove_spaces(&self, arena: &'a Bump) -> Self {
match *self {
Tag::Apply { name, args } => Tag::Apply {
name: name.remove_spaces(arena),
args: args.remove_spaces(arena),
},
Tag::Malformed(a) => Tag::Malformed(a),
Tag::SpaceBefore(a, _) => a.remove_spaces(arena),
Tag::SpaceAfter(a, _) => a.remove_spaces(arena),
}
}
}

View File

@ -10,60 +10,153 @@ mod test_fmt {
use roc_fmt::def::fmt_def;
use roc_fmt::module::fmt_module;
use roc_fmt::Buf;
use roc_parse::ast::Module;
use roc_parse::module::{self, module_defs};
use roc_parse::parser::Parser;
use roc_parse::state::State;
use roc_test_utils::assert_multiline_str_eq;
// Not intended to be used directly in tests; please use expr_formats_to or expr_formats_same
fn expect_format_expr_helper(input: &str, expected: &str) {
fn expr_formats_to(input: &str, expected: &str) {
let arena = Bump::new();
match roc_parse::test_helpers::parse_expr_with(&arena, input.trim()) {
let input = input.trim();
let expected = expected.trim();
match roc_parse::test_helpers::parse_expr_with(&arena, input) {
Ok(actual) => {
use roc_fmt::spaces::RemoveSpaces;
let mut buf = Buf::new_in(&arena);
actual.format_with_options(&mut buf, Parens::NotNeeded, Newlines::Yes, 0);
assert_multiline_str_eq!(expected, buf.as_str());
let output = buf.as_str();
assert_multiline_str_eq!(expected, output);
let reparsed_ast = roc_parse::test_helpers::parse_expr_with(&arena, output).unwrap_or_else(|err| {
panic!(
"After formatting, the source code no longer parsed!\n\nParse error was: {:?}\n\nThe code that failed to parse:\n\n{}\n\n",
err, output
);
});
let ast_normalized = actual.remove_spaces(&arena);
let reparsed_ast_normalized = reparsed_ast.remove_spaces(&arena);
// HACK!
// We compare the debug format strings of the ASTs, because I'm finding in practice that _somewhere_ deep inside the ast,
// the PartialEq implementation is returning `false` even when the Debug-formatted output is exactly the same.
// I don't have the patience to debug this right now, so let's leave it for another day...
// TODO: fix PartialEq impl on ast types
if format!("{:?}", ast_normalized) != format!("{:?}", reparsed_ast_normalized) {
panic!(
"Formatting bug; formatting didn't reparse to the same AST (after removing spaces)\n\n\
* * * Source code before formatting:\n{}\n\n\
* * * Source code after formatting:\n{}\n\n",
input,
output
);
}
// Now verify that the resultant formatting is _stable_ - i.e. that it doesn't change again if re-formatted
let mut reformatted_buf = Buf::new_in(&arena);
reparsed_ast.format_with_options(&mut reformatted_buf, Parens::NotNeeded, Newlines::Yes, 0);
if output != reformatted_buf.as_str() {
eprintln!("Formatting bug; formatting is not stable. Reformatting the formatted code changed it again, as follows:\n\n");
assert_multiline_str_eq!(output, reformatted_buf.as_str());
}
}
Err(error) => panic!("Unexpected parse failure when parsing this for formatting:\n\n{}\n\nParse error was:\n\n{:?}\n\n", input, error)
};
}
fn expr_formats_to(input: &str, expected: &str) {
let input = input.trim_end();
let expected = expected.trim_end();
// First check that input formats to the expected version
expect_format_expr_helper(input, expected);
// Parse the expected result and format it, asserting that it doesn't change
// It's important that formatting be stable / idempotent
expect_format_expr_helper(expected, expected);
}
fn expr_formats_same(input: &str) {
expr_formats_to(input, input);
}
fn fmt_module_and_defs<'a>(
arena: &Bump,
src: &str,
module: &Module<'a>,
state: State<'a>,
buf: &mut Buf<'_>,
) {
fmt_module(buf, module);
match module_defs().parse(&arena, state) {
Ok((_, loc_defs, _)) => {
for loc_def in loc_defs {
fmt_def(buf, arena.alloc(loc_def.value), 0);
}
}
Err(error) => panic!("Unexpected parse failure when parsing this for defs formatting:\n\n{:?}\n\nParse error was:\n\n{:?}\n\n", src, error)
}
}
// Not intended to be used directly in tests; please use module_formats_to or module_formats_same
fn expect_format_module_helper(src: &str, expected: &str) {
let arena = Bump::new();
let src = src.trim();
let expected = expected.trim();
match module::parse_header(&arena, State::new(src.as_bytes())) {
Ok((actual, state)) => {
use roc_fmt::spaces::RemoveSpaces;
let mut buf = Buf::new_in(&arena);
fmt_module(&mut buf, &actual);
fmt_module_and_defs(&arena, src, &actual, state, &mut buf);
match module_defs().parse(&arena, state) {
Ok((_, loc_defs, _)) => {
for loc_def in loc_defs {
fmt_def(&mut buf, arena.alloc(loc_def.value), 0);
}
}
Err(error) => panic!("Unexpected parse failure when parsing this for defs formatting:\n\n{:?}\n\nParse error was:\n\n{:?}\n\n", src, error)
let output = buf.as_str().trim();
let (reparsed_ast, state) = module::parse_header(&arena, State::new(output.as_bytes())).unwrap_or_else(|err| {
panic!(
"After formatting, the source code no longer parsed!\n\nParse error was: {:?}\n\nThe code that failed to parse:\n\n{}\n\n",
err, output
);
});
let ast_normalized = actual.remove_spaces(&arena);
let reparsed_ast_normalized = reparsed_ast.remove_spaces(&arena);
// HACK!
// We compare the debug format strings of the ASTs, because I'm finding in practice that _somewhere_ deep inside the ast,
// the PartialEq implementation is returning `false` even when the Debug-formatted output is exactly the same.
// I don't have the patience to debug this right now, so let's leave it for another day...
// TODO: fix PartialEq impl on ast types
if format!("{:?}", ast_normalized) != format!("{:?}", reparsed_ast_normalized) {
panic!(
"Formatting bug; formatting didn't reparse to the same AST (after removing spaces)\n\n\
* * * Source code before formatting:\n{}\n\n\
* * * Source code after formatting:\n{}\n\n",
src,
output
);
}
assert_multiline_str_eq!(expected, buf.as_str())
// Now verify that the resultant formatting is _stable_ - i.e. that it doesn't change again if re-formatted
let mut reformatted_buf = Buf::new_in(&arena);
fmt_module_and_defs(&arena, output, &reparsed_ast, state, &mut reformatted_buf);
let reformatted = reformatted_buf.as_str().trim();
if output != reformatted {
eprintln!("Formatting bug; formatting is not stable. Reformatting the formatted code changed it again, as follows:\n\n");
assert_multiline_str_eq!(output, reformatted);
}
// If everything was idempotent and re-parsing worked, finally assert
// that the formatted code was what we expected it to be.
//
// Do this last because if there were any serious problems with the
// formatter (e.g. it wasn't idempotent), we want to know about
// those more than we want to know that the expectation failed!
assert_multiline_str_eq!(expected, output);
}
Err(error) => panic!("Unexpected parse failure when parsing this for module header formatting:\n\n{:?}\n\nParse error was:\n\n{:?}\n\n", src, error)
};
@ -680,7 +773,7 @@ mod test_fmt {
1,
]
list
list
"#
));
@ -693,7 +786,7 @@ mod test_fmt {
1,
]
list
list
"#
));
@ -706,7 +799,7 @@ mod test_fmt {
1,
]
list
list
"#
));
@ -720,7 +813,7 @@ mod test_fmt {
1,
]
list
list
"#
));
@ -737,7 +830,7 @@ mod test_fmt {
# comment 3
]
list
list
"#
));
@ -753,7 +846,7 @@ mod test_fmt {
# comment 3
]
list
list
"#
));
expr_formats_to(
@ -767,7 +860,7 @@ mod test_fmt {
1,
]
list
list
"#
),
indoc!(
@ -779,7 +872,7 @@ mod test_fmt {
1,
]
list
list
"#
),
);
@ -795,7 +888,7 @@ mod test_fmt {
]
list
list
"#
),
indoc!(
@ -807,7 +900,7 @@ mod test_fmt {
# comment
]
list
list
"#
),
);
@ -823,7 +916,7 @@ mod test_fmt {
1,
]
list
list
"#
),
indoc!(
@ -835,7 +928,7 @@ mod test_fmt {
1,
]
list
list
"#
),
);
@ -851,7 +944,7 @@ mod test_fmt {
1,
]
list
list
"#
),
indoc!(
@ -863,7 +956,7 @@ mod test_fmt {
1,
]
list
list
"#
),
);
@ -881,7 +974,7 @@ mod test_fmt {
1,
]
list
list
"#
),
indoc!(
@ -894,7 +987,7 @@ mod test_fmt {
1,
]
list
list
"#
),
);
@ -1444,7 +1537,7 @@ mod test_fmt {
3,
]
toList
toList
"#
));
@ -3939,6 +4032,39 @@ mod test_fmt {
);
}
#[test]
fn format_tui_package_config() {
// At one point this failed to reformat.
module_formats_to(
indoc!(
r#"
platform "tui"
requires { Model } { main : { init : ({} -> Model), update : (Model, Str -> Model), view : (Model -> Str) } }
exposes []
packages {}
imports []
provides [ mainForHost ]
mainForHost : { init : ({} -> Model) as Init, update : (Model, Str -> Model) as Update, view : (Model -> Str) as View }
mainForHost = main
"#
),
indoc!(
r#"
platform "tui"
requires { Model } { main : { init : {} -> Model, update : Model, Str -> Model, view : Model -> Str } }
exposes []
packages {}
imports []
provides [ mainForHost ]
mainForHost : { init : ({} -> Model) as Init, update : (Model, Str -> Model) as Update, view : (Model -> Str) as View }
mainForHost = main
"#
),
);
}
#[test]
fn single_line_hosted() {
module_formats_same(indoc!(

View File

@ -554,7 +554,7 @@ trait Backend<'a> {
}
LowLevel::NumRound => self.build_fn_call(
sym,
bitcode::NUM_ROUND[FloatWidth::F64].to_string(),
bitcode::NUM_ROUND_F64[IntWidth::I64].to_string(),
args,
arg_layouts,
ret_layout,

View File

@ -602,6 +602,7 @@ static LLVM_SIN: IntrinsicName = float_intrinsic!("llvm.sin");
static LLVM_COS: IntrinsicName = float_intrinsic!("llvm.cos");
static LLVM_CEILING: IntrinsicName = float_intrinsic!("llvm.ceil");
static LLVM_FLOOR: IntrinsicName = float_intrinsic!("llvm.floor");
static LLVM_ROUND: IntrinsicName = float_intrinsic!("llvm.round");
static LLVM_MEMSET_I64: &str = "llvm.memset.p0i8.i64";
static LLVM_MEMSET_I32: &str = "llvm.memset.p0i8.i32";
@ -7403,20 +7404,67 @@ fn build_float_unary_op<'a, 'ctx, 'env>(
}
}
}
NumCeiling => env.builder.build_cast(
InstructionOpcode::FPToSI,
env.call_intrinsic(&LLVM_CEILING[float_width], &[arg.into()]),
env.context.i64_type(),
"num_ceiling",
),
NumFloor => env.builder.build_cast(
InstructionOpcode::FPToSI,
env.call_intrinsic(&LLVM_FLOOR[float_width], &[arg.into()]),
env.context.i64_type(),
"num_floor",
),
NumCeiling => {
let (return_signed, return_type) = match layout {
Layout::Builtin(Builtin::Int(int_width)) => (
int_width.is_signed(),
convert::int_type_from_int_width(env, *int_width),
),
_ => internal_error!("Ceiling return layout is not int: {:?}", layout),
};
let opcode = if return_signed {
InstructionOpcode::FPToSI
} else {
InstructionOpcode::FPToUI
};
env.builder.build_cast(
opcode,
env.call_intrinsic(&LLVM_CEILING[float_width], &[arg.into()]),
return_type,
"num_ceiling",
)
}
NumFloor => {
let (return_signed, return_type) = match layout {
Layout::Builtin(Builtin::Int(int_width)) => (
int_width.is_signed(),
convert::int_type_from_int_width(env, *int_width),
),
_ => internal_error!("Ceiling return layout is not int: {:?}", layout),
};
let opcode = if return_signed {
InstructionOpcode::FPToSI
} else {
InstructionOpcode::FPToUI
};
env.builder.build_cast(
opcode,
env.call_intrinsic(&LLVM_FLOOR[float_width], &[arg.into()]),
return_type,
"num_floor",
)
}
NumRound => {
let (return_signed, return_type) = match layout {
Layout::Builtin(Builtin::Int(int_width)) => (
int_width.is_signed(),
convert::int_type_from_int_width(env, *int_width),
),
_ => internal_error!("Ceiling return layout is not int: {:?}", layout),
};
let opcode = if return_signed {
InstructionOpcode::FPToSI
} else {
InstructionOpcode::FPToUI
};
env.builder.build_cast(
opcode,
env.call_intrinsic(&LLVM_ROUND[float_width], &[arg.into()]),
return_type,
"num_round",
)
}
NumIsFinite => call_bitcode_fn(env, &[arg.into()], &bitcode::NUM_IS_FINITE[float_width]),
NumRound => call_bitcode_fn(env, &[arg.into()], &bitcode::NUM_ROUND[float_width]),
// trigonometry
NumSin => env.call_intrinsic(&LLVM_SIN[float_width], &[arg.into()]),

View File

@ -561,18 +561,6 @@ impl<'a> LowLevelCall<'a> {
NumCos => todo!("{:?}", self.lowlevel),
NumSqrtUnchecked => todo!("{:?}", self.lowlevel),
NumLogUnchecked => todo!("{:?}", self.lowlevel),
NumRound => {
self.load_args(backend);
match CodeGenNumType::for_symbol(backend, self.arguments[0]) {
F32 => {
self.load_args_and_call_zig(backend, &bitcode::NUM_ROUND[FloatWidth::F32])
}
F64 => {
self.load_args_and_call_zig(backend, &bitcode::NUM_ROUND[FloatWidth::F64])
}
_ => todo!("{:?} for {:?}", self.lowlevel, self.ret_layout),
}
}
NumToFloat => {
self.load_args(backend);
let ret_type = CodeGenNumType::from(self.ret_layout);
@ -592,35 +580,54 @@ impl<'a> LowLevelCall<'a> {
}
}
NumPow => todo!("{:?}", self.lowlevel),
NumCeiling => {
NumRound => {
self.load_args(backend);
match CodeGenNumType::from(self.ret_layout) {
I32 => {
let arg_type = CodeGenNumType::for_symbol(backend, self.arguments[0]);
let ret_type = CodeGenNumType::from(self.ret_layout);
let width = match ret_type {
CodeGenNumType::I32 => IntWidth::I32,
CodeGenNumType::I64 => IntWidth::I64,
CodeGenNumType::I128 => todo!("{:?} for I128", self.lowlevel),
_ => internal_error!("Invalid return type for round: {:?}", ret_type),
};
match arg_type {
F32 => self.load_args_and_call_zig(backend, &bitcode::NUM_ROUND_F32[width]),
F64 => self.load_args_and_call_zig(backend, &bitcode::NUM_ROUND_F64[width]),
_ => internal_error!("Invalid argument type for round: {:?}", arg_type),
}
}
NumCeiling | NumFloor => {
self.load_args(backend);
let arg_type = CodeGenNumType::for_symbol(backend, self.arguments[0]);
let ret_type = CodeGenNumType::from(self.ret_layout);
match (arg_type, self.lowlevel) {
(F32, NumCeiling) => {
backend.code_builder.f32_ceil();
backend.code_builder.i32_trunc_s_f32()
}
I64 => {
(F64, NumCeiling) => {
backend.code_builder.f64_ceil();
backend.code_builder.i64_trunc_s_f64()
}
(F32, NumFloor) => {
backend.code_builder.f32_floor();
}
(F64, NumFloor) => {
backend.code_builder.f64_floor();
}
_ => internal_error!("Invalid argument type for ceiling: {:?}", arg_type),
}
match (ret_type, arg_type) {
// TODO: unsigned truncation
(I32, F32) => backend.code_builder.i32_trunc_s_f32(),
(I32, F64) => backend.code_builder.i32_trunc_s_f64(),
(I64, F32) => backend.code_builder.i64_trunc_s_f32(),
(I64, F64) => backend.code_builder.i64_trunc_s_f64(),
(I128, _) => todo!("{:?} for I128", self.lowlevel),
_ => panic_ret_type(),
}
}
NumPowInt => todo!("{:?}", self.lowlevel),
NumFloor => {
self.load_args(backend);
match CodeGenNumType::from(self.ret_layout) {
I32 => {
backend.code_builder.f32_floor();
backend.code_builder.i32_trunc_s_f32()
}
I64 => {
backend.code_builder.f64_floor();
backend.code_builder.i64_trunc_s_f64()
}
_ => panic_ret_type(),
}
}
NumIsFinite => num_is_finite(backend, self.arguments[0]),
NumAtan => match self.ret_layout {

View File

@ -1,6 +1,7 @@
use std::path::PathBuf;
use bumpalo::Bump;
use roc_load_internal::file::Threading;
use roc_module::symbol::ModuleId;
const MODULES: &[(ModuleId, &str)] = &[
@ -37,6 +38,7 @@ fn write_subs_for_module(module_id: ModuleId, filename: &str) {
Default::default(),
target_info,
roc_reporting::report::RenderTarget::ColorTerminal,
Threading::Multi,
);
let module = res_module.unwrap();

View File

@ -1,3 +1,5 @@
pub use roc_load_internal::file::Threading;
use bumpalo::Bump;
use roc_collections::all::MutMap;
use roc_constrain::module::ExposedByModule;
@ -12,6 +14,7 @@ pub use roc_load_internal::file::{
LoadResult, LoadStart, LoadedModule, LoadingProblem, MonomorphizedModule, Phase,
};
#[allow(clippy::too_many_arguments)]
fn load<'a>(
arena: &'a Bump,
load_start: LoadStart<'a>,
@ -20,6 +23,7 @@ fn load<'a>(
goal_phase: Phase,
target_info: TargetInfo,
render: RenderTarget,
threading: Threading,
) -> Result<LoadResult<'a>, LoadingProblem<'a>> {
let cached_subs = read_cached_subs();
@ -32,6 +36,7 @@ fn load<'a>(
target_info,
cached_subs,
render,
threading,
)
}
@ -59,6 +64,7 @@ pub fn load_single_threaded<'a>(
)
}
#[allow(clippy::too_many_arguments)]
pub fn load_and_monomorphize_from_str<'a>(
arena: &'a Bump,
filename: PathBuf,
@ -67,6 +73,7 @@ pub fn load_and_monomorphize_from_str<'a>(
exposed_types: ExposedByModule,
target_info: TargetInfo,
render: RenderTarget,
threading: Threading,
) -> Result<MonomorphizedModule<'a>, LoadingProblem<'a>> {
use LoadResult::*;
@ -80,6 +87,7 @@ pub fn load_and_monomorphize_from_str<'a>(
Phase::MakeSpecializations,
target_info,
render,
threading,
)? {
Monomorphized(module) => Ok(module),
TypeChecked(_) => unreachable!(""),
@ -93,6 +101,7 @@ pub fn load_and_monomorphize<'a>(
exposed_types: ExposedByModule,
target_info: TargetInfo,
render: RenderTarget,
threading: Threading,
) -> Result<MonomorphizedModule<'a>, LoadingProblem<'a>> {
use LoadResult::*;
@ -106,6 +115,7 @@ pub fn load_and_monomorphize<'a>(
Phase::MakeSpecializations,
target_info,
render,
threading,
)? {
Monomorphized(module) => Ok(module),
TypeChecked(_) => unreachable!(""),
@ -119,6 +129,7 @@ pub fn load_and_typecheck<'a>(
exposed_types: ExposedByModule,
target_info: TargetInfo,
render: RenderTarget,
threading: Threading,
) -> Result<LoadedModule, LoadingProblem<'a>> {
use LoadResult::*;
@ -132,6 +143,7 @@ pub fn load_and_typecheck<'a>(
Phase::SolveTypes,
target_info,
render,
threading,
)? {
Monomorphized(_) => unreachable!(""),
TypeChecked(module) => Ok(module),

View File

@ -30,7 +30,7 @@ use roc_mono::ir::{
UpdateModeIds,
};
use roc_mono::layout::{Layout, LayoutCache, LayoutProblem};
use roc_parse::ast::{self, ExtractSpaces, Spaced, StrLiteral};
use roc_parse::ast::{self, ExtractSpaces, Spaced, StrLiteral, TypeAnnotation};
use roc_parse::header::{ExposedName, ImportsEntry, PackageEntry, PlatformHeader, To, TypedIdent};
use roc_parse::header::{HeaderFor, ModuleNameEnum, PackageName};
use roc_parse::ident::UppercaseIdent;
@ -512,8 +512,9 @@ struct ModuleHeader<'a> {
exposes: Vec<Symbol>,
exposed_imports: MutMap<Ident, (Symbol, Region)>,
parse_state: roc_parse::state::State<'a>,
module_timing: ModuleTiming,
header_for: HeaderFor<'a>,
symbols_from_requires: Vec<(Loc<Symbol>, Loc<TypeAnnotation<'a>>)>,
module_timing: ModuleTiming,
}
#[derive(Debug)]
@ -603,6 +604,7 @@ struct ParsedModule<'a> {
exposed_imports: MutMap<Ident, (Symbol, Region)>,
parsed_defs: &'a [Loc<roc_parse::ast::Def<'a>>],
module_name: ModuleNameEnum<'a>,
symbols_from_requires: Vec<(Loc<Symbol>, Loc<TypeAnnotation<'a>>)>,
header_for: HeaderFor<'a>,
}
@ -948,6 +950,7 @@ fn enqueue_task<'a>(
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn load_and_typecheck_str<'a>(
arena: &'a Bump,
filename: PathBuf,
@ -956,6 +959,7 @@ pub fn load_and_typecheck_str<'a>(
exposed_types: ExposedByModule,
target_info: TargetInfo,
render: RenderTarget,
threading: Threading,
) -> Result<LoadedModule, LoadingProblem<'a>> {
use LoadResult::*;
@ -974,6 +978,7 @@ pub fn load_and_typecheck_str<'a>(
target_info,
cached_subs,
render,
threading,
)? {
Monomorphized(_) => unreachable!(""),
TypeChecked(module) => Ok(module),
@ -1091,6 +1096,12 @@ pub enum LoadResult<'a> {
Monomorphized(MonomorphizedModule<'a>),
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Threading {
Single,
Multi,
}
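A small sketch (not part of the diff) of how a caller might choose between the two modes; `requested_threads` is a hypothetical value (e.g. from a CLI flag), and the wasm check mirrors the one inside `load` below:

    fn choose_threading(requested_threads: usize) -> Threading {
        // One requested thread, or a wasm target (which cannot spawn extra
        // threads), means single-threaded loading; otherwise use all workers.
        if requested_threads <= 1 || cfg!(target_family = "wasm") {
            Threading::Single
        } else {
            Threading::Multi
        }
    }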
/// The loading process works like this, starting from the given filename (e.g. "main.roc"):
///
/// 1. Open the file.
@ -1144,10 +1155,11 @@ pub fn load<'a>(
target_info: TargetInfo,
cached_subs: MutMap<ModuleId, (Subs, Vec<(Symbol, Variable)>)>,
render: RenderTarget,
threading: Threading,
) -> Result<LoadResult<'a>, LoadingProblem<'a>> {
// When compiling to wasm, we cannot spawn extra threads
// so we have a single-threaded implementation
if cfg!(target_family = "wasm") {
if threading == Threading::Single || cfg!(target_family = "wasm") {
load_single_threaded(
arena,
load_start,
@ -3234,8 +3246,9 @@ fn send_header<'a>(
exposes: exposed,
parse_state,
exposed_imports: scope,
module_timing,
symbols_from_requires: Vec::new(),
header_for: extra,
module_timing,
}),
)
}
@ -3275,6 +3288,7 @@ fn send_header_two<'a>(
} = info;
let declared_name: ModuleName = "".into();
let mut symbols_from_requires = Vec::with_capacity(requires.len());
let mut imported: Vec<(QualifiedModuleName, Vec<Ident>, Region)> =
Vec::with_capacity(imports.len());
@ -3378,6 +3392,7 @@ fn send_header_two<'a>(
debug_assert!(!scope.contains_key(&ident.clone()));
scope.insert(ident, (symbol, entry.ident.region));
symbols_from_requires.push((Loc::at(entry.ident.region, symbol), entry.ann));
}
for entry in requires_types {
@ -3477,6 +3492,7 @@ fn send_header_two<'a>(
parse_state,
exposed_imports: scope,
module_timing,
symbols_from_requires,
header_for: extra,
}),
)
@ -3820,6 +3836,7 @@ fn canonicalize_and_constrain<'a>(
exposed_imports,
imported_modules,
mut module_timing,
symbols_from_requires,
..
} = parsed;
@ -3837,6 +3854,7 @@ fn canonicalize_and_constrain<'a>(
aliases,
exposed_imports,
&exposed_symbols,
&symbols_from_requires,
&mut var_store,
);
@ -3881,6 +3899,7 @@ fn canonicalize_and_constrain<'a>(
} else {
constrain_module(
&mut constraints,
module_output.symbols_from_requires,
&module_output.scope.abilities_store,
&module_output.declarations,
module_id,
@ -3992,6 +4011,7 @@ fn parse<'a>(arena: &'a Bump, header: ModuleHeader<'a>) -> Result<Msg<'a>, Loadi
exposed_imports,
module_path,
header_for,
symbols_from_requires,
..
} = header;
@ -4006,6 +4026,7 @@ fn parse<'a>(arena: &'a Bump, header: ModuleHeader<'a>) -> Result<Msg<'a>, Loadi
exposed_ident_ids,
exposed_imports,
parsed_defs,
symbols_from_requires,
header_for,
};

View File

@ -19,6 +19,7 @@ mod test_load {
use roc_can::def::Declaration::*;
use roc_can::def::Def;
use roc_constrain::module::ExposedByModule;
use roc_load_internal::file::Threading;
use roc_load_internal::file::{LoadResult, LoadStart, LoadedModule, LoadingProblem, Phase};
use roc_module::ident::ModuleName;
use roc_module::symbol::{Interns, ModuleId};
@ -53,6 +54,7 @@ mod test_load {
target_info,
Default::default(), // these tests will re-compile the builtins
RenderTarget::Generic,
Threading::Single,
)? {
Monomorphized(_) => unreachable!(""),
TypeChecked(module) => Ok(module),

View File

@ -1210,15 +1210,8 @@ pub fn optimize_when<'a>(
// bind the fields referenced in the pattern. For guards this happens separately, so
// the pattern variables are defined when evaluating the guard.
if !has_guard {
branch = crate::ir::store_pattern(
env,
procs,
layout_cache,
&pattern,
cond_layout,
cond_symbol,
branch,
);
branch =
crate::ir::store_pattern(env, procs, layout_cache, &pattern, cond_symbol, branch);
}
let ((branch_index, choice), opt_jump) = create_choices(&target_counts, index, branch);
@ -1730,15 +1723,7 @@ fn decide_to_branching<'a>(
body: arena.alloc(decide),
};
crate::ir::store_pattern(
env,
procs,
layout_cache,
&pattern,
cond_layout,
cond_symbol,
join,
)
crate::ir::store_pattern(env, procs, layout_cache, &pattern, cond_symbol, join)
}
Chain {
test_chain,

View File

@ -10,6 +10,7 @@ use roc_builtins::bitcode::{FloatWidth, IntWidth};
use roc_can::abilities::AbilitiesStore;
use roc_can::expr::{AnnotatedMark, ClosureData, IntValue};
use roc_collections::all::{default_hasher, BumpMap, BumpMapDefault, MutMap};
use roc_collections::VecMap;
use roc_debug_flags::{
dbg_do, ROC_PRINT_IR_AFTER_REFCOUNT, ROC_PRINT_IR_AFTER_RESET_REUSE,
ROC_PRINT_IR_AFTER_SPECIALIZATION,
@ -753,6 +754,157 @@ impl<'a> Specialized<'a> {
}
}
/// Uniquely determines the specialization of a polymorphic (non-proc) value symbol.
/// Two specializations are equivalent if their [`SpecializationMark`]s are equal.
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
struct SpecializationMark<'a> {
/// The layout of the symbol itself.
layout: Layout<'a>,
/// If this symbol is a closure def, we must also keep track of what function it specializes,
/// because the [`layout`] field will only keep track of its closure and lambda set - which can
/// be the same for two different function specializations. For example,
///
/// id = if True then \x -> x else \y -> y
/// { a: id "", b: id 1u8 }
///
/// The lambda set and captures of `id` are the same in both usages inside the record, but the
/// reified specializations of `\x -> x` and `\y -> y` must be for Str and U8.
///
/// Note that this field is not relevant for anything that is not a function.
function_mark: Option<RawFunctionLayout<'a>>,
}
/// When walking a function body, we may encounter specialized usages of polymorphic symbols. For
/// example
///
/// myTag = A
/// use1 : [A, B]
/// use1 = myTag
/// use2 : [A, B, C]
/// use2 = myTag
///
/// We keep track of the specializations of `myTag` and create fresh symbols when there is more
/// than one, so that a unique def can be created for each.
#[derive(Default, Debug, Clone)]
struct SymbolSpecializations<'a>(
// THEORY:
// 1. the number of symbols in a def is very small
// 2. the number of specializations of a symbol in a def is even smaller (almost always only one)
// So, a linear VecMap is preferable. Use a two-layered one to make (1) extraction of defs easy
// and (2) reads of a certain symbol be determined by its first occurrence, not its last.
VecMap<Symbol, VecMap<SpecializationMark<'a>, (Variable, Symbol)>>,
);
impl<'a> SymbolSpecializations<'a> {
/// Gets a specialization for a symbol, or creates a new one.
#[inline(always)]
fn get_or_insert(
&mut self,
env: &mut Env<'a, '_>,
layout_cache: &mut LayoutCache<'a>,
symbol: Symbol,
specialization_var: Variable,
) -> Symbol {
let arena = env.arena;
let subs: &Subs = env.subs;
let layout = match layout_cache.from_var(arena, specialization_var, subs) {
Ok(layout) => layout,
// This can happen when the def symbol has a type error. In such cases just use the
// def symbol, which is erroring.
Err(_) => return symbol,
};
let is_closure = matches!(
subs.get_content_without_compacting(specialization_var),
Content::Structure(FlatType::Func(..))
);
let function_mark = if is_closure {
let fn_layout = match layout_cache.raw_from_var(arena, specialization_var, subs) {
Ok(layout) => layout,
// This can happen when the def symbol has a type error. In such cases just use the
// def symbol, which is erroring.
Err(_) => return symbol,
};
Some(fn_layout)
} else {
None
};
let specialization_mark = SpecializationMark {
layout,
function_mark,
};
let symbol_specializations = self.0.get_or_insert(symbol, Default::default);
// For the first specialization, always reuse the current symbol. The vast majority of defs
// only have one instance type, so this preserves readability of the IR.
// TODO: turn me off and see what breaks.
let needs_fresh_symbol = !symbol_specializations.is_empty();
let mut make_specialized_symbol = || {
if needs_fresh_symbol {
env.unique_symbol()
} else {
symbol
}
};
let (_var, specialized_symbol) = symbol_specializations
.get_or_insert(specialization_mark, || {
(specialization_var, make_specialized_symbol())
});
*specialized_symbol
}
/// Inserts a known specialization for a symbol. Returns the overwritten specialization, if any.
pub fn get_or_insert_known(
&mut self,
symbol: Symbol,
mark: SpecializationMark<'a>,
specialization_var: Variable,
specialization_symbol: Symbol,
) -> Option<(Variable, Symbol)> {
self.0
.get_or_insert(symbol, Default::default)
.insert(mark, (specialization_var, specialization_symbol))
}
/// Removes all specializations for a symbol, returning the type and symbol of each specialization.
pub fn remove(
&mut self,
symbol: Symbol,
) -> impl ExactSizeIterator<Item = (SpecializationMark<'a>, (Variable, Symbol))> {
self.0
.remove(&symbol)
.map(|(_, specializations)| specializations)
.unwrap_or_default()
.into_iter()
}
/// Expects and removes at most a single specialization symbol for the given requested symbol.
/// A symbol may have no specializations if it is never referenced in a body, so it is possible
/// for this to return None.
pub fn remove_single(&mut self, symbol: Symbol) -> Option<Symbol> {
let mut specializations = self.remove(symbol);
debug_assert!(
specializations.len() < 2,
"Symbol {:?} has multiple specializations",
symbol
);
specializations.next().map(|(_, (_, symbol))| symbol)
}
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
}
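A second editorial sketch, this time of the reuse policy that `get_or_insert` and `remove` describe above: the first requested specialization keeps the original symbol, later ones mint fresh symbols, and the def site drains them all. `Specializations`, `Mark`, and the `fresh` counter below are simplified stand-ins (plain strings and a `HashMap`), not the compiler's `Symbol`, `SpecializationMark`, `Env`, or `LayoutCache`.

use std::collections::HashMap;

type Symbol = String;
type Mark = &'static str; // stand-in for SpecializationMark

#[derive(Default)]
struct Specializations(HashMap<Symbol, Vec<(Mark, Symbol)>>);

impl Specializations {
    fn get_or_insert(
        &mut self,
        symbol: &str,
        mark: Mark,
        fresh: &mut impl FnMut() -> Symbol,
    ) -> Symbol {
        let specs = self.0.entry(symbol.to_string()).or_default();
        if let Some((_, existing)) = specs.iter().find(|(m, _)| *m == mark) {
            return existing.clone();
        }
        // The first specialization reuses the original symbol (keeps the IR readable);
        // any further specialization gets a fresh symbol.
        let name = if specs.is_empty() {
            symbol.to_string()
        } else {
            fresh()
        };
        specs.push((mark, name.clone()));
        name
    }

    // The def site drains every requested specialization at once.
    fn remove(&mut self, symbol: &str) -> Vec<(Mark, Symbol)> {
        self.0.remove(symbol).unwrap_or_default()
    }
}

fn main() {
    let mut counter = 0;
    let mut fresh = || {
        counter += 1;
        format!("id{}", counter)
    };

    let mut specs = Specializations::default();
    assert_eq!(specs.get_or_insert("id", "U8", &mut fresh), "id"); // first use reuses the name
    assert_eq!(specs.get_or_insert("id", "U16", &mut fresh), "id1"); // second use gets a fresh name
    assert_eq!(specs.get_or_insert("id", "U8", &mut fresh), "id"); // same mark, same symbol
    assert_eq!(specs.remove("id").len(), 2); // the def site sees both requests
}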
#[derive(Clone, Debug)]
pub struct Procs<'a> {
pub partial_procs: PartialProcs<'a>,
@ -763,7 +915,7 @@ pub struct Procs<'a> {
specialized: Specialized<'a>,
pub runtime_errors: BumpMap<Symbol, &'a str>,
pub externals_we_need: BumpMap<ModuleId, ExternalSpecializations>,
pub needed_symbol_specializations: BumpMap<(Symbol, Layout<'a>), (Variable, Symbol)>,
symbol_specializations: SymbolSpecializations<'a>,
}
impl<'a> Procs<'a> {
@ -777,38 +929,9 @@ impl<'a> Procs<'a> {
specialized: Specialized::default(),
runtime_errors: BumpMap::new_in(arena),
externals_we_need: BumpMap::new_in(arena),
needed_symbol_specializations: BumpMap::new_in(arena),
symbol_specializations: Default::default(),
}
}
/// Expects and removes a single specialization symbol for the given requested symbol.
/// In debug builds, we assert that the layout of the specialization is the layout expected by
/// the requested symbol.
fn remove_single_symbol_specialization(
&mut self,
symbol: Symbol,
layout: Layout,
) -> Option<Symbol> {
let mut specialized_symbols = self
.needed_symbol_specializations
.drain_filter(|(sym, _), _| sym == &symbol);
let specialization_symbol = specialized_symbols
.next()
.map(|((_, specialized_layout), (_, specialized_symbol))| {
debug_assert_eq!(specialized_layout, layout, "Requested the single specialization of {:?}, but the specialization layout ({:?}) doesn't match the expected layout ({:?})", symbol, specialized_layout, layout);
specialized_symbol
});
debug_assert_eq!(
specialized_symbols.count(),
0,
"Symbol {:?} has multiple specializations",
symbol
);
specialization_symbol
}
}
#[derive(Clone, Debug, PartialEq)]
@ -2206,9 +2329,9 @@ pub fn specialize_all<'a>(
specialize_host_specializations(env, &mut procs, layout_cache, specializations_for_host);
debug_assert!(
procs.needed_symbol_specializations.is_empty(),
procs.symbol_specializations.is_empty(),
"{:?}",
&procs.needed_symbol_specializations
&procs.symbol_specializations
);
procs
@ -2543,11 +2666,10 @@ fn specialize_external<'a>(
// An argument from the closure list may have taken on a specialized symbol
// name during the evaluation of the def body. If this is the case, load the
// specialized name rather than the original captured name!
let mut get_specialized_name = |symbol, layout| {
let mut get_specialized_name = |symbol| {
procs
.needed_symbol_specializations
.remove(&(symbol, layout))
.map(|(_, specialized)| specialized)
.symbol_specializations
.remove_single(symbol)
.unwrap_or(symbol)
};
@ -2585,7 +2707,7 @@ fn specialize_external<'a>(
union_layout,
};
let symbol = get_specialized_name(**symbol, **layout);
let symbol = get_specialized_name(**symbol);
specialized_body = Stmt::Let(
symbol,
@ -2628,7 +2750,7 @@ fn specialize_external<'a>(
structure: Symbol::ARG_CLOSURE,
};
let symbol = get_specialized_name(**symbol, **layout);
let symbol = get_specialized_name(**symbol);
specialized_body = Stmt::Let(
symbol,
@ -2673,11 +2795,10 @@ fn specialize_external<'a>(
let proc_args: Vec<_> = proc_args
.iter()
.map(|&(layout, symbol)| {
// Grab the specialization symbol, if it exists.
let symbol = procs
.needed_symbol_specializations
// We can remove the specialization since this is the definition site.
.remove(&(symbol, layout))
.map(|(_, specialized_symbol)| specialized_symbol)
.symbol_specializations
.remove_single(symbol)
.unwrap_or(symbol);
(layout, symbol)
@ -3391,18 +3512,7 @@ pub fn with_hole<'a>(
);
let outer_symbol = env.unique_symbol();
let pattern_layout = layout_cache
.from_var(env.arena, def.expr_var, env.subs)
.expect("Pattern has no layout");
stmt = store_pattern(
env,
procs,
layout_cache,
&mono_pattern,
pattern_layout,
outer_symbol,
stmt,
);
stmt = store_pattern(env, procs, layout_cache, &mono_pattern, outer_symbol, stmt);
// convert the def body, store in outer_symbol
with_hole(
@ -3445,7 +3555,9 @@ pub fn with_hole<'a>(
can_reuse_symbol(env, procs, &roc_can::expr::Expr::Var(symbol))
{
let real_symbol =
reuse_symbol_or_specialize(env, procs, layout_cache, symbol, variable);
procs
.symbol_specializations
.get_or_insert(env, layout_cache, symbol, variable);
symbol = real_symbol;
}
@ -3524,8 +3636,12 @@ pub fn with_hole<'a>(
match can_reuse_symbol(env, procs, &loc_arg_expr.value) {
// Opaques decay to their argument.
ReuseSymbol::Value(symbol) => {
let real_name =
reuse_symbol_or_specialize(env, procs, layout_cache, symbol, arg_var);
let real_name = procs.symbol_specializations.get_or_insert(
env,
layout_cache,
symbol,
arg_var,
);
let mut result = hole.clone();
substitute_in_exprs(arena, &mut result, assigned, real_name);
result
@ -3578,9 +3694,8 @@ pub fn with_hole<'a>(
can_fields.push(Field::Function(symbol, variable));
}
Value(symbol) => {
let reusable = reuse_symbol_or_specialize(
let reusable = procs.symbol_specializations.get_or_insert(
env,
procs,
layout_cache,
symbol,
field.var,
@ -4393,25 +4508,38 @@ pub fn with_hole<'a>(
}
}
}
Value(function_symbol) => match full_layout {
RawFunctionLayout::Function(arg_layouts, lambda_set, ret_layout) => {
let closure_data_symbol = function_symbol;
Value(function_symbol) => {
let function_symbol = procs.symbol_specializations.get_or_insert(
env,
layout_cache,
function_symbol,
fn_var,
);
result = match_on_lambda_set(
env,
lambda_set,
closure_data_symbol,
arg_symbols,
match full_layout {
RawFunctionLayout::Function(
arg_layouts,
lambda_set,
ret_layout,
assigned,
hole,
);
) => {
let closure_data_symbol = function_symbol;
result = match_on_lambda_set(
env,
lambda_set,
closure_data_symbol,
arg_symbols,
arg_layouts,
ret_layout,
assigned,
hole,
);
}
RawFunctionLayout::ZeroArgumentThunk(_) => {
unreachable!("calling a non-closure layout")
}
}
RawFunctionLayout::ZeroArgumentThunk(_) => {
unreachable!("calling a non-closure layout")
}
},
}
UnspecializedExpr(symbol) => {
match procs.ability_member_aliases.get(symbol).unwrap() {
&AbilityMember(member) => {
@ -5561,7 +5689,6 @@ pub fn from_can<'a>(
}
LetNonRec(def, cont, outer_annotation) => {
if let roc_can::pattern::Pattern::Identifier(symbol) = &def.loc_pattern.value {
// dbg!(symbol, &def.loc_expr.value);
match def.loc_expr.value {
roc_can::expr::Expr::Closure(closure_data) => {
register_capturing_closure(env, procs, layout_cache, *symbol, closure_data);
@ -5693,12 +5820,14 @@ pub fn from_can<'a>(
_ => {
let rest = from_can(env, variable, cont.value, procs, layout_cache);
let needs_def_specializations = procs
.needed_symbol_specializations
.keys()
.any(|(s, _)| s == symbol);
// Remove all the requested symbol specializations now, since this is the
// def site and hence we won't need them any higher up.
let mut needed_specializations =
procs.symbol_specializations.remove(*symbol);
if !needs_def_specializations {
if needed_specializations.len() == 0 {
// We don't need any specializations, which means this symbol is never
// referenced.
return with_hole(
env,
def.loc_expr.value,
@ -5714,16 +5843,9 @@ pub fn from_can<'a>(
let mut stmt = rest;
// Remove all the requested symbol specializations now, since this is the
// def site and hence we won't need them any higher up.
let mut needed_specializations = procs
.needed_symbol_specializations
.drain_filter(|(s, _), _| s == symbol)
.collect::<std::vec::Vec<_>>();
if needed_specializations.len() == 1 {
let ((_, _wanted_layout), (var, specialized_symbol)) =
needed_specializations.pop().unwrap();
let (_specialization_mark, (var, specialized_symbol)) =
needed_specializations.next().unwrap();
// Unify the expr_var with the requested specialization once.
let _res =
@ -5740,7 +5862,7 @@ pub fn from_can<'a>(
);
} else {
// Need to eat the cost and create a specialized version of the body for each specialization.
for ((_original_symbol, _wanted_layout), (var, specialized_symbol)) in
for (_specialization_mark, (var, specialized_symbol)) in
needed_specializations
{
use crate::copy::deep_copy_type_vars_into_expr;
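// Editorial note (not part of this commit): the branches above handle the three
// possible counts of specializations requested for a non-recursive def:
//   0  -> the symbol is never referenced, so the body is lowered once under the
//         original symbol and lowering continues with the rest of the program;
//   1  -> the def's expr_var is unified with the single requested type and one
//         body is emitted under the (possibly reused) specialized symbol;
//   2+ -> the body's type variables are deep-copied per request, e.g. a def
//         like `id = \x -> x` used at both U8 and U16 yields two specialized
//         defs, one per requested type.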
@ -5810,11 +5932,9 @@ pub fn from_can<'a>(
// layer on any default record fields
for (symbol, variable, expr) in assignments {
let layout = layout_cache
.from_var(env.arena, variable, env.subs)
.expect("Default field has no layout");
let specialization_symbol = procs
.remove_single_symbol_specialization(symbol, layout)
.symbol_specializations
.remove_single(symbol)
// Can happen when the symbol was never used under this body, and hence has no
// requested specialization.
.unwrap_or(symbol);
@ -5831,30 +5951,12 @@ pub fn from_can<'a>(
);
}
let pattern_layout = layout_cache
.from_var(env.arena, def.expr_var, env.subs)
.expect("Pattern has no layout");
if let roc_can::expr::Expr::Var(outer_symbol) = def.loc_expr.value {
store_pattern(
env,
procs,
layout_cache,
&mono_pattern,
pattern_layout,
outer_symbol,
stmt,
)
store_pattern(env, procs, layout_cache, &mono_pattern, outer_symbol, stmt)
} else {
let outer_symbol = env.unique_symbol();
stmt = store_pattern(
env,
procs,
layout_cache,
&mono_pattern,
pattern_layout,
outer_symbol,
stmt,
);
stmt =
store_pattern(env, procs, layout_cache, &mono_pattern, outer_symbol, stmt);
// convert the def body, store in outer_symbol
with_hole(
@ -6420,19 +6522,10 @@ pub fn store_pattern<'a>(
procs: &mut Procs<'a>,
layout_cache: &mut LayoutCache<'a>,
can_pat: &Pattern<'a>,
pattern_layout: Layout,
outer_symbol: Symbol,
stmt: Stmt<'a>,
) -> Stmt<'a> {
match store_pattern_help(
env,
procs,
layout_cache,
can_pat,
pattern_layout,
outer_symbol,
stmt,
) {
match store_pattern_help(env, procs, layout_cache, can_pat, outer_symbol, stmt) {
StorePattern::Productive(new) => new,
StorePattern::NotProductive(new) => new,
}
@ -6452,7 +6545,6 @@ fn store_pattern_help<'a>(
procs: &mut Procs<'a>,
layout_cache: &mut LayoutCache<'a>,
can_pat: &Pattern<'a>,
pattern_layout: Layout,
outer_symbol: Symbol,
mut stmt: Stmt<'a>,
) -> StorePattern<'a> {
@ -6463,7 +6555,8 @@ fn store_pattern_help<'a>(
// An identifier in a pattern can define at most one specialization!
// Remove any requested specializations for this name now, since this is the definition site.
let specialization_symbol = procs
.remove_single_symbol_specialization(*symbol, pattern_layout)
.symbol_specializations
.remove_single(*symbol)
// Can happen when the symbol was never used under this body, and hence has no
// requested specialization.
.unwrap_or(*symbol);
@ -6484,16 +6577,8 @@ fn store_pattern_help<'a>(
return StorePattern::NotProductive(stmt);
}
NewtypeDestructure { arguments, .. } => match arguments.as_slice() {
[(pattern, layout)] => {
return store_pattern_help(
env,
procs,
layout_cache,
pattern,
*layout,
outer_symbol,
stmt,
);
[(pattern, _layout)] => {
return store_pattern_help(env, procs, layout_cache, pattern, outer_symbol, stmt);
}
_ => {
let mut fields = Vec::with_capacity_in(arguments.len(), env.arena);
@ -6530,16 +6615,8 @@ fn store_pattern_help<'a>(
);
}
OpaqueUnwrap { argument, .. } => {
let (pattern, layout) = &**argument;
return store_pattern_help(
env,
procs,
layout_cache,
pattern,
*layout,
outer_symbol,
stmt,
);
let (pattern, _layout) = &**argument;
return store_pattern_help(env, procs, layout_cache, pattern, outer_symbol, stmt);
}
RecordDestructure(destructs, [_single_field]) => {
@ -6547,7 +6624,8 @@ fn store_pattern_help<'a>(
match &destruct.typ {
DestructType::Required(symbol) => {
let specialization_symbol = procs
.remove_single_symbol_specialization(*symbol, destruct.layout)
.symbol_specializations
.remove_single(*symbol)
// Can happen when the symbol was never used under this body, and hence has no
// requested specialization.
.unwrap_or(*symbol);
@ -6565,7 +6643,6 @@ fn store_pattern_help<'a>(
procs,
layout_cache,
guard_pattern,
destruct.layout,
outer_symbol,
stmt,
);
@ -6638,7 +6715,8 @@ fn store_tag_pattern<'a>(
Identifier(symbol) => {
// Pattern can define only one specialization
let symbol = procs
.remove_single_symbol_specialization(*symbol, arg_layout)
.symbol_specializations
.remove_single(*symbol)
.unwrap_or(*symbol);
// store immediately in the given symbol
@ -6659,15 +6737,7 @@ fn store_tag_pattern<'a>(
let symbol = env.unique_symbol();
// first recurse, continuing to unpack symbol
match store_pattern_help(
env,
procs,
layout_cache,
argument,
arg_layout,
symbol,
stmt,
) {
match store_pattern_help(env, procs, layout_cache, argument, symbol, stmt) {
StorePattern::Productive(new) => {
is_productive = true;
stmt = new;
@ -6727,7 +6797,8 @@ fn store_newtype_pattern<'a>(
Identifier(symbol) => {
// store immediately in the given symbol, removing its specialization if it had any
let specialization_symbol = procs
.remove_single_symbol_specialization(*symbol, arg_layout)
.symbol_specializations
.remove_single(*symbol)
// Can happen when the symbol was never used under this body, and hence has no
// requested specialization.
.unwrap_or(*symbol);
@ -6754,15 +6825,7 @@ fn store_newtype_pattern<'a>(
let symbol = env.unique_symbol();
// first recurse, continuing to unpack symbol
match store_pattern_help(
env,
procs,
layout_cache,
argument,
arg_layout,
symbol,
stmt,
) {
match store_pattern_help(env, procs, layout_cache, argument, symbol, stmt) {
StorePattern::Productive(new) => {
is_productive = true;
stmt = new;
@ -6810,7 +6873,8 @@ fn store_record_destruct<'a>(
// A destructure can define at most one specialization!
// Remove any requested specializations for this name now, since this is the definition site.
let specialization_symbol = procs
.remove_single_symbol_specialization(*symbol, destruct.layout)
.symbol_specializations
.remove_single(*symbol)
// Can happen when the symbol was never used under this body, and hence has no
// requested specialization.
.unwrap_or(*symbol);
@ -6825,7 +6889,8 @@ fn store_record_destruct<'a>(
DestructType::Guard(guard_pattern) => match &guard_pattern {
Identifier(symbol) => {
let specialization_symbol = procs
.remove_single_symbol_specialization(*symbol, destruct.layout)
.symbol_specializations
.remove_single(*symbol)
// Can happen when the symbol was never used under this body, and hence has no
// requested specialization.
.unwrap_or(*symbol);
@ -6863,15 +6928,7 @@ fn store_record_destruct<'a>(
_ => {
let symbol = env.unique_symbol();
match store_pattern_help(
env,
procs,
layout_cache,
guard_pattern,
destruct.layout,
symbol,
stmt,
) {
match store_pattern_help(env, procs, layout_cache, guard_pattern, symbol, stmt) {
StorePattern::Productive(new) => {
stmt = new;
stmt = Stmt::Let(symbol, load, destruct.layout, env.arena.alloc(stmt));
@ -6934,45 +6991,6 @@ fn can_reuse_symbol<'a>(
}
}
/// Reuses the specialized symbol for a given symbol and instance type. If no specialization symbol
/// yet exists, one is created.
fn reuse_symbol_or_specialize<'a>(
env: &mut Env<'a, '_>,
procs: &mut Procs<'a>,
layout_cache: &mut LayoutCache<'a>,
symbol: Symbol,
var: Variable,
) -> Symbol {
let wanted_layout = match layout_cache.from_var(env.arena, var, env.subs) {
Ok(layout) => layout,
// This can happen when the def symbol has a type error. In such cases just use the
// def symbol, which is erroring.
Err(_) => return symbol,
};
// For the first specialization, always reuse the current symbol. The vast majority of defs
// only have one instance type, so this preserves readability of the IR.
let needs_fresh_symbol = procs
.needed_symbol_specializations
.keys()
.any(|(s, _)| *s == symbol);
let mut make_specialized_symbol = || {
if needs_fresh_symbol {
env.unique_symbol()
} else {
symbol
}
};
let (_, specialized_symbol) = procs
.needed_symbol_specializations
.entry((symbol, wanted_layout))
.or_insert_with(|| (var, make_specialized_symbol()));
*specialized_symbol
}
fn possible_reuse_symbol_or_specialize<'a>(
env: &mut Env<'a, '_>,
procs: &mut Procs<'a>,
@ -6982,7 +7000,9 @@ fn possible_reuse_symbol_or_specialize<'a>(
) -> Symbol {
match can_reuse_symbol(env, procs, expr) {
ReuseSymbol::Value(symbol) => {
reuse_symbol_or_specialize(env, procs, layout_cache, symbol, var)
procs
.symbol_specializations
.get_or_insert(env, layout_cache, symbol, var)
}
_ => env.unique_symbol(),
}
@ -7027,16 +7047,13 @@ where
let result = build_rest(env, procs, layout_cache);
// The specializations we wanted of the symbol on the LHS of this alias.
let needed_specializations_of_left = procs
.needed_symbol_specializations
.drain_filter(|(s, _), _| s == &left)
.collect::<std::vec::Vec<_>>();
let needed_specializations_of_left = procs.symbol_specializations.remove(left);
if procs.is_imported_module_thunk(right) {
// if this is an imported symbol, then we must make sure it is
// specialized, and wrap the original in a function pointer.
let mut result = result;
for (_, (variable, left)) in needed_specializations_of_left.into_iter() {
for (_, (variable, left)) in needed_specializations_of_left {
add_needed_external(procs, env, variable, right);
let res_layout = layout_cache.from_var(env.arena, variable, env.subs);
@ -7056,14 +7073,17 @@ where
// We need to lift all specializations of "left" to be specializations of "right".
let mut scratchpad_update_specializations = std::vec::Vec::new();
let left_had_specialization_symbols = !needed_specializations_of_left.is_empty();
let left_had_specialization_symbols = needed_specializations_of_left.len() > 0;
for ((_, layout), (specialized_var, specialized_sym)) in
needed_specializations_of_left.into_iter()
for (specialization_mark, (specialized_var, specialized_sym)) in
needed_specializations_of_left
{
let old_specialized_sym = procs
.needed_symbol_specializations
.insert((right, layout), (specialized_var, specialized_sym));
let old_specialized_sym = procs.symbol_specializations.get_or_insert_known(
right,
specialization_mark,
specialized_var,
specialized_sym,
);
if let Some((_, old_specialized_sym)) = old_specialized_sym {
scratchpad_update_specializations.push((old_specialized_sym, specialized_sym));
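// Editorial note (not part of this commit): this is, roughly, the aliasing case
// `left = right`. Specializations that were requested for `left` while lowering
// the rest of the program are re-registered under `right`, the symbol they
// actually refer to. If `right` already had a specialization for the same mark,
// the displaced symbol is queued in `scratchpad_update_specializations` so the
// two can be reconciled later (the code that consumes the scratchpad lies
// outside this hunk).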

View File

@ -475,6 +475,8 @@ fn expression<'a>(
space0_before_e(term(min_indent), min_indent, EType::TIndentStart)
.parse(arena, state)?;
let region = Region::span_across(&first.region, &return_type.region);
// prepare arguments
let mut arguments = Vec::with_capacity_in(rest.len() + 1, arena);
arguments.push(first);
@ -482,7 +484,7 @@ fn expression<'a>(
let output = arena.alloc(arguments);
let result = Loc {
region: return_type.region,
region,
value: TypeAnnotation::Function(output, arena.alloc(return_type)),
};
let progress = p1.or(p2).or(p3);

View File

@ -15,7 +15,7 @@ Defs(
Newline,
],
),
typ: @33-36 Function(
typ: @18-36 Function(
[
@18-19 BoundVariable(
"a",

View File

@ -15,7 +15,7 @@ Defs(
Newline,
],
),
typ: @23-26 Function(
typ: @18-26 Function(
[
@18-19 BoundVariable(
"a",
@ -35,7 +35,7 @@ Defs(
Newline,
],
),
typ: @42-45 Function(
typ: @37-45 Function(
[
@37-38 BoundVariable(
"a",

View File

@ -10,8 +10,8 @@ Defs(
members: [
AbilityMember {
name: @9-13 "hash",
typ: @21-37 Where(
@21-24 Function(
typ: @16-37 Where(
@16-24 Function(
[
@16-17 BoundVariable(
"a",

View File

@ -10,8 +10,8 @@ Defs(
members: [
AbilityMember {
name: @8-11 "ab1",
typ: @19-33 Where(
@19-21 Function(
typ: @14-33 Where(
@14-21 Function(
[
@14-15 BoundVariable(
"a",
@ -47,8 +47,8 @@ Defs(
members: [
AbilityMember {
name: @43-46 "ab2",
typ: @54-68 Where(
@54-56 Function(
typ: @49-68 Where(
@49-56 Function(
[
@49-50 BoundVariable(
"a",

View File

@ -14,7 +14,7 @@
ann_pattern: @11-23 Identifier(
"wrappedNotEq",
),
ann_type: @34-38 Function(
ann_type: @26-38 Function(
[
@26-27 BoundVariable(
"a",

View File

@ -32,7 +32,7 @@ Defs(
RequiredValue(
@48-55 "putLine",
[],
@65-75 Function(
@58-75 Function(
[
@58-61 Apply(
"",

View File

@ -0,0 +1,83 @@
Defs(
[
@0-77 Value(
Annotation(
@0-1 Identifier(
"x",
),
@4-77 Record {
fields: [
@6-24 RequiredValue(
@6-10 "init",
[],
@13-24 Function(
[
@13-15 Record {
fields: [],
ext: None,
},
],
@19-24 Apply(
"",
"Model",
[],
),
),
),
@26-54 RequiredValue(
@26-32 "update",
[],
@35-54 Function(
[
@35-40 Apply(
"",
"Model",
[],
),
@42-45 Apply(
"",
"Str",
[],
),
],
@49-54 Apply(
"",
"Model",
[],
),
),
),
@56-75 RequiredValue(
@56-60 "view",
[],
@63-75 Function(
[
@63-68 Apply(
"",
"Model",
[],
),
],
@72-75 Apply(
"",
"Str",
[],
),
),
),
],
ext: None,
},
),
),
],
@79-81 SpaceBefore(
Num(
"42",
),
[
Newline,
Newline,
],
),
)

View File

@ -0,0 +1,3 @@
x : { init : {} -> Model, update : Model, Str -> Model, view : Model -> Str }
42

View File

@ -5,7 +5,7 @@ Defs(
@0-7 Identifier(
"doStuff",
),
@20-30 Function(
@10-30 Function(
[
@10-16 Apply(
"",

View File

@ -5,14 +5,14 @@ Defs(
@0-1 Identifier(
"f",
),
@15-27 Where(
@15-16 Function(
@4-27 Where(
@4-16 Function(
[
@4-5 BoundVariable(
"a",
),
],
@15-16 Function(
@10-16 Function(
[
@10-11 BoundVariable(
"b",

View File

@ -5,14 +5,14 @@ Defs(
@0-1 Identifier(
"f",
),
@15-48 Where(
@15-16 Function(
@4-48 Where(
@4-16 Function(
[
@4-5 BoundVariable(
"a",
),
],
@15-16 Function(
@10-16 Function(
[
@10-11 BoundVariable(
"b",

View File

@ -5,15 +5,15 @@ Defs(
@0-1 Identifier(
"f",
),
@15-67 Where(
@15-16 SpaceBefore(
@4-67 Where(
@4-16 SpaceBefore(
Function(
[
@4-5 BoundVariable(
"a",
),
],
@15-16 Function(
@10-16 Function(
[
@10-11 BoundVariable(
"b",

View File

@ -5,8 +5,8 @@ Defs(
@0-1 Identifier(
"f",
),
@9-29 Where(
@9-12 SpaceBefore(
@4-29 Where(
@4-12 SpaceBefore(
Function(
[
@4-5 BoundVariable(

View File

@ -161,6 +161,7 @@ mod test_parse {
pass/equals_with_spaces.expr,
pass/equals.expr,
pass/expect.expr,
pass/record_type_with_function.expr,
pass/float_with_underscores.expr,
pass/full_app_header_trailing_commas.header,
pass/full_app_header.header,
@ -299,7 +300,12 @@ mod test_parse {
let input_path = parent.join(&format!("{}.{}.roc", name, ty));
let result_path = parent.join(&format!("{}.{}.result-ast", name, ty));
let input = std::fs::read_to_string(&input_path).unwrap();
let input = std::fs::read_to_string(&input_path).unwrap_or_else(|err| {
panic!(
"Could not find a snapshot test result at {:?} - {:?}",
input_path, err
)
});
let result = func(&input);

View File

@ -3270,3 +3270,51 @@ fn dec_float_suffix() {
i128
);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn ceiling_to_u32() {
assert_evals_to!(
indoc!(
r#"
n : U32
n = Num.ceiling 124.5
n
"#
),
125,
u32
);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn floor_to_u32() {
assert_evals_to!(
indoc!(
r#"
n : U32
n = Num.floor 124.5
n
"#
),
124,
u32
);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn round_to_u32() {
assert_evals_to!(
indoc!(
r#"
n : U32
n = Num.round 124.49
n
"#
),
124,
u32
);
}

View File

@ -3267,7 +3267,6 @@ fn polymophic_expression_captured_inside_closure() {
#[test]
#[cfg(any(feature = "gen-llvm"))]
#[ignore = "Compile polymorphic functions"]
fn issue_2322() {
assert_evals_to!(
indoc!(
@ -3421,7 +3420,6 @@ fn polymorphic_def_used_in_closure() {
#[test]
#[cfg(any(feature = "gen-llvm"))]
#[ignore = "This still doesn't work... yet"]
fn polymorphic_lambda_set_usage() {
assert_evals_to!(
indoc!(
@ -3429,6 +3427,7 @@ fn polymorphic_lambda_set_usage() {
id1 = \x -> x
id2 = \y -> y
id = if True then id1 else id2
id 9u8
"#
),
@ -3437,6 +3436,24 @@ fn polymorphic_lambda_set_usage() {
)
}
#[test]
#[cfg(any(feature = "gen-llvm"))]
fn polymorphic_lambda_set_multiple_specializations() {
assert_evals_to!(
indoc!(
r#"
id1 = \x -> x
id2 = \y -> y
id = if True then id1 else id2
(id 9u8) + Num.toU8 (id 16u16)
"#
),
25,
u8
)
}
#[test]
#[cfg(any(feature = "gen-llvm"))]
fn list_map2_conslist() {
@ -3456,4 +3473,4 @@ fn list_map2_conslist() {
RocStr::default(),
RocStr
)
}
}

View File

@ -2,6 +2,7 @@ use libloading::Library;
use roc_build::link::{link, LinkType};
use roc_builtins::bitcode;
use roc_collections::all::MutMap;
use roc_load::Threading;
use roc_region::all::LineInfo;
use tempfile::tempdir;
@ -55,6 +56,7 @@ pub fn helper(
Default::default(),
roc_target::TargetInfo::default_x86_64(),
roc_reporting::report::RenderTarget::ColorTerminal,
Threading::Single,
);
let mut loaded = loaded.expect("failed to load module");

View File

@ -5,6 +5,7 @@ use roc_build::link::module_to_dylib;
use roc_build::program::FunctionIterator;
use roc_collections::all::MutSet;
use roc_gen_llvm::llvm::externs::add_default_roc_externs;
use roc_load::Threading;
use roc_mono::ir::OptLevel;
use roc_region::all::LineInfo;
use roc_reporting::report::RenderTarget;
@ -59,6 +60,7 @@ fn create_llvm_module<'a>(
Default::default(),
target_info,
RenderTarget::ColorTerminal,
Threading::Multi,
);
let mut loaded = match loaded {

View File

@ -1,16 +1,16 @@
use super::RefCount;
use crate::helpers::from_wasmer_memory::FromWasmerMemory;
use roc_collections::all::MutSet;
use roc_gen_wasm::wasm32_result::Wasm32Result;
use roc_gen_wasm::wasm_module::{Export, ExportType};
use roc_gen_wasm::{DEBUG_LOG_SETTINGS, MEMORY_NAME};
use roc_load::Threading;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::path::{Path, PathBuf};
use wasmer::{Memory, WasmPtr};
use super::RefCount;
use crate::helpers::from_wasmer_memory::FromWasmerMemory;
use roc_collections::all::MutSet;
use roc_gen_wasm::wasm32_result::Wasm32Result;
use roc_gen_wasm::{DEBUG_LOG_SETTINGS, MEMORY_NAME};
// Should manually match build.rs
const PLATFORM_FILENAME: &str = "wasm_test_platform";
const OUT_DIR_VAR: &str = "TEST_GEN_OUT";
@ -91,6 +91,7 @@ fn compile_roc_to_wasm_bytes<'a, T: Wasm32Result>(
Default::default(),
roc_target::TargetInfo::default_wasm32(),
roc_reporting::report::RenderTarget::ColorTerminal,
Threading::Single,
);
let loaded = loaded.expect("failed to load module");

View File

@ -16,9 +16,9 @@ const EXPANDED_STACK_SIZE: usize = 8 * 1024 * 1024;
use test_mono_macros::*;
use roc_collections::all::MutMap;
use roc_load::Threading;
use roc_module::symbol::Symbol;
use roc_mono::ir::Proc;
use roc_mono::ir::ProcLayout;
const TARGET_INFO: roc_target::TargetInfo = roc_target::TargetInfo::default_x86_64();
@ -99,6 +99,7 @@ fn compiles_to_ir(test_name: &str, src: &str) {
Default::default(),
TARGET_INFO,
roc_reporting::report::RenderTarget::Generic,
Threading::Single,
);
let mut loaded = match loaded {

View File

@ -1708,6 +1708,9 @@ pub enum AnnotationSource {
TypedBody {
region: Region,
},
RequiredSymbol {
region: Region,
},
}
impl AnnotationSource {
@ -1716,6 +1719,7 @@ impl AnnotationSource {
&Self::TypedIfBranch { region, .. }
| &Self::TypedWhenBranch { region, .. }
| &Self::TypedBody { region, .. } => region,
&Self::RequiredSymbol { region, .. } => region,
}
}
}

View File

@ -10,7 +10,7 @@ use roc_highlight::highlight_parser::{highlight_defs, highlight_expr};
use roc_load::docs::DocEntry::DocDef;
use roc_load::docs::{DocEntry, TypeAnnotation};
use roc_load::docs::{ModuleDocumentation, RecordField};
use roc_load::{LoadedModule, LoadingProblem};
use roc_load::{LoadedModule, LoadingProblem, Threading};
use roc_module::symbol::{IdentIdsByModule, Interns, ModuleId};
use roc_parse::ident::{parse_ident, Ident};
use roc_parse::state::State;
@ -435,6 +435,7 @@ pub fn load_modules_for_files(filenames: Vec<PathBuf>) -> Vec<LoadedModule> {
Default::default(),
roc_target::TargetInfo::default_x86_64(), // This is just type-checking for docs, so "target" doesn't matter
roc_reporting::report::RenderTarget::ColorTerminal,
Threading::Multi,
) {
Ok(loaded) => modules.push(loaded),
Err(LoadingProblem::FormattedReport(report)) => {

View File

@ -26,6 +26,7 @@ use pipelines::RectResources;
use roc_ast::lang::env::Env;
use roc_ast::mem_pool::pool::Pool;
use roc_ast::module::load_module;
use roc_load::Threading;
use roc_module::symbol::IdentIds;
use roc_types::subs::VarStore;
use std::collections::HashSet;
@ -128,7 +129,7 @@ fn run_event_loop(project_dir_path_opt: Option<&Path>) -> Result<(), Box<dyn Err
let file_path = Path::new(&file_path_str);
let loaded_module = load_module(file_path);
let loaded_module = load_module(file_path, Threading::Multi);
let mut var_store = VarStore::default();
let dep_idents = IdentIds::exposed_builtins(8);

View File

@ -234,7 +234,7 @@ pub mod test_ed_model {
use roc_ast::lang::env::Env;
use roc_ast::mem_pool::pool::Pool;
use roc_ast::module::load_module;
use roc_load::LoadedModule;
use roc_load::{LoadedModule, Threading};
use roc_module::symbol::IdentIds;
use roc_module::symbol::ModuleIds;
use roc_types::subs::VarStore;
@ -330,7 +330,7 @@ pub mod test_ed_model {
writeln!(file, "{}", clean_code_str)
.unwrap_or_else(|_| panic!("Failed to write {:?} to file: {:?}", clean_code_str, file));
let loaded_module = load_module(&temp_file_full_path);
let loaded_module = load_module(&temp_file_full_path, Threading::Multi);
let mut ed_model = init_dummy_model(
clean_code_str,

View File

@ -6,7 +6,7 @@ app "deriv"
# based on: https://github.com/koka-lang/koka/blob/master/test/bench/haskell/deriv.hs
IO a : Task.Task a []
main : IO {}
main : Task.Task {} []
main =
Task.after
Task.getInt

View File

@ -1,5 +1,5 @@
platform "benchmarks"
requires {} { main : Effect {} }
requires {} { main : Task {} [] }
exposes []
packages {}
imports [ Task.{ Task } ]

View File

@ -1,5 +1,5 @@
platform "effects"
requires {} { main : Effect {} }
requires {} { main : Effect.Effect {} }
exposes []
packages {}
imports [ pf.Effect ]

View File

@ -1,5 +1,5 @@
platform "tui"
requires { Model } { main : Effect {} }
requires { Model } { main : { init : {} -> Model, update : Model, Str -> Model, view : Model -> Str } }
exposes []
packages {}
imports []

View File

@ -1,4 +1,5 @@
use bumpalo::Bump;
use roc_load::Threading;
use roc_reporting::report::Palette;
use std::path::{Path, PathBuf};
@ -61,6 +62,7 @@ pub fn compile_to_mono<'a>(
exposed_types,
target_info,
roc_reporting::report::RenderTarget::ColorTerminal,
Threading::Single,
);
let mut loaded = match loaded {

View File

@ -540,12 +540,18 @@ fn to_expr_report<'b>(
the_name_text,
alloc.text(" definition:"),
]),
RequiredSymbol { .. } => alloc.concat([
alloc.text("type annotation of "),
the_name_text,
alloc.text(" required symbol:"),
]),
};
let it_is = match annotation_source {
TypedIfBranch { index, .. } => format!("The {} branch is", index.ordinal()),
TypedWhenBranch { index, .. } => format!("The {} branch is", index.ordinal()),
TypedBody { .. } => "The body is".into(),
RequiredSymbol { .. } => "The provided type is".into(),
};
let expectation_context = ExpectationContext::Annotation {

View File

@ -385,7 +385,8 @@ impl<'a> RocDocAllocator<'a> {
pub fn tag_name(&'a self, tn: TagName) -> DocBuilder<'a, Self, Annotation> {
match tn {
TagName::Tag(uppercase) => self.tag(uppercase),
TagName::Closure(_symbol) => unreachable!("closure tags are internal only"),
TagName::Closure(symbol) => self.symbol_qualified(symbol),
// TagName::Closure(_symbol) => unreachable!("closure tags are internal only"),
}
}

View File

@ -12,7 +12,7 @@ mod test_reporting {
use bumpalo::Bump;
use indoc::indoc;
use roc_can::abilities::AbilitiesStore;
use roc_load::{self, LoadedModule, LoadingProblem};
use roc_load::{self, LoadedModule, LoadingProblem, Threading};
use roc_module::symbol::{Interns, ModuleId};
use roc_region::all::LineInfo;
use roc_reporting::report::{
@ -92,6 +92,7 @@ mod test_reporting {
exposed_types,
roc_target::TargetInfo::default_x86_64(),
RenderTarget::Generic,
Threading::Single,
);
drop(file);