make shared quicksort work

This commit is contained in:
Folkert 2020-08-15 00:14:20 +02:00
parent c3b242bad1
commit 294273283b
10 changed files with 417 additions and 144 deletions

View File

@ -33,6 +33,9 @@ const PRINT_FN_VERIFICATION_OUTPUT: bool = true;
#[cfg(not(debug_assertions))]
const PRINT_FN_VERIFICATION_OUTPUT: bool = false;
pub const REFCOUNT_0: usize = std::usize::MAX;
pub const REFCOUNT_1: usize = REFCOUNT_0 - 1;
#[derive(Debug, Clone, Copy)]
pub enum OptLevel {
Normal,
@ -871,7 +874,7 @@ pub fn build_exp_stmt<'a, 'ctx, 'env>(
let layout = layout.clone();
match layout {
Layout::Builtin(Builtin::List(MemoryMode::Refcounted, _)) => {
Layout::Builtin(Builtin::List(MemoryMode::Refcounted, _)) if false => {
increment_refcount_list(env, value.into_struct_value());
build_exp_stmt(env, layout_ids, scope, parent, cont)
}
@ -882,9 +885,11 @@ pub fn build_exp_stmt<'a, 'ctx, 'env>(
let (value, layout) = load_symbol_and_layout(env, scope, symbol);
let layout = layout.clone();
/*
if layout.contains_refcounted() {
decrement_refcount_layout(env, parent, value, &layout);
}
*/
build_exp_stmt(env, layout_ids, scope, parent, cont)
}
@ -897,11 +902,7 @@ fn refcount_is_one_comparison<'ctx>(
context: &'ctx Context,
refcount: IntValue<'ctx>,
) -> IntValue<'ctx> {
let refcount_one: IntValue<'ctx> = context.i64_type().const_int((std::usize::MAX) as _, false);
// Note: Check for refcount < refcount_1 as the "true" condition,
// to avoid misprediction. (In practice this should usually pass,
// and CPUs generally default to predicting that a forward jump
// shouldn't be taken; that is, they predict "else" won't be taken.)
let refcount_one: IntValue<'ctx> = context.i64_type().const_int(REFCOUNT_1 as _, false);
builder.build_int_compare(
IntPredicate::EQ,
refcount,
@ -966,6 +967,7 @@ fn decrement_refcount_layout<'a, 'ctx, 'env>(
}
}
}
RecursiveUnion(_) => todo!("TODO implement decrement layout of recursive tag union"),
Union(tags) => {
debug_assert!(!tags.is_empty());
let wrapper_struct = value.into_struct_value();
@ -1099,11 +1101,24 @@ fn decrement_refcount_list<'a, 'ctx, 'env>(
let else_block = ctx.append_basic_block(parent, "else");
let cont_block = ctx.append_basic_block(parent, "dec_ref_branchcont");
// TODO what would be most optimal for the branch predictor
//
// are most refcounts 1 most of the time? or not?
builder.build_conditional_branch(comparison, then_block, else_block);
// build then block
{
builder.position_at_end(then_block);
if !env.leak {
let free = builder.build_free(refcount_ptr);
builder.insert_instruction(&free, None);
}
builder.build_unconditional_branch(cont_block);
}
// build else block
{
builder.position_at_end(else_block);
// our refcount 0 is actually usize::MAX, so decrementing the refcount means incrementing this value.
let decremented = env.builder.build_int_add(
ctx.i64_type().const_int(1 as u64, false),
@ -1117,16 +1132,6 @@ fn decrement_refcount_list<'a, 'ctx, 'env>(
builder.build_unconditional_branch(cont_block);
}
// build else block
{
builder.position_at_end(else_block);
if !env.leak {
let free = builder.build_free(refcount_ptr);
builder.insert_instruction(&free, None);
}
builder.build_unconditional_branch(cont_block);
}
// emit merge block
builder.position_at_end(cont_block);
}
@ -1753,14 +1758,9 @@ fn run_low_level<'a, 'ctx, 'env>(
list_get_unsafe(env, list_layout, elem_index, wrapper_struct)
}
ListSet => {
ListSetInPlace => {
let (list_symbol, list_layout) = load_symbol_and_layout(env, scope, &args[0]);
let in_place = match &list_layout {
Layout::Builtin(Builtin::List(MemoryMode::Unique, _)) => InPlace::InPlace,
_ => InPlace::Clone,
};
list_set(
parent,
&[
@ -1769,19 +1769,57 @@ fn run_low_level<'a, 'ctx, 'env>(
(load_symbol_and_layout(env, scope, &args[2])),
],
env,
in_place,
InPlace::InPlace,
)
}
ListSetInPlace => list_set(
parent,
&[
(load_symbol_and_layout(env, scope, &args[0])),
ListSet => {
let (list_symbol, list_layout) = load_symbol_and_layout(env, scope, &args[0]);
let arguments = &[
(list_symbol, list_layout),
(load_symbol_and_layout(env, scope, &args[1])),
(load_symbol_and_layout(env, scope, &args[2])),
],
env,
InPlace::InPlace,
),
];
match list_layout {
Layout::Builtin(Builtin::List(MemoryMode::Unique, _)) => {
// the layout tells us this List.set can be done in-place
list_set(parent, arguments, env, InPlace::InPlace)
}
Layout::Builtin(Builtin::List(MemoryMode::Refcounted, _)) => {
// no static guarantees, but all is not lost: we can check the refcount
// if it is one, we hold the final reference, and can mutate it in-place!
let builder = env.builder;
let ctx = env.context;
let ret_type =
basic_type_from_layout(env.arena, ctx, list_layout, env.ptr_bytes);
let refcount_ptr = list_get_refcount_ptr(env, list_symbol.into_struct_value());
let refcount = env
.builder
.build_load(refcount_ptr, "get_refcount")
.into_int_value();
let comparison = refcount_is_one_comparison(builder, env.context, refcount);
// build then block
// refcount is 1, so work in-place
let build_pass = || list_set(parent, arguments, env, InPlace::InPlace);
// build else block
// refcount != 1, so clone first
let build_fail = || list_set(parent, arguments, env, InPlace::Clone);
crate::llvm::build_list::build_basic_phi2(
env, parent, comparison, build_pass, build_fail, ret_type,
)
}
Layout::Builtin(Builtin::EmptyList) => list_symbol,
other => unreachable!("List.set: weird layout {:?}", other),
}
}
}
}

View File

@ -1166,7 +1166,7 @@ where
index_alloca
}
fn build_basic_phi2<'a, 'ctx, 'env, PassFn, FailFn>(
pub fn build_basic_phi2<'a, 'ctx, 'env, PassFn, FailFn>(
env: &Env<'a, 'ctx, 'env>,
parent: FunctionValue<'ctx>,
comparison: IntValue<'ctx>,
@ -1380,9 +1380,12 @@ pub fn allocate_list<'a, 'ctx, 'env>(
"make ptr",
);
// put our "refcount 0" in the first slot
let ref_count_zero = ctx.i64_type().const_int(std::usize::MAX as u64, false);
builder.build_store(refcount_ptr, ref_count_zero);
// the refcount of a new list is initially 1
// we assume that the list is indeed used (dead variables are eliminated)
let ref_count_one = ctx
.i64_type()
.const_int(crate::llvm::build::REFCOUNT_1 as _, false);
builder.build_store(refcount_ptr, ref_count_one);
list_element_ptr
}

View File

@ -107,6 +107,7 @@ pub fn basic_type_from_layout<'ctx>(
.struct_type(field_types.into_bump_slice(), false)
.as_basic_type_enum()
}
RecursiveUnion(_) => todo!("TODO implement layout of recursive tag union"),
Union(_) => {
// TODO make this dynamic
let ptr_size = std::mem::size_of::<i64>();

View File

@ -26,7 +26,13 @@ pub fn infer_borrow<'a>(
arena,
};
for proc in procs.values() {
// sort the symbols (roughly) in definition order.
// TODO in the future I think we need to do this properly, and group
// mutually recursive functions (or just make all their arguments owned)
let mut values = procs.values().collect::<std::vec::Vec<_>>();
values.sort_by(|a, b| b.name.cmp(&a.name));
for proc in values.iter() {
env.collect_proc(proc);
}
@ -34,14 +40,27 @@ pub fn infer_borrow<'a>(
}
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
pub enum Key {
enum Key {
Declaration(Symbol),
JoinPoint(Symbol, JoinPointId),
JoinPoint(JoinPointId),
}
#[derive(Debug, Clone, Default)]
pub struct ParamMap<'a> {
pub items: MutMap<Key, &'a [Param<'a>]>,
items: MutMap<Key, &'a [Param<'a>]>,
}
impl<'a> ParamMap<'a> {
/// Borrow parameters recorded for a top-level procedure declaration.
///
/// Panics (via `unwrap`) if `symbol` was never inserted into the map —
/// i.e. borrow inference did not visit this procedure.
pub fn get_symbol(&self, symbol: Symbol) -> &'a [Param<'a>] {
let key = Key::Declaration(symbol);
self.items.get(&key).unwrap()
}
/// Borrow parameters recorded for a join point.
///
/// Panics (via `unwrap`) if `id` was never inserted into the map.
pub fn get_join_point(&self, id: JoinPointId) -> &'a [Param<'a>] {
let key = Key::JoinPoint(id);
self.items.get(&key).unwrap()
}
}
impl<'a> ParamMap<'a> {
@ -91,10 +110,8 @@ impl<'a> ParamMap<'a> {
remainder: v,
continuation: b,
} => {
self.items.insert(
Key::JoinPoint(fnid, *j),
Self::init_borrow_params(arena, xs),
);
self.items
.insert(Key::JoinPoint(*j), Self::init_borrow_params(arena, xs));
stack.push(v);
stack.push(b);
@ -186,6 +203,19 @@ impl<'a> BorrowInfState<'a> {
}
}
/// This looks at an application `f x1 x2 x3`
/// If the parameter (based on the definition of `f`) is owned,
/// then the argument must also be owned.
///
/// `ps[i]` is a borrow flag: `true` means parameter `i` is borrowed,
/// `false` means it is owned (see the `owned`/`borrowed` constants at the
/// call site for low-level operations).
fn own_args_using_bools(&mut self, xs: &[Symbol], ps: &[bool]) {
debug_assert_eq!(xs.len(), ps.len());
for (x, borrow) in xs.iter().zip(ps.iter()) {
// parameter is owned (not borrowed), so the argument becomes owned too
if !borrow {
self.own_var(*x);
}
}
}
/// For each xs[i], if xs[i] is owned, then mark ps[i] as owned.
/// We use this action to preserve tail calls. That is, if we have
/// a tail call `f xs`, if the i-th parameter is borrowed, but `xs[i]` is owned
@ -251,8 +281,46 @@ impl<'a> BorrowInfState<'a> {
}
RunLowLevel(op, args) => {
// base borrowing on the `op`
// todo!()
use roc_module::low_level::LowLevel::*;
self.own_var(z);
// TODO is true or false more efficient for non-refcounted layouts?
let irrelevant = true;
let owned = false;
let borrowed = true;
let arena = self.arena;
// Here we define the borrow signature of low-level operations
//
// - arguments with non-refcounted layouts (ints, floats) are `irrelevant`
// - arguments that we may want to update destructively must be Owned
// - other refcounted arguments are Borrowed
let ps = match op {
ListLen => arena.alloc_slice_copy(&[borrowed]),
ListSet => arena.alloc_slice_copy(&[owned, irrelevant, irrelevant]),
ListSetInPlace => arena.alloc_slice_copy(&[owned, irrelevant, irrelevant]),
ListGetUnsafe => arena.alloc_slice_copy(&[irrelevant, irrelevant]),
ListSingle => arena.alloc_slice_copy(&[irrelevant]),
ListRepeat => arena.alloc_slice_copy(&[irrelevant, irrelevant]),
ListReverse => arena.alloc_slice_copy(&[owned]),
ListConcat => arena.alloc_slice_copy(&[irrelevant, irrelevant]),
ListAppend => arena.alloc_slice_copy(&[owned, owned]),
ListPrepend => arena.alloc_slice_copy(&[owned, owned]),
ListJoin => arena.alloc_slice_copy(&[irrelevant]),
Eq | NotEq | And | Or | NumAdd | NumSub | NumMul | NumGt | NumGte | NumLt
| NumLte | NumDivUnchecked | NumRemUnchecked => {
arena.alloc_slice_copy(&[irrelevant, irrelevant])
}
NumAbs | NumNeg | NumSin | NumCos | NumSqrtUnchecked | NumRound
| NumToFloat | Not => arena.alloc_slice_copy(&[irrelevant]),
};
self.own_args_using_bools(args, ps);
}
Literal(_) | FunctionPointer(_, _) | RuntimeErrorFunction(_) => {}
}
@ -308,7 +376,7 @@ impl<'a> BorrowInfState<'a> {
self.collect_stmt(v);
self.param_set = old;
self.update_param_map(Key::JoinPoint(self.current_proc, *j));
self.update_param_map(Key::JoinPoint(*j));
self.collect_stmt(b);
}
@ -319,12 +387,7 @@ impl<'a> BorrowInfState<'a> {
self.preserve_tail_call(*x, v, b);
}
Jump(j, ys) => {
let ps = self
.param_map
.items
.get(&Key::JoinPoint(self.current_proc, *j))
.unwrap()
.clone();
let ps = self.param_map.get_join_point(*j).clone();
// for making sure the join point can reuse
self.own_args_using_params(ys, ps);

View File

@ -114,7 +114,11 @@ pub fn occuring_variables_expr(expr: &Expr<'_>, result: &mut MutSet<Symbol>) {
result.extend(arguments.iter().copied());
}
RunLowLevel(_, _) | EmptyArray | RuntimeErrorFunction(_) | Literal(_) => {}
RunLowLevel(_, args) => {
result.extend(args.iter());
}
EmptyArray | RuntimeErrorFunction(_) | Literal(_) => {}
}
}
@ -433,54 +437,25 @@ impl<'a> Context<'a> {
self.arena.alloc(Stmt::Let(z, v, l, b))
}
RunLowLevel(_, _) => {
// THEORY: runlowlevel only occurs
//
// - in a custom hard-coded function
// - when we insert them as compiler authors
//
// if we're careful to only use RunLowLevel for non-rc'd types
// (e.g. when building a cond/switch, we check equality on integers, and to boolean and)
// then RunLowLevel should not change in any way the refcounts.
// let b = self.add_dec_after_application(ys, ps, b, b_live_vars);
RunLowLevel(_op, _args) => {
// Assumption: we never need to modify the refcount for these
self.arena.alloc(Stmt::Let(z, v, l, b))
}
FunctionCall {
args: ys,
call_type,
arg_layouts,
..
} => {
// this is where the borrow signature would come in
//let ps := (getDecl ctx f).params;
use crate::ir::CallType;
use crate::layout::Builtin;
let symbol = match call_type {
CallType::ByName(s) => s,
CallType::ByPointer(s) => s,
};
let symbol = call_type.into_inner();
let ps = Vec::from_iter_in(
arg_layouts.iter().map(|layout| {
let borrow = match layout {
Layout::Builtin(Builtin::List(_, _)) => true,
_ => false,
};
Param {
symbol,
borrow,
layout: layout.clone(),
}
}),
self.arena,
)
.into_bump_slice();
// get the borrow signature
let ps = self.param_map.get_symbol(symbol);
let b = self.add_dec_after_application(ys, ps, b, b_live_vars);
self.arena.alloc(Stmt::Let(z, v, l, b))
let b = self.arena.alloc(Stmt::Let(z, v, l, b));
self.add_inc_before(ys, ps, b, b_live_vars)
}
EmptyArray | FunctionPointer(_, _) | Literal(_) | RuntimeErrorFunction(_) => {
@ -496,13 +471,14 @@ impl<'a> Context<'a> {
fn update_var_info(&self, symbol: Symbol, layout: &Layout<'a>, expr: &Expr<'a>) -> Self {
let mut ctx = self.clone();
// TODO actually make these non-constant
// can this type be reference-counted at runtime?
let reference = layout.contains_refcounted();
// is this value a constant?
let persistent = false;
let persistent = match expr {
Expr::FunctionCall { args, .. } => args.is_empty(),
_ => false,
};
// must this value be consumed?
let consume = consume_expr(&ctx.vars, expr);
@ -884,20 +860,7 @@ pub fn visit_declaration<'a>(
pub fn visit_proc<'a>(arena: &'a Bump, param_map: &'a ParamMap<'a>, proc: &mut Proc<'a>) {
let ctx = Context::new(arena, param_map);
if proc.name.is_builtin() {
// we must take care of our own refcounting in builtins
return;
}
let params = Vec::from_iter_in(
proc.args.iter().map(|(layout, symbol)| Param {
symbol: *symbol,
layout: layout.clone(),
borrow: layout.contains_refcounted(),
}),
arena,
)
.into_bump_slice();
let params = param_map.get_symbol(proc.name);
let stmt = arena.alloc(proc.body.clone());
let ctx = ctx.update_var_info_with_params(params);

View File

@ -126,6 +126,33 @@ impl<'a> Procs<'a> {
result
}
/// Collect all finished specializations, run borrow inference over them,
/// and insert refcount inc/dec instructions into each procedure body.
///
/// Consumes `self`. Returns the map of specialized procedures together with
/// the arena-allocated borrow-parameter map produced by `infer_borrow`,
/// so callers can reuse the borrow signatures elsewhere.
///
/// Panics if any specialization is still `InProgress` — by this point every
/// queued specialization must have completed.
pub fn get_specialized_procs_help(
self,
arena: &'a Bump,
) -> (
MutMap<(Symbol, Layout<'a>), Proc<'a>>,
&'a crate::borrow::ParamMap<'a>,
) {
let mut result = MutMap::with_capacity_and_hasher(self.specialized.len(), default_hasher());
for (key, in_prog_proc) in self.specialized.into_iter() {
match in_prog_proc {
InProgress => unreachable!("The procedure {:?} should have be done by now", key),
Done(proc) => {
result.insert(key, proc);
}
}
}
// borrow inference needs the complete set of procs before inc/dec insertion
let borrow_params = arena.alloc(crate::borrow::infer_borrow(arena, &result));
for (_, proc) in result.iter_mut() {
crate::inc_dec::visit_proc(arena, borrow_params, proc);
}
(result, borrow_params)
}
// TODO trim down these arguments!
#[allow(clippy::too_many_arguments)]
pub fn insert_named(

View File

@ -22,6 +22,7 @@ pub enum Layout<'a> {
Builtin(Builtin<'a>),
Struct(&'a [Layout<'a>]),
Union(&'a [&'a [Layout<'a>]]),
RecursiveUnion(&'a [&'a [Layout<'a>]]),
/// A function. The types of its arguments, then the type of its return value.
FunctionPointer(&'a [Layout<'a>], &'a Layout<'a>),
Pointer(&'a Layout<'a>),
@ -96,6 +97,10 @@ impl<'a> Layout<'a> {
Union(tags) => tags
.iter()
.all(|tag_layout| tag_layout.iter().all(|field| field.safe_to_memcpy())),
RecursiveUnion(_) => {
// a recursive union will always contain a pointer, and is thus not safe to memcpy
false
}
FunctionPointer(_, _) => {
// Function pointers are immutable and can always be safely copied
true
@ -138,6 +143,16 @@ impl<'a> Layout<'a> {
})
.max()
.unwrap_or_default(),
RecursiveUnion(fields) => fields
.iter()
.map(|tag_layout| {
tag_layout
.iter()
.map(|field| field.stack_size(pointer_size))
.sum()
})
.max()
.unwrap_or_default(),
FunctionPointer(_, _) => pointer_size,
Pointer(_) => pointer_size,
}
@ -146,6 +161,7 @@ impl<'a> Layout<'a> {
pub fn is_refcounted(&self) -> bool {
match self {
Layout::Builtin(Builtin::List(_, _)) => true,
Layout::RecursiveUnion(_) => true,
_ => false,
}
}
@ -164,6 +180,7 @@ impl<'a> Layout<'a> {
.map(|ls| ls.iter())
.flatten()
.any(|f| f.is_refcounted()),
RecursiveUnion(_) => true,
FunctionPointer(_, _) | Pointer(_) => false,
}
}
@ -406,8 +423,41 @@ fn layout_from_flat_type<'a>(
Ok(layout_from_tag_union(arena, tags, subs))
}
RecursiveTagUnion(_rec_var, _tags, _ext_var) => {
panic!("TODO make Layout for empty RecursiveTagUnion");
RecursiveTagUnion(_rec_var, _tags, ext_var) => {
debug_assert!(ext_var_is_empty_tag_union(subs, ext_var));
// some observations
//
// * recursive tag unions are always recursive
// * therefore at least one tag has a pointer (non-zero sized) field
// * they must (to be instantiated) have 2 or more tags
//
// That means none of the optimizations for enums or single tag tag unions apply
// let rec_var = subs.get_root_key_without_compacting(rec_var);
// let mut tag_layouts = Vec::with_capacity_in(tags.len(), arena);
//
// // tags: MutMap<TagName, std::vec::Vec<Variable>>,
// for (_name, variables) in tags {
// let mut tag_layout = Vec::with_capacity_in(variables.len(), arena);
//
// for var in variables {
// // TODO does this still cause problems with mutually recursive unions?
// if rec_var == subs.get_root_key_without_compacting(var) {
// // TODO make this a pointer?
// continue;
// }
//
// let var_content = subs.get_without_compacting(var).content;
//
// tag_layout.push(Layout::new(arena, var_content, subs)?);
// }
//
// tag_layouts.push(tag_layout.into_bump_slice());
// }
//
// Ok(Layout::RecursiveUnion(tag_layouts.into_bump_slice()))
Ok(Layout::RecursiveUnion(&[]))
}
EmptyTagUnion => {
panic!("TODO make Layout for empty Tag Union");

View File

@ -66,18 +66,18 @@ mod test_mono {
// let mono_expr = Expr::new(&mut mono_env, loc_expr.value, &mut procs);
let procs = roc_mono::ir::specialize_all(&mut mono_env, procs, &mut LayoutCache::default());
// apply inc/dec
let param_map = mono_env.arena.alloc(roc_mono::borrow::ParamMap::default());
let stmt = mono_env.arena.alloc(ir_expr);
let ir_expr = roc_mono::inc_dec::visit_declaration(mono_env.arena, param_map, stmt);
assert_eq!(
procs.runtime_errors,
roc_collections::all::MutMap::default()
);
let (procs, param_map) = procs.get_specialized_procs_help(mono_env.arena);
// apply inc/dec
let stmt = mono_env.arena.alloc(ir_expr);
let ir_expr = roc_mono::inc_dec::visit_declaration(mono_env.arena, param_map, stmt);
let mut procs_string = procs
.get_specialized_procs(mono_env.arena)
.values()
.map(|proc| proc.to_pretty(200))
.collect::<Vec<_>>();
@ -1250,42 +1250,108 @@ mod test_mono {
)
}
#[test]
#[ignore]
fn is_nil() {
compiles_to_ir(
r#"
isNil : List a -> Bool
isNil = \list ->
when List.isEmpty list is
True -> True
False -> False
ConsList a : [ Cons a (ConsList a), Nil ]
isNil [ 1, 3, 4 ]
isNil : ConsList a -> Bool
isNil = \list ->
when list is
Nil -> True
Cons _ _ -> False
isNil (Cons 0x2 Nil)
"#,
indoc!(
r#"
procedure List.2 (#Attr.2):
let Test.16 = 0i64;
let Test.17 = lowlevel ListLen #Attr.2;
let Test.15 = lowlevel Eq Test.16 Test.17;
ret Test.15;
procedure Test.0 (Test.2):
let Test.8 = CallByName List.2 Test.2;
let Test.12 = true;
procedure Test.1 (Test.3):
let Test.13 = true;
let Test.14 = lowlevel Eq Test.13 Test.8;
let Test.11 = lowlevel And Test.14 Test.12;
if Test.11 then
let Test.9 = true;
ret Test.9;
else
let Test.10 = false;
let Test.15 = Index 0 Test.3;
let Test.14 = 1i64;
let Test.16 = lowlevel Eq Test.14 Test.15;
let Test.12 = lowlevel And Test.16 Test.13;
if Test.12 then
let Test.10 = true;
ret Test.10;
else
let Test.11 = false;
ret Test.11;
let Test.6 = 0i64;
let Test.7 = 2i64;
let Test.9 = 1i64;
let Test.8 = Nil Test.9;
let Test.5 = Cons Test.6 Test.7 Test.8;
let Test.4 = CallByName Test.1 Test.5;
ret Test.4;
"#
),
)
}
#[ignore]
fn has_none() {
compiles_to_ir(
r#"
Maybe a : [ Just a, Nothing ]
ConsList a : [ Cons a (ConsList a), Nil ]
hasNone : ConsList (Maybe a) -> Bool
hasNone = \list ->
when list is
Nil -> False
Cons Nothing _ -> True
Cons (Just _) xs -> hasNone xs
hasNone (Cons (Just 3) Nil)
"#,
indoc!(
r#"
procedure Test.1 (Test.3):
let Test.13 = true;
let Test.15 = Index 0 Test.3;
let Test.14 = 1i64;
let Test.16 = lowlevel Eq Test.14 Test.15;
let Test.12 = lowlevel And Test.16 Test.13;
if Test.12 then
let Test.10 = true;
ret Test.10;
else
let Test.11 = false;
ret Test.11;
let Test.6 = 0i64;
let Test.7 = 2i64;
let Test.9 = 1i64;
let Test.8 = Nil Test.9;
let Test.5 = Cons Test.6 Test.7 Test.8;
let Test.4 = CallByName Test.1 Test.5;
ret Test.4;
"#
),
)
}
#[test]
fn mk_pair_of() {
compiles_to_ir(
r#"
mkPairOf = \x -> Pair x x
mkPairOf [1,2,3]
"#,
indoc!(
r#"
procedure Test.0 (Test.2):
inc Test.2;
let Test.8 = Struct {Test.2, Test.2};
ret Test.8;
let Test.5 = 1i64;
let Test.6 = 3i64;
let Test.7 = 4i64;
let Test.6 = 2i64;
let Test.7 = 3i64;
let Test.4 = Array [Test.5, Test.6, Test.7];
let Test.3 = CallByName Test.0 Test.4;
dec Test.4;
@ -1294,4 +1360,65 @@ mod test_mono {
),
)
}
#[test]
fn fst() {
compiles_to_ir(
r#"
fst = \x, y -> x
fst [1,2,3] [3,2,1]
"#,
indoc!(
r#"
"#
),
)
}
#[test]
fn list_cannot_update_inplace() {
compiles_to_ir(
indoc!(
r#"
main = \{} ->
x : List Int
x = [1,2,3]
add : List Int -> List Int
add = \y -> List.set y 0 0
List.len (add x) + List.len x
main {}
"#
),
indoc!(
r#"
procedure Test.1 (Test.3):
let Test.9 = 0i64;
let Test.10 = 0i64;
let Test.8 = CallByName List.4 Test.3 Test.9 Test.10;
ret Test.8;
procedure List.4 (#Attr.2, #Attr.3, #Attr.4):
let Test.14 = lowlevel ListLen #Attr.2;
let Test.12 = lowlevel NumLt #Attr.3 Test.14;
if Test.12 then
let Test.13 = lowlevel ListSet #Attr.2 #Attr.3 #Attr.4;
ret Test.13;
else
ret #Attr.2;
let Test.5 = 1i64;
let Test.6 = 2i64;
let Test.7 = 3i64;
let Test.0 = Array [Test.5, Test.6, Test.7];
let Test.4 = CallByName Test.1 Test.0;
dec Test.0;
ret Test.4;
"#
),
)
}
}

View File

@ -57,9 +57,10 @@ quicksort = \originalList ->
result = quicksortHelp originalList 0 (List.len originalList - 1)
# Absolutely make the `originalList` Shared by using it again here
if List.len (List.set originalList 0 3) > 3 then
if List.len originalList > 3 then
result
else
result
# Absolutely make the `originalList` Shared by using it again here
# but this branch is not evaluated, so should not affect performance
List.set originalList 0 (List.len originalList)

View File

@ -7,7 +7,7 @@ extern "C" {
fn quicksort(list: &[i64]) -> Box<[i64]>;
}
const NUM_NUMS: usize = 1_00;
const NUM_NUMS: usize = 1_000_000;
pub fn main() {
let nums = {