From 6ab1a49a41aa003c9846e21626d43b4a8c77535a Mon Sep 17 00:00:00 2001 From: ibaryshnikov Date: Thu, 27 Jun 2019 00:06:43 +0300 Subject: [PATCH] moved lib.rs to stable.rs in wasm-bindgen-futures, updated during review --- crates/futures/Cargo.toml | 3 +- crates/futures/src/atomics.rs | 30 +-- crates/futures/src/lib.rs | 334 ++------------------------ crates/futures/src/polyfill.rs | 58 ++--- crates/futures/src/stable.rs | 308 ++++++++++++++++++++++++ examples/raytrace-parallel/src/lib.rs | 2 +- 6 files changed, 350 insertions(+), 385 deletions(-) create mode 100644 crates/futures/src/stable.rs diff --git a/crates/futures/Cargo.toml b/crates/futures/Cargo.toml index 047264097..b07b78f17 100644 --- a/crates/futures/Cargo.toml +++ b/crates/futures/Cargo.toml @@ -11,6 +11,7 @@ version = "0.3.25" edition = "2018" [dependencies] +cfg-if = "0.1.9" futures = "0.1.20" js-sys = { path = "../js-sys", version = '0.3.25' } wasm-bindgen = { path = "../..", version = '0.2.48' } @@ -20,7 +21,7 @@ lazy_static = { version = "1.3.0", optional = true } [target.'cfg(target_feature = "atomics")'.dependencies.web-sys] path = "../web-sys" -version = "0.3.23" +version = "0.3.24" features = [ "MessageEvent", "Worker", diff --git a/crates/futures/src/atomics.rs b/crates/futures/src/atomics.rs index a5d57eac3..61481707d 100644 --- a/crates/futures/src/atomics.rs +++ b/crates/futures/src/atomics.rs @@ -10,16 +10,6 @@ use futures::sync::oneshot; use js_sys::{Function, Promise}; use wasm_bindgen::prelude::*; -macro_rules! console_log { - ($($t:tt)*) => (log(&format_args!($($t)*).to_string())) -} - -#[wasm_bindgen] -extern "C" { - #[wasm_bindgen(js_namespace = console)] - fn log(s: &str); -} - /// A Rust `Future` backed by a JavaScript `Promise`. /// /// This type is constructed with a JavaScript `Promise` object and translates @@ -210,13 +200,11 @@ fn _future_to_promise(future: Box>) impl Notify for Waker { fn notify(&self, _id: usize) { - console_log!("Waker notify"); if !self.notified.swap(true, Ordering::SeqCst) { - console_log!("Waker, inside if"); let _ = unsafe { core::arch::wasm32::atomic_notify( &self.value as *const AtomicI32 as *mut i32, - 0, + std::u32::MAX, // number of threads to notify ) }; } @@ -224,11 +212,9 @@ fn _future_to_promise(future: Box>) } fn poll_again(package: Arc) { - console_log!("poll_again called"); let me = match package.notified.replace(State::Notified) { // we need to schedule polling to resume, so keep going State::Waiting(me) => { - console_log!("poll_again Waiting"); me } @@ -236,7 +222,6 @@ fn _future_to_promise(future: Box>) // having now coalesced the notifications we return as it's // still someone else's job to process this State::Notified => { - console_log!("poll_again Notified"); return; } @@ -248,20 +233,17 @@ fn _future_to_promise(future: Box>) // so we bail out. // later see State::Polling => { - console_log!("poll_again Polling"); return; } }; - let value_location = &package.waker.value as *const AtomicI32 as u32 / 4; - // Use `Promise.then` on a resolved promise to place our execution // onto the next turn of the microtask queue, enqueueing our poll // operation. We don't currently poll immediately as it turns out // `futures` crate adapters aren't compatible with it and it also // helps avoid blowing the stack by accident. 
let promise = - crate::polyfill::wait_async(value_location, 0).expect("Should create a Promise"); + crate::polyfill::wait_async(&package.waker.value).expect("Should create a Promise"); let closure = Closure::once(Box::new(move |_| { Package::poll(&me); }) as Box); @@ -284,9 +266,7 @@ fn _future_to_promise(future: Box>) match me.notified.replace(State::Polling) { // We received a notification while previously polling, or // this is the initial poll. We've got work to do below! - State::Notified => { - console_log!("Package::poll Notified"); - } + State::Notified => {} // We've gone through this loop once and no notification was // received while we were executing work. That means we got @@ -296,8 +276,6 @@ fn _future_to_promise(future: Box>) // When the notification comes in it'll notify our task, see // our `Waiting` state, and resume the polling process State::Polling => { - console_log!("Package::poll Polling"); - me.notified.set(State::Waiting(me.clone())); poll_again(me.clone()); @@ -306,8 +284,6 @@ fn _future_to_promise(future: Box>) } State::Waiting(_) => { - console_log!("Package::poll Waiting"); - panic!("shouldn't see waiting state!") } } diff --git a/crates/futures/src/lib.rs b/crates/futures/src/lib.rs index e21c94ea5..2b6278f65 100644 --- a/crates/futures/src/lib.rs +++ b/crates/futures/src/lib.rs @@ -101,326 +101,28 @@ //! } //! ``` -#![feature(stdsimd)] +#![cfg_attr(target_feature = "atomics", feature(stdsimd))] #![deny(missing_docs)] -#[cfg(feature = "futures_0_3")] -/// Contains a Futures 0.3 implementation of this crate. -pub mod futures_0_3; +use cfg_if::cfg_if; -#[cfg(target_feature = "atomics")] -/// Contains a thread-safe version of this crate, with Futures 0.1 -pub mod atomics; +cfg_if! { + if #[cfg(target_feature = "atomics")] { + /// Contains a thread-safe version of this crate, with Futures 0.1 + pub mod atomics; -#[cfg(target_feature = "atomics")] -/// Polyfill for `Atomics.waitAsync` function -mod polyfill; + /// Polyfill for `Atomics.waitAsync` function + mod polyfill; -use std::cell::{Cell, RefCell}; -use std::fmt; -use std::rc::Rc; -use std::sync::Arc; + pub use atomics::*; + } else if #[cfg(feature = "futures_0_3")] { + /// Contains a Futures 0.3 implementation of this crate. + pub mod futures_0_3; -use futures::executor::{self, Notify, Spawn}; -use futures::future; -use futures::prelude::*; -use futures::sync::oneshot; -use js_sys::{Function, Promise}; -use wasm_bindgen::prelude::*; - -/// A Rust `Future` backed by a JavaScript `Promise`. -/// -/// This type is constructed with a JavaScript `Promise` object and translates -/// it to a Rust `Future`. This type implements the `Future` trait from the -/// `futures` crate and will either succeed or fail depending on what happens -/// with the JavaScript `Promise`. -/// -/// Currently this type is constructed with `JsFuture::from`. -pub struct JsFuture { - rx: oneshot::Receiver>, -} - -impl fmt::Debug for JsFuture { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "JsFuture {{ ... }}") - } -} - -impl From for JsFuture { - fn from(js: Promise) -> JsFuture { - // Use the `then` method to schedule two callbacks, one for the - // resolved value and one for the rejected value. We're currently - // assuming that JS engines will unconditionally invoke precisely one of - // these callbacks, no matter what. - // - // Ideally we'd have a way to cancel the callbacks getting invoked and - // free up state ourselves when this `JsFuture` is dropped. 
We don't - // have that, though, and one of the callbacks is likely always going to - // be invoked. - // - // As a result we need to make sure that no matter when the callbacks - // are invoked they are valid to be called at any time, which means they - // have to be self-contained. Through the `Closure::once` and some - // `Rc`-trickery we can arrange for both instances of `Closure`, and the - // `Rc`, to all be destroyed once the first one is called. - let (tx, rx) = oneshot::channel(); - let state = Rc::new(RefCell::new(None)); - let state2 = state.clone(); - let resolve = Closure::once(move |val| finish(&state2, Ok(val))); - let state2 = state.clone(); - let reject = Closure::once(move |val| finish(&state2, Err(val))); - - js.then2(&resolve, &reject); - *state.borrow_mut() = Some((tx, resolve, reject)); - - return JsFuture { rx }; - - fn finish( - state: &RefCell< - Option<( - oneshot::Sender>, - Closure, - Closure, - )>, - >, - val: Result, - ) { - match state.borrow_mut().take() { - // We don't have any guarantee that anyone's still listening at this - // point (the Rust `JsFuture` could have been dropped) so simply - // ignore any errors here. - Some((tx, _, _)) => drop(tx.send(val)), - None => wasm_bindgen::throw_str("cannot finish twice"), - } - } - } -} - -impl Future for JsFuture { - type Item = JsValue; - type Error = JsValue; - - fn poll(&mut self) -> Poll { - match self.rx.poll() { - Ok(Async::Ready(val)) => val.map(Async::Ready), - Ok(Async::NotReady) => Ok(Async::NotReady), - Err(_) => wasm_bindgen::throw_str("cannot cancel"), - } - } -} - -/// Converts a Rust `Future` into a JavaScript `Promise`. -/// -/// This function will take any future in Rust and schedule it to be executed, -/// returning a JavaScript `Promise` which can then be passed back to JavaScript -/// to get plumbed into the rest of a system. -/// -/// The `future` provided must adhere to `'static` because it'll be scheduled -/// to run in the background and cannot contain any stack references. The -/// returned `Promise` will be resolved or rejected when the future completes, -/// depending on whether it finishes with `Ok` or `Err`. -/// -/// # Panics -/// -/// Note that in wasm panics are currently translated to aborts, but "abort" in -/// this case means that a JavaScript exception is thrown. The wasm module is -/// still usable (likely erroneously) after Rust panics. -/// -/// If the `future` provided panics then the returned `Promise` **will not -/// resolve**. Instead it will be a leaked promise. This is an unfortunate -/// limitation of wasm currently that's hoped to be fixed one day! -pub fn future_to_promise(future: F) -> Promise -where - F: Future + 'static, -{ - _future_to_promise(Box::new(future)) -} - -// Implementation of actually transforming a future into a JavaScript `Promise`. -// -// The only primitive we have to work with here is `Promise::new`, which gives -// us two callbacks that we can use to either reject or resolve the promise. -// It's our job to ensure that one of those callbacks is called at the -// appropriate time. -// -// Now we know that JavaScript (in general) can't block and is largely -// notification/callback driven. That means that our future must either have -// synchronous computational work to do, or it's "scheduled a notification" to -// happen. These notifications are likely callbacks to get executed when things -// finish (like a different promise or something like `setTimeout`). 
The general -// idea here is thus to do as much synchronous work as we can and then otherwise -// translate notifications of a future's task into "let's poll the future!" -// -// This isn't necessarily the greatest future executor in the world, but it -// should get the job done for now hopefully. -fn _future_to_promise(future: Box>) -> Promise { - let mut future = Some(executor::spawn(future)); - return Promise::new(&mut |resolve, reject| { - Package::poll(&Arc::new(Package { - spawn: RefCell::new(future.take().unwrap()), - resolve, - reject, - notified: Cell::new(State::Notified), - })); - }); - - struct Package { - // Our "spawned future". This'll have everything we need to poll the - // future and continue to move it forward. - spawn: RefCell>>>, - - // The current state of this future, expressed in an enum below. This - // indicates whether we're currently polling the future, received a - // notification and need to keep polling, or if we're waiting for a - // notification to come in (and no one is polling). - notified: Cell, - - // Our two callbacks connected to the `Promise` that we returned to - // JavaScript. We'll be invoking one of these at the end. - resolve: Function, - reject: Function, - } - - // The possible states our `Package` (future) can be in, tracked internally - // and used to guide what happens when polling a future. - enum State { - // This future is currently and actively being polled. Attempting to - // access the future will result in a runtime panic and is considered a - // bug. - Polling, - - // This future has been notified, while it was being polled. This marker - // is used in the `Notify` implementation below, and indicates that a - // notification was received that the future is ready to make progress. - // If seen, however, it probably means that the future is also currently - // being polled. - Notified, - - // The future is blocked, waiting for something to happen. Stored here - // is a self-reference to the future itself so we can pull it out in - // `Notify` and continue polling. - // - // Note that the self-reference here is an Arc-cycle that will leak - // memory unless the future completes, but currently that should be ok - // as we'll have to stick around anyway while the future is executing! - // - // This state is removed as soon as a notification comes in, so the leak - // should only be "temporary" - Waiting(Arc), - } - - // No shared memory right now, wasm is single threaded, no need to worry - // about this! - unsafe impl Send for Package {} - unsafe impl Sync for Package {} - - impl Package { - // Move the future contained in `me` as far forward as we can. This will - // do as much synchronous work as possible to complete the future, - // ensuring that when it blocks we're scheduled to get notified via some - // callback somewhere at some point (vague, right?) - // - // TODO: this probably shouldn't do as much synchronous work as possible - // as it can starve other computations. Rather it should instead - // yield every so often with something like `setTimeout` with the - // timeout set to zero. - fn poll(me: &Arc) { - loop { - match me.notified.replace(State::Polling) { - // We received a notification while previously polling, or - // this is the initial poll. We've got work to do below! - State::Notified => {} - - // We've gone through this loop once and no notification was - // received while we were executing work. That means we got - // `NotReady` below and we're scheduled to receive a - // notification. 
Block ourselves and wait for later. - // - // When the notification comes in it'll notify our task, see - // our `Waiting` state, and resume the polling process - State::Polling => { - me.notified.set(State::Waiting(me.clone())); - break; - } - - State::Waiting(_) => panic!("shouldn't see waiting state!"), - } - - let (val, f) = match me.spawn.borrow_mut().poll_future_notify(me, 0) { - // If the future is ready, immediately call the - // resolve/reject callback and then return as we're done. - Ok(Async::Ready(value)) => (value, &me.resolve), - Err(value) => (value, &me.reject), - - // Otherwise keep going in our loop, if we weren't notified - // we'll break out and start waiting. - Ok(Async::NotReady) => continue, - }; - - drop(f.call1(&JsValue::undefined(), &val)); - break; - } - } - } - - impl Notify for Package { - fn notify(&self, _id: usize) { - let me = match self.notified.replace(State::Notified) { - // we need to schedule polling to resume, so keep going - State::Waiting(me) => me, - - // we were already notified, and were just notified again; - // having now coalesced the notifications we return as it's - // still someone else's job to process this - State::Notified => return, - - // the future was previously being polled, and we've just - // switched it to the "you're notified" state. We don't have - // access to the future as it's being polled, so the future - // polling process later sees this notification and will - // continue polling. For us, though, there's nothing else to do, - // so we bail out. - // later see - State::Polling => return, - }; - - // Use `Promise.then` on a resolved promise to place our execution - // onto the next turn of the microtask queue, enqueueing our poll - // operation. We don't currently poll immediately as it turns out - // `futures` crate adapters aren't compatible with it and it also - // helps avoid blowing the stack by accident. - // - // Note that the `Rc`/`RefCell` trick here is basically to just - // ensure that our `Closure` gets cleaned up appropriately. - let promise = Promise::resolve(&JsValue::undefined()); - let slot = Rc::new(RefCell::new(None)); - let slot2 = slot.clone(); - let closure = Closure::wrap(Box::new(move |_| { - let myself = slot2.borrow_mut().take(); - debug_assert!(myself.is_some()); - Package::poll(&me); - }) as Box); - promise.then(&closure); - *slot.borrow_mut() = Some(closure); - } - } -} - -/// Converts a Rust `Future` on a local task queue. -/// -/// The `future` provided must adhere to `'static` because it'll be scheduled -/// to run in the background and cannot contain any stack references. -/// -/// # Panics -/// -/// This function has the same panic behavior as `future_to_promise`. 
-pub fn spawn_local(future: F) -where - F: Future + 'static, -{ - future_to_promise( - future - .map(|()| JsValue::undefined()) - .or_else(|()| future::ok::(JsValue::undefined())), - ); + pub mod stable; + pub use stable::*; + } else { + pub mod stable; + pub use stable::*; + } } diff --git a/crates/futures/src/polyfill.rs b/crates/futures/src/polyfill.rs index b742a3c04..9951c85f3 100644 --- a/crates/futures/src/polyfill.rs +++ b/crates/futures/src/polyfill.rs @@ -37,27 +37,17 @@ */ use std::cell::RefCell; -use std::rc::Rc; +use std::sync::atomic::{AtomicI32, Ordering}; use js_sys::{ - encode_uri_component, Array, Atomics, Error, Function, Int32Array, JsString, Promise, Reflect, - SharedArrayBuffer, WebAssembly, + encode_uri_component, Array, Function, Int32Array, JsString, Promise, Reflect, + WebAssembly, }; use wasm_bindgen::prelude::*; use wasm_bindgen::JsCast; use web_sys::{MessageEvent, Worker}; -macro_rules! console_log { - ($($t:tt)*) => (log(&format_args!($($t)*).to_string())) -} - -#[wasm_bindgen] -extern "C" { - #[wasm_bindgen(js_namespace = console)] - fn log(s: &str); -} - -const DEFAULT_TIMEOUT: f64 = 10.0; +const DEFAULT_TIMEOUT: f64 = std::f64::INFINITY; const HELPER_CODE: &'static str = " onmessage = function (ev) { @@ -65,7 +55,6 @@ onmessage = function (ev) { switch (ev.data[0]) { case 'wait': { let [_, ia, index, value, timeout] = ev.data; - console.log('wait event inside a worker'); let result = Atomics.wait(ia, index, value, timeout); postMessage(['ok', result]); break; @@ -75,17 +64,17 @@ onmessage = function (ev) { } } } catch (e) { - console.log('Exception in wait helper'); + console.log('Exception in wait helper', e); postMessage(['error', 'Exception']); } } "; thread_local! { - static HELPERS: RefCell>> = RefCell::new(vec![]); + static HELPERS: RefCell> = RefCell::new(vec![]); } -fn alloc_helper() -> Rc { +fn alloc_helper() -> Worker { HELPERS.with(|helpers| { if let Some(helper) = helpers.borrow_mut().pop() { return helper; @@ -95,18 +84,18 @@ fn alloc_helper() -> Rc { let encoded: String = encode_uri_component(HELPER_CODE).into(); initialization_string.push_str(&encoded); - return Rc::new(Worker::new(&initialization_string).expect("Should create a Worker")); + Worker::new(&initialization_string).expect("Should create a Worker") }) } -fn free_helper(helper: &Rc) { +fn free_helper(helper: Worker) { HELPERS.with(move |helpers| { helpers.borrow_mut().push(helper.clone()); }); } -pub fn wait_async(index: u32, value: i32) -> Result { - wait_async_with_timeout(index, value, DEFAULT_TIMEOUT) +pub fn wait_async(value: &AtomicI32) -> Result { + wait_async_with_timeout(value, DEFAULT_TIMEOUT) } fn get_array_item(array: &JsValue, index: u32) -> JsValue { @@ -118,7 +107,7 @@ fn get_array_item(array: &JsValue, index: u32) -> JsValue { // for parameter validation. The promise is resolved with a string as from // Atomics.wait, or, in the case something went completely wrong, it is // rejected with an error string. -pub fn wait_async_with_timeout(index: u32, value: i32, timeout: f64) -> Result { +pub fn wait_async_with_timeout(value: &AtomicI32, timeout: f64) -> Result { let memory_buffer = wasm_bindgen::memory() .dyn_into::() .expect("Should cast a memory to WebAssembly::Memory") @@ -126,30 +115,20 @@ pub fn wait_async_with_timeout(index: u32, value: i32, timeout: f64) -> Result
() { - console_log!("polyfill, not a SharedArrayBuffer"); - return Err(Error::new("Indexed array must be created from SharedArrayBuffer").into()); - } - - // Optimization, avoid the helper thread in this common case. - if Atomics::load(&indexed_array, index)? != value { - console_log!("polyfill, not-equal"); - return Ok(Promise::resolve(&JsString::from("not-equal"))); - } + let index = value as *const AtomicI32 as u32 / 4; + let value_i32 = value.load(Ordering::SeqCst); // General case, we must wait. - console_log!("polyfill, general case"); - Ok(Promise::new( &mut move |resolve: Function, reject: Function| { let helper = alloc_helper(); let helper_ref = helper.clone(); - let onmessage_callback = Closure::once_into_js(Box::new(move |e: MessageEvent| { + let onmessage_callback = Closure::once_into_js(move |e: MessageEvent| { // Free the helper early so that it can be reused if the resolution // needs a helper. - free_helper(&helper_ref); + free_helper(helper_ref); match String::from( get_array_item(&e.data(), 0) .as_string() @@ -172,8 +151,7 @@ pub fn wait_async_with_timeout(index: u32, value: i32, timeout: f64) -> Result
(), } - }) - as Box); + }); helper.set_onmessage(Some(onmessage_callback.as_ref().unchecked_ref())); // onmessage_callback.forget(); @@ -196,7 +174,7 @@ pub fn wait_async_with_timeout(index: u32, value: i32, timeout: f64) -> Result
<Result<JsValue, JsValue>
>, +} + +impl fmt::Debug for JsFuture { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "JsFuture {{ ... }}") + } +} + +impl From for JsFuture { + fn from(js: Promise) -> JsFuture { + // Use the `then` method to schedule two callbacks, one for the + // resolved value and one for the rejected value. We're currently + // assuming that JS engines will unconditionally invoke precisely one of + // these callbacks, no matter what. + // + // Ideally we'd have a way to cancel the callbacks getting invoked and + // free up state ourselves when this `JsFuture` is dropped. We don't + // have that, though, and one of the callbacks is likely always going to + // be invoked. + // + // As a result we need to make sure that no matter when the callbacks + // are invoked they are valid to be called at any time, which means they + // have to be self-contained. Through the `Closure::once` and some + // `Rc`-trickery we can arrange for both instances of `Closure`, and the + // `Rc`, to all be destroyed once the first one is called. + let (tx, rx) = oneshot::channel(); + let state = Rc::new(RefCell::new(None)); + let state2 = state.clone(); + let resolve = Closure::once(move |val| finish(&state2, Ok(val))); + let state2 = state.clone(); + let reject = Closure::once(move |val| finish(&state2, Err(val))); + + js.then2(&resolve, &reject); + *state.borrow_mut() = Some((tx, resolve, reject)); + + return JsFuture { rx }; + + fn finish( + state: &RefCell< + Option<( + oneshot::Sender>, + Closure, + Closure, + )>, + >, + val: Result, + ) { + match state.borrow_mut().take() { + // We don't have any guarantee that anyone's still listening at this + // point (the Rust `JsFuture` could have been dropped) so simply + // ignore any errors here. + Some((tx, _, _)) => drop(tx.send(val)), + None => wasm_bindgen::throw_str("cannot finish twice"), + } + } + } +} + +impl Future for JsFuture { + type Item = JsValue; + type Error = JsValue; + + fn poll(&mut self) -> Poll { + match self.rx.poll() { + Ok(Async::Ready(val)) => val.map(Async::Ready), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err(_) => wasm_bindgen::throw_str("cannot cancel"), + } + } +} + +/// Converts a Rust `Future` into a JavaScript `Promise`. +/// +/// This function will take any future in Rust and schedule it to be executed, +/// returning a JavaScript `Promise` which can then be passed back to JavaScript +/// to get plumbed into the rest of a system. +/// +/// The `future` provided must adhere to `'static` because it'll be scheduled +/// to run in the background and cannot contain any stack references. The +/// returned `Promise` will be resolved or rejected when the future completes, +/// depending on whether it finishes with `Ok` or `Err`. +/// +/// # Panics +/// +/// Note that in wasm panics are currently translated to aborts, but "abort" in +/// this case means that a JavaScript exception is thrown. The wasm module is +/// still usable (likely erroneously) after Rust panics. +/// +/// If the `future` provided panics then the returned `Promise` **will not +/// resolve**. Instead it will be a leaked promise. This is an unfortunate +/// limitation of wasm currently that's hoped to be fixed one day! +pub fn future_to_promise(future: F) -> Promise + where + F: Future + 'static, +{ + _future_to_promise(Box::new(future)) +} + +// Implementation of actually transforming a future into a JavaScript `Promise`. 
+// +// The only primitive we have to work with here is `Promise::new`, which gives +// us two callbacks that we can use to either reject or resolve the promise. +// It's our job to ensure that one of those callbacks is called at the +// appropriate time. +// +// Now we know that JavaScript (in general) can't block and is largely +// notification/callback driven. That means that our future must either have +// synchronous computational work to do, or it's "scheduled a notification" to +// happen. These notifications are likely callbacks to get executed when things +// finish (like a different promise or something like `setTimeout`). The general +// idea here is thus to do as much synchronous work as we can and then otherwise +// translate notifications of a future's task into "let's poll the future!" +// +// This isn't necessarily the greatest future executor in the world, but it +// should get the job done for now hopefully. +fn _future_to_promise(future: Box>) -> Promise { + let mut future = Some(executor::spawn(future)); + return Promise::new(&mut |resolve, reject| { + Package::poll(&Arc::new(Package { + spawn: RefCell::new(future.take().unwrap()), + resolve, + reject, + notified: Cell::new(State::Notified), + })); + }); + + struct Package { + // Our "spawned future". This'll have everything we need to poll the + // future and continue to move it forward. + spawn: RefCell>>>, + + // The current state of this future, expressed in an enum below. This + // indicates whether we're currently polling the future, received a + // notification and need to keep polling, or if we're waiting for a + // notification to come in (and no one is polling). + notified: Cell, + + // Our two callbacks connected to the `Promise` that we returned to + // JavaScript. We'll be invoking one of these at the end. + resolve: Function, + reject: Function, + } + + // The possible states our `Package` (future) can be in, tracked internally + // and used to guide what happens when polling a future. + enum State { + // This future is currently and actively being polled. Attempting to + // access the future will result in a runtime panic and is considered a + // bug. + Polling, + + // This future has been notified, while it was being polled. This marker + // is used in the `Notify` implementation below, and indicates that a + // notification was received that the future is ready to make progress. + // If seen, however, it probably means that the future is also currently + // being polled. + Notified, + + // The future is blocked, waiting for something to happen. Stored here + // is a self-reference to the future itself so we can pull it out in + // `Notify` and continue polling. + // + // Note that the self-reference here is an Arc-cycle that will leak + // memory unless the future completes, but currently that should be ok + // as we'll have to stick around anyway while the future is executing! + // + // This state is removed as soon as a notification comes in, so the leak + // should only be "temporary" + Waiting(Arc), + } + + // No shared memory right now, wasm is single threaded, no need to worry + // about this! + unsafe impl Send for Package {} + unsafe impl Sync for Package {} + + impl Package { + // Move the future contained in `me` as far forward as we can. This will + // do as much synchronous work as possible to complete the future, + // ensuring that when it blocks we're scheduled to get notified via some + // callback somewhere at some point (vague, right?) 
+ // + // TODO: this probably shouldn't do as much synchronous work as possible + // as it can starve other computations. Rather it should instead + // yield every so often with something like `setTimeout` with the + // timeout set to zero. + fn poll(me: &Arc) { + loop { + match me.notified.replace(State::Polling) { + // We received a notification while previously polling, or + // this is the initial poll. We've got work to do below! + State::Notified => {} + + // We've gone through this loop once and no notification was + // received while we were executing work. That means we got + // `NotReady` below and we're scheduled to receive a + // notification. Block ourselves and wait for later. + // + // When the notification comes in it'll notify our task, see + // our `Waiting` state, and resume the polling process + State::Polling => { + me.notified.set(State::Waiting(me.clone())); + break; + } + + State::Waiting(_) => panic!("shouldn't see waiting state!"), + } + + let (val, f) = match me.spawn.borrow_mut().poll_future_notify(me, 0) { + // If the future is ready, immediately call the + // resolve/reject callback and then return as we're done. + Ok(Async::Ready(value)) => (value, &me.resolve), + Err(value) => (value, &me.reject), + + // Otherwise keep going in our loop, if we weren't notified + // we'll break out and start waiting. + Ok(Async::NotReady) => continue, + }; + + drop(f.call1(&JsValue::undefined(), &val)); + break; + } + } + } + + impl Notify for Package { + fn notify(&self, _id: usize) { + let me = match self.notified.replace(State::Notified) { + // we need to schedule polling to resume, so keep going + State::Waiting(me) => me, + + // we were already notified, and were just notified again; + // having now coalesced the notifications we return as it's + // still someone else's job to process this + State::Notified => return, + + // the future was previously being polled, and we've just + // switched it to the "you're notified" state. We don't have + // access to the future as it's being polled, so the future + // polling process later sees this notification and will + // continue polling. For us, though, there's nothing else to do, + // so we bail out. + // later see + State::Polling => return, + }; + + // Use `Promise.then` on a resolved promise to place our execution + // onto the next turn of the microtask queue, enqueueing our poll + // operation. We don't currently poll immediately as it turns out + // `futures` crate adapters aren't compatible with it and it also + // helps avoid blowing the stack by accident. + // + // Note that the `Rc`/`RefCell` trick here is basically to just + // ensure that our `Closure` gets cleaned up appropriately. + let promise = Promise::resolve(&JsValue::undefined()); + let slot = Rc::new(RefCell::new(None)); + let slot2 = slot.clone(); + let closure = Closure::wrap(Box::new(move |_| { + let myself = slot2.borrow_mut().take(); + debug_assert!(myself.is_some()); + Package::poll(&me); + }) as Box); + promise.then(&closure); + *slot.borrow_mut() = Some(closure); + } + } +} + +/// Converts a Rust `Future` on a local task queue. +/// +/// The `future` provided must adhere to `'static` because it'll be scheduled +/// to run in the background and cannot contain any stack references. +/// +/// # Panics +/// +/// This function has the same panic behavior as `future_to_promise`. 
+pub fn spawn_local(future: F) + where + F: Future + 'static, +{ + future_to_promise( + future + .map(|()| JsValue::undefined()) + .or_else(|()| future::ok::(JsValue::undefined())), + ); +} diff --git a/examples/raytrace-parallel/src/lib.rs b/examples/raytrace-parallel/src/lib.rs index 32d96d344..8f99ed923 100644 --- a/examples/raytrace-parallel/src/lib.rs +++ b/examples/raytrace-parallel/src/lib.rs @@ -92,7 +92,7 @@ impl Scene { .map(move |_data| image_data(base, len, width, height).into()); Ok(RenderingScene { - promise: wasm_bindgen_futures::atomics::future_to_promise(done), + promise: wasm_bindgen_futures::future_to_promise(done), base, len, height,
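
Reviewer note (not part of the patch): after this change callers no longer reach into `wasm_bindgen_futures::atomics`; the crate re-exports either `atomics` or `stable` behind the `cfg_if` block, so downstream code names `wasm_bindgen_futures::future_to_promise` directly, as the raytrace-parallel hunk shows. Below is a minimal, hypothetical futures-0.1 sketch of that calling pattern; `make_promise` and `double_it` are illustrative names, not items from this patch.

    use futures::Future;
    use js_sys::Promise;
    use wasm_bindgen::prelude::*;
    use wasm_bindgen_futures::{future_to_promise, JsFuture};

    // Hypothetical binding to a JS function that returns a Promise resolving
    // to a number; the name `make_promise` is illustrative only.
    #[wasm_bindgen]
    extern "C" {
        fn make_promise() -> Promise;
    }

    // Wrap the JS Promise as a Rust `Future`, transform its value, and hand a
    // new Promise back to JS. Whether the atomics-backed executor or the
    // stable one runs underneath is decided by the crate's `cfg_if` block,
    // not by the caller.
    #[wasm_bindgen]
    pub fn double_it() -> Promise {
        let future = JsFuture::from(make_promise()).and_then(|val| {
            let n = val.as_f64().unwrap_or(0.0);
            Ok(JsValue::from(n * 2.0))
        });
        future_to_promise(future)
    }

Under the `futures_0_3` feature the same conversion would instead go through crates/futures/src/futures_0_3.rs; the sketch above only covers the futures 0.1 path touched by this patch.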