#[cfg(windows)]
use crate::os::windows::event::EventHandle;
#[cfg(target_os = "macos")]
use core_foundation::runloop::*;
use promise::SpawnFunc;
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use std::time::Instant;
#[cfg(all(unix, not(target_os = "macos")))]
use {
    filedescriptor::{FileDescriptor, Pipe},
    mio::unix::EventedFd,
    mio::{Evented, Poll, PollOpt, Ready, Token},
    std::os::unix::io::AsRawFd,
};
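
// The global SpawnQueue instance, lazily created on first use and shared
// (via Arc) by everything that needs to queue work for the gui thread.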
lazy_static::lazy_static! {
    pub(crate) static ref SPAWN_QUEUE: Arc<SpawnQueue> = Arc::new(SpawnQueue::new().expect("failed to create SpawnQueue"));
}
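
/// Pairs a queued function with the time at which it was queued, so that
/// the scheduling delay can be reported via the metrics macros below.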
struct InstrumentedSpawnFunc {
    func: SpawnFunc,
    at: Instant,
}
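
/// Holds functions waiting to run on the gui thread, split into high and
/// low priority queues, plus the platform specific wakeup machinery.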
pub(crate) struct SpawnQueue {
    spawned_funcs: Mutex<VecDeque<InstrumentedSpawnFunc>>,
    spawned_funcs_low_pri: Mutex<VecDeque<InstrumentedSpawnFunc>>,

    #[cfg(windows)]
    pub event_handle: EventHandle,

    #[cfg(all(unix, not(target_os = "macos")))]
    write: Mutex<FileDescriptor>,
    #[cfg(all(unix, not(target_os = "macos")))]
    read: Mutex<FileDescriptor>,
}

impl SpawnQueue {
    pub fn new() -> anyhow::Result<Self> {
        Self::new_impl()
    }
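
    /// Install closures into the `promise` crate's spawn machinery so that
    /// tasks spawned through it land on our high and low priority queues.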
2020-01-16 14:39:44 +03:00
|
|
|
pub fn register_promise_schedulers(&self) {
|
|
|
|
promise::spawn::set_schedulers(
|
|
|
|
Box::new(|task| {
|
2020-01-17 10:27:54 +03:00
|
|
|
SPAWN_QUEUE.spawn_impl(Box::new(move || task.run()), true);
|
2020-01-16 14:39:44 +03:00
|
|
|
}),
|
|
|
|
Box::new(|low_pri_task| {
|
2020-01-17 10:27:54 +03:00
|
|
|
SPAWN_QUEUE.spawn_impl(Box::new(move || low_pri_task.run()), false);
|
2020-01-16 14:39:44 +03:00
|
|
|
}),
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
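
    /// Run some queued functions on the calling (gui) thread.
    /// Returns true if any functions remain queued afterwards, which the
    /// main loop uses to decide how soon to call us again.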
    pub fn run(&self) -> bool {
        self.run_impl()
    }
    // This needs to be a separate function from the loop in `run`
    // in order for the lock to be released before we call the
    // returned function
    fn pop_func(&self) -> Option<SpawnFunc> {
        if let Some(func) = self.spawned_funcs.lock().unwrap().pop_front() {
            metrics::value!("executor.spawn_delay", func.at.elapsed());
            Some(func.func)
        } else if let Some(func) = self.spawned_funcs_low_pri.lock().unwrap().pop_front() {
            metrics::value!("executor.spawn_delay.low_pri", func.at.elapsed());
            Some(func.func)
        } else {
            None
        }
    }
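
    /// Record the enqueue time and push the function onto the queue that
    /// matches its priority.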
    fn queue_func(&self, f: SpawnFunc, high_pri: bool) {
        let f = InstrumentedSpawnFunc {
            func: f,
            at: Instant::now(),
        };
        if high_pri {
            self.spawned_funcs.lock().unwrap()
        } else {
            self.spawned_funcs_low_pri.lock().unwrap()
        }
        .push_back(f);
    }
    fn has_any_queued(&self) -> bool {
        !self.spawned_funcs.lock().unwrap().is_empty()
            || !self.spawned_funcs_low_pri.lock().unwrap().is_empty()
    }
}
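
// On Windows, wakeups are delivered by signalling a manual-reset event
// handle that the gui thread waits upon.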
#[cfg(windows)]
impl SpawnQueue {
    fn new_impl() -> anyhow::Result<Self> {
        let spawned_funcs = Mutex::new(VecDeque::new());
        let spawned_funcs_low_pri = Mutex::new(VecDeque::new());
        let event_handle = EventHandle::new_manual_reset().expect("EventHandle creation failed");
        Ok(Self {
            spawned_funcs,
            spawned_funcs_low_pri,
            event_handle,
        })
    }

    fn spawn_impl(&self, f: SpawnFunc, high_pri: bool) {
        self.queue_func(f, high_pri);
        self.event_handle.set_event();
    }
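
    // Reset the wakeup event and then run everything that is queued;
    // unlike the unix implementation below, this drains both queues.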
    fn run_impl(&self) -> bool {
        self.event_handle.reset_event();
        while let Some(func) = self.pop_func() {
            func();
        }
        self.has_any_queued()
    }
}

#[cfg(all(unix, not(target_os = "macos")))]
impl SpawnQueue {
    fn new_impl() -> anyhow::Result<Self> {
        // On linux we have a slightly sloppy wakeup mechanism;
        // we have a non-blocking pipe that we can use to get
        // woken up after some number of enqueues. We don't
        // guarantee a 1:1 enqueue to wakeup with this mechanism
        // but in practical terms it does guarantee a wakeup
        // if the main thread is asleep and we enqueue some
        // number of items.
        // We can't afford to use a blocking pipe for the wakeup
        // because the write needs to hold a mutex and that
        // can block reads as well as other writers.
        let mut pipe = Pipe::new()?;
        pipe.write.set_non_blocking(true)?;
        pipe.read.set_non_blocking(true)?;
        Ok(Self {
            spawned_funcs: Mutex::new(VecDeque::new()),
            spawned_funcs_low_pri: Mutex::new(VecDeque::new()),
            write: Mutex::new(pipe.write),
            read: Mutex::new(pipe.read),
        })
    }
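
    // Queue the function and then nudge the gui thread by writing a byte
    // to the wakeup pipe. The write is non-blocking and its result is
    // ignored: a full pipe simply means the thread is already signalled.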
    fn spawn_impl(&self, f: SpawnFunc, high_pri: bool) {
        use std::io::Write;

        self.queue_func(f, high_pri);
        self.write.lock().unwrap().write(b"x").ok();
    }
    fn run_impl(&self) -> bool {
        // On linux we only ever process one at a time, so that
        // we can return to the main loop and process messages
        // from the X server
        if let Some(func) = self.pop_func() {
            func();
        }

        // try to drain the pipe.
        // We do this regardless of whether we popped an item
        // so that we avoid being in a perpetually signalled state.
        // It is ok if we completely drain the pipe because the
        // main loop uses the return value to set the sleep
        // interval and will unconditionally call us on each
        // iteration.
        let mut byte = [0u8; 64];
        use std::io::Read;
        self.read.lock().unwrap().read(&mut byte).ok();

        self.has_any_queued()
    }
}
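
// Expose the read end of the wakeup pipe to mio so that the gui event loop
// can poll for queue wakeups alongside its other event sources.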
#[cfg(all(unix, not(target_os = "macos")))]
impl Evented for SpawnQueue {
    fn register(
        &self,
        poll: &Poll,
        token: Token,
        interest: Ready,
        opts: PollOpt,
    ) -> std::io::Result<()> {
        EventedFd(&self.read.lock().unwrap().as_raw_fd()).register(poll, token, interest, opts)
    }

    fn reregister(
        &self,
        poll: &Poll,
        token: Token,
        interest: Ready,
        opts: PollOpt,
    ) -> std::io::Result<()> {
        EventedFd(&self.read.lock().unwrap().as_raw_fd()).reregister(poll, token, interest, opts)
    }

    fn deregister(&self, poll: &Poll) -> std::io::Result<()> {
        EventedFd(&self.read.lock().unwrap().as_raw_fd()).deregister(poll)
    }
}
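
// On macOS a CFRunLoop observer runs queued functions as part of the main
// run loop, and CFRunLoopWakeUp rouses the loop when new work is queued.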
#[cfg(target_os = "macos")]
|
|
|
|
impl SpawnQueue {
|
2019-12-15 08:43:05 +03:00
|
|
|
fn new_impl() -> anyhow::Result<Self> {
|
2019-08-18 18:57:07 +03:00
|
|
|
let spawned_funcs = Mutex::new(VecDeque::new());
|
2019-11-22 05:31:02 +03:00
|
|
|
let spawned_funcs_low_pri = Mutex::new(VecDeque::new());
|
2019-08-18 18:57:07 +03:00
|
|
|
|
|
|
|
let observer = unsafe {
|
|
|
|
CFRunLoopObserverCreate(
|
|
|
|
std::ptr::null(),
|
|
|
|
kCFRunLoopAllActivities,
|
|
|
|
1,
|
|
|
|
0,
|
|
|
|
SpawnQueue::trigger,
|
|
|
|
std::ptr::null_mut(),
|
|
|
|
)
|
|
|
|
};
|
|
|
|
unsafe {
|
|
|
|
CFRunLoopAddObserver(CFRunLoopGetMain(), observer, kCFRunLoopCommonModes);
|
|
|
|
}
|
|
|
|
|
2019-11-22 05:31:02 +03:00
|
|
|
Ok(Self {
|
|
|
|
spawned_funcs,
|
|
|
|
spawned_funcs_low_pri,
|
|
|
|
})
|
2019-08-18 18:57:07 +03:00
|
|
|
}
|
|
|
|
|
2019-11-12 07:23:38 +03:00
|
|
|
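
    // Invoked by the run loop observer on each pass through the main run
    // loop; runs queued functions and, if any remain, requests another
    // wakeup so they are not left waiting.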
extern "C" fn trigger(
|
|
|
|
_observer: *mut __CFRunLoopObserver,
|
|
|
|
_: CFRunLoopActivity,
|
|
|
|
_: *mut std::ffi::c_void,
|
|
|
|
) {
|
2019-11-22 04:32:58 +03:00
|
|
|
if SPAWN_QUEUE.run() {
|
2019-11-22 05:31:02 +03:00
|
|
|
Self::queue_wakeup();
|
2019-11-22 04:32:58 +03:00
|
|
|
}
|
2019-08-18 18:57:07 +03:00
|
|
|
}
|
|
|
|
|
2019-11-22 05:31:02 +03:00
|
|
|
fn queue_wakeup() {
|
2019-08-18 18:57:07 +03:00
|
|
|
unsafe {
|
|
|
|
CFRunLoopWakeUp(CFRunLoopGetMain());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-22 02:44:21 +03:00
|
|
|
fn spawn_impl(&self, f: SpawnFunc, high_pri: bool) {
|
|
|
|
self.queue_func(f, high_pri);
|
2019-11-22 05:31:02 +03:00
|
|
|
Self::queue_wakeup();
|
2019-11-22 04:32:58 +03:00
|
|
|
}
|
|
|
|
|
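
    // As on unix, only run a single queued function per pass so that the
    // run loop stays responsive while work is pending.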
    fn run_impl(&self) -> bool {
        if let Some(func) = self.pop_func() {
            func();
        }
        self.has_any_queued()
    }
}