Commit: Switch to new futures

tomaka committed Jul 23, 2019
1 parent a4036f5 commit 8964804
Showing 11 changed files with 1,927 additions and 5 deletions.
4 changes: 3 additions & 1 deletion Cargo.toml
@@ -9,10 +9,12 @@ repository = "https://github.com/tomaka/wasm-timer"

[dependencies]
futures-preview = "0.3.0-alpha"
parking_lot = "0.9"
pin-utils = "0.1.0-alpha.4"

[target.'cfg(any(target_arch = "wasm32"))'.dependencies]
js-sys = "0.3.14"
send_wrapper = "0.2"
wasm-bindgen = "0.2.37"
wasm-bindgen = { version = "0.2.37" }
wasm-bindgen-futures = { version = "0.3.25", features = ["futures_0_3"] }
web-sys = { version = "0.3.14", features = ["Performance", "Window"] }
625 changes: 625 additions & 0 deletions src/timer.rs

Large diffs are not rendered by default.

155 changes: 155 additions & 0 deletions src/timer/arc_list.rs
@@ -0,0 +1,155 @@
//! An atomically managed intrusive linked list of `Arc` nodes

use std::marker;
use std::ops::Deref;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::Arc;

pub struct ArcList<T> {
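// Stores either a raw `*const Node<T>` pointer cast to `usize`, or one of
// two sentinel values: 0 for an empty list and 1 for a list sealed by
// `take_and_seal`, which rejects any further `push`es.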
list: AtomicUsize,
_marker: marker::PhantomData<T>,
}

impl<T> ArcList<T> {
pub fn new() -> ArcList<T> {
ArcList {
list: AtomicUsize::new(0),
_marker: marker::PhantomData,
}
}

/// Pushes the provided `data` onto this list if it's not already enqueued
/// in this list.
///
/// If `data` is already enqueued then this is a no-op and `Ok(())` is
/// returned. Otherwise `data` is pushed onto the front of the list, or
/// `Err(())` is returned if the list has been sealed.
pub fn push(&self, data: &Arc<Node<T>>) -> Result<(), ()> {
if data.enqueued.swap(true, SeqCst) {
// note that even if our list has been sealed off, the other end is
// still guaranteed to see us because we were previously enqueued.
return Ok(());
}
let mut head = self.list.load(SeqCst);
let node = Arc::into_raw(data.clone()) as usize;
loop {
// If we've been sealed off, abort and return an error
if head == 1 {
unsafe {
drop(Arc::from_raw(node as *mut Node<T>));
}
return Err(());
}

// Otherwise attempt to push this node
data.next.store(head, SeqCst);
match self.list.compare_exchange(head, node, SeqCst, SeqCst) {
Ok(_) => break Ok(()),
Err(new_head) => head = new_head,
}
}
}

/// Atomically empties this list, returning a new owned copy which can be
/// used to iterate over the entries.
pub fn take(&self) -> ArcList<T> {
let mut list = self.list.load(SeqCst);
loop {
if list == 1 {
break;
}
match self.list.compare_exchange(list, 0, SeqCst, SeqCst) {
Ok(_) => break,
Err(l) => list = l,
}
}
ArcList {
list: AtomicUsize::new(list),
_marker: marker::PhantomData,
}
}

/// Atomically empties this list and prevents further successful calls to
/// `push`.
pub fn take_and_seal(&self) -> ArcList<T> {
ArcList {
list: AtomicUsize::new(self.list.swap(1, SeqCst)),
_marker: marker::PhantomData,
}
}

/// Removes the head of the list of nodes, returning `None` if this is an
/// empty list.
pub fn pop(&mut self) -> Option<Arc<Node<T>>> {
let head = *self.list.get_mut();
if head == 0 || head == 1 {
return None;
}
let head = unsafe { Arc::from_raw(head as *const Node<T>) };
*self.list.get_mut() = head.next.load(SeqCst);
// At this point, the node is out of the list, so store `false` so we
// can enqueue it again and see further changes.
assert!(head.enqueued.swap(false, SeqCst));
Some(head)
}
}

impl<T> Drop for ArcList<T> {
fn drop(&mut self) {
while let Some(_) = self.pop() {
// ...
}
}
}

pub struct Node<T> {
next: AtomicUsize,
enqueued: AtomicBool,
data: T,
}

impl<T> Node<T> {
pub fn new(data: T) -> Node<T> {
Node {
next: AtomicUsize::new(0),
enqueued: AtomicBool::new(false),
data: data,
}
}
}

impl<T> Deref for Node<T> {
type Target = T;

fn deref(&self) -> &T {
&self.data
}
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn smoke() {
let a = ArcList::new();
let n = Arc::new(Node::new(1));
assert!(a.push(&n).is_ok());

let mut l = a.take();
assert_eq!(**l.pop().unwrap(), 1);
assert!(l.pop().is_none());
}

#[test]
fn seal() {
let a = ArcList::new();
let n = Arc::new(Node::new(1));
let mut l = a.take_and_seal();
assert!(l.pop().is_none());
assert!(a.push(&n).is_err());

assert!(a.take().pop().is_none());
assert!(a.take_and_seal().pop().is_none());
}
}
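
For orientation, here is a minimal sketch of how such a list is driven from two sides: producers push shared nodes, and a single consumer drains everything pushed so far with one atomic `take`. The `arc_list_sketch` function and its contents are illustrative only, not part of this commit.

use std::sync::Arc;
use std::thread;

// Illustrative only: a producer thread enqueues a shared node, then the
// consumer drains the list once and processes each entry exactly once.
fn arc_list_sketch() {
    let list: Arc<ArcList<u32>> = Arc::new(ArcList::new());

    let producer = {
        let list = list.clone();
        thread::spawn(move || {
            let node = Arc::new(Node::new(42));
            // `push` only fails if the other side already sealed the list.
            let _ = list.push(&node);
        })
    };
    producer.join().unwrap();

    // Atomically grab the current contents; later pushes land on `list` again.
    let mut drained = list.take();
    while let Some(node) = drained.pop() {
        assert_eq!(**node, 42); // `Node<T>` derefs to its payload
    }
}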
208 changes: 208 additions & 0 deletions src/timer/delay.rs
@@ -0,0 +1,208 @@
//! Support for creating futures that represent timeouts.
//!
//! This module contains the `Delay` type which is a future that will resolve
//! at a particular point in the future.

use std::fmt;
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};
use std::time::Duration;

use futures::task::AtomicWaker;

use crate::Instant;
use crate::timer::arc_list::Node;
use crate::timer::{ScheduledTimer, TimerHandle};

/// A future representing the notification that an elapsed duration has
/// occurred.
///
/// This is created through the `Delay::new` or `Delay::new_at` methods,
/// which indicate when the future should fire. Note that these futures are
/// not intended for high-resolution timing; they will likely fire some
/// granularity after the exact instant they are scheduled to fire at.
pub struct Delay {
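// `None` marks an inert delay: the backing timer has gone away, or the
// initial push onto its list failed, and `poll` will return an error.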
state: Option<Arc<Node<ScheduledTimer>>>,
when: Instant,
}

impl Delay {
/// Creates a new future which will fire once `dur` has elapsed.
///
/// The returned object will be bound to the default timer for this thread.
/// The default timer will be spun up in a helper thread on first use.
#[inline]
pub fn new(dur: Duration) -> Delay {
Delay::new_at(Instant::now() + dur)
}

/// Creates a new future which will fire at the time specified by `at`.
///
/// The returned object will be bound to the default timer for this thread.
/// The default timer will be spun up in a helper thread on first use.
#[inline]
pub fn new_at(at: Instant) -> Delay {
Delay::new_handle(at, Default::default())
}

/// Creates a new future which will fire at the time specified by `at`.
///
/// The returned instance of `Delay` will be bound to the timer specified by
/// the `handle` argument.
pub fn new_handle(at: Instant, handle: TimerHandle) -> Delay {
let inner = match handle.inner.upgrade() {
Some(i) => i,
None => {
return Delay {
state: None,
when: at,
}
}
};
let state = Arc::new(Node::new(ScheduledTimer {
at: Mutex::new(Some(at)),
state: AtomicUsize::new(0),
waker: AtomicWaker::new(),
inner: handle.inner,
slot: Mutex::new(None),
}));

// If we fail to actually push our node then we've become an inert
// timer, meaning that we'll want to immediately return an error from
// `poll`.
if inner.list.push(&state).is_err() {
return Delay {
state: None,
when: at,
};
}

inner.waker.wake();
Delay {
state: Some(state),
when: at,
}
}

/// Resets this timeout to a new timeout which will fire at the time
/// specified by `dur`.
///
/// This is equivalent to calling `reset_at` with `Instant::now() + dur`.
#[inline]
pub fn reset(&mut self, dur: Duration) {
self.reset_at(Instant::now() + dur)
}

/// Resets this timeout to a new timeout which will fire at the time
/// specified by `at`.
///
/// This method is usable even if this instance of `Delay` has "already
/// fired". That is, if this future has resolved, calling this method means
/// that the future will still re-resolve at the specified instant.
///
/// If `at` is in the past then this future will immediately be resolved
/// (when `poll` is called).
///
/// Note that if any task is currently blocked on this future then that task
/// will be dropped. It is required to call `poll` again after this method
/// has been called to ensure that a task is blocked on this future.
#[inline]
pub fn reset_at(&mut self, at: Instant) {
self.when = at;
if self._reset(at).is_err() {
self.state = None
}
}

fn _reset(&mut self, at: Instant) -> Result<(), ()> {
let state = match self.state {
Some(ref state) => state,
None => return Err(()),
};
if let Some(timeouts) = state.inner.upgrade() {
let mut bits = state.state.load(SeqCst);
loop {
// If we've been invalidated, cancel this reset
if bits & 0b10 != 0 {
return Err(());
}
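// Clear the "fired" (0b01) and "invalidated" (0b10) flags and bump the
// counter stored in the remaining upper bits.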
let new = bits.wrapping_add(0b100) & !0b11;
match state.state.compare_exchange(bits, new, SeqCst, SeqCst) {
Ok(_) => break,
Err(s) => bits = s,
}
}
*state.at.lock().unwrap() = Some(at);
// If we fail to push our node then we've become an inert timer, so
// we'll want to clear our `state` field accordingly
timeouts.list.push(state)?;
timeouts.waker.wake();
}

Ok(())
}
}

#[inline]
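/// Returns the instant at which the given `Delay` is scheduled to fire.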
pub fn fires_at(timeout: &Delay) -> Instant {
timeout.when
}

impl Future for Delay {
type Output = io::Result<()>;

fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let state = match self.state {
Some(ref state) => state,
None => {
let err = Err(io::Error::new(io::ErrorKind::Other, "timer has gone away"));
return Poll::Ready(err);
}
};

if state.state.load(SeqCst) & 1 != 0 {
return Poll::Ready(Ok(()));
}

state.waker.register(&cx.waker());

// Now that we've registered, do the full check of our own internal
// state. If we've fired the first bit is set, and if we've been
// invalidated the second bit is set.
match state.state.load(SeqCst) {
n if n & 0b01 != 0 => Poll::Ready(Ok(())),
n if n & 0b10 != 0 => Poll::Ready(Err(io::Error::new(
io::ErrorKind::Other,
"timer has gone away",
))),
_ => Poll::Pending,
}
}
}

impl Drop for Delay {
fn drop(&mut self) {
let state = match self.state {
Some(ref s) => s,
None => return,
};
if let Some(timeouts) = state.inner.upgrade() {
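// Clearing `at` before re-enqueueing the node marks this delay as
// cancelled for whoever drains the list (presumably the timer driver in
// `timer.rs`, which is not part of this rendered diff).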
*state.at.lock().unwrap() = None;
if timeouts.list.push(state).is_ok() {
timeouts.waker.wake();
}
}
}
}

impl fmt::Debug for Delay {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
f.debug_struct("Delay").field("when", &self.when).finish()
}
}
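
As a rough sketch of how the `Delay` added above is used (assuming it ends up re-exported at the crate root as `wasm_timer::Delay`, and running on a native target where the default timer can spin up its helper thread):

use std::time::Duration;
use futures::executor::block_on; // executor from the `futures-preview` dependency
use wasm_timer::Delay; // assumed re-export path for the type above

fn main() -> std::io::Result<()> {
    // Resolves roughly 50 ms from now, bound to the default timer.
    let mut delay = Delay::new(Duration::from_millis(50));
    block_on(&mut delay)?;

    // A resolved `Delay` can be re-armed with `reset`; it has to be polled
    // again (here via `block_on`) so that a task is registered and it can
    // fire once more.
    delay.reset(Duration::from_millis(50));
    block_on(delay)?;
    Ok(())
}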
