diff --git a/Examples.md b/Examples.md
index c72a18da7..672944f0e 100644
--- a/Examples.md
+++ b/Examples.md
@@ -4,36 +4,6 @@ Many examples of using and creating streams are given in-line in the specificati
## Readable Streams
-### Getting the Next Piece of Available Data
-
-As another example, this helper function will return a promise for the next available piece of data from a given readable stream. This introduces an artificial delay if there is already data queued, but can provide a convenient interface for simple chunk-by-chunk consumption, as one might do e.g. when streaming database records. It uses an EOF sentinel to signal the end of the stream, and behaves poorly if called twice in parallel without waiting for the previously-returned promise to fulfill.
-
-```js
-const EOF = Symbol("ReadableStream getNext EOF");
-
-function getNext(stream) {
- if (stream.state === "closed") {
- return Promise.resolve(EOF);
- }
-
- return stream.ready.then(() => {
- if (stream.state === "closed") {
- return EOF;
- }
-
- // If stream is "errored", this will throw, causing the promise to be rejected.
- return stream.read();
- });
-}
-
-// Usage with proposed ES2016 async/await keywords:
-async function processStream(stream) {
- while ((const chunk = await getNext(stream)) !== EOF) {
- // do something with `chunk`.
- }
-}
-```
-
### Buffering the Entire Stream Into Memory
This function uses the reading APIs to buffer the entire stream in memory and give a promise for the results, defeating the purpose of streams but educating us while doing so:
@@ -42,19 +12,17 @@ This function uses the reading APIs to buffer the entire stream in memory and gi
function readableStreamToArray(readable) {
const chunks = [];
- pump();
- return readable.closed.then(() => chunks);
+ return pump();
function pump() {
- while (readable.state === "readable") {
- chunks.push(readable.read());
- }
-
- if (readable.state === "waiting") {
- readable.ready.then(pump);
- }
+ return readable.read().then(({ value, done }) => {
+ if (done) {
+ return chunks;
+ }
- // Otherwise the stream is "closed" or "errored", which will be handled above.
+ chunks.push(value);
+ return pump();
+ });
}
}
@@ -65,6 +33,21 @@ readableStreamToArray(myStream).then(chunks => {
})
```
+We can also write this using the [async function syntax](https://github.com/lukehoban/ecmascript-asyncawait/) proposed for ES2016:
+
+```js
+async function readableStreamToArray(readable) {
+ const chunks = [];
+
+ let result;
+ while (!(result = await readable.read()).done) {
+ chunks.push(result.value);
+ }
+
+ return chunks;
+}
+```
+
## Writable Streams
### Reporting Incremental Progress
diff --git a/Locking Design Doc.md b/Locking Design Doc.md
deleted file mode 100644
index 4ded526fb..000000000
--- a/Locking Design Doc.md
+++ /dev/null
@@ -1,92 +0,0 @@
-# Locking a Stream for Exclusive Reading
-
-In [#241](https://github.com/whatwg/streams/issues/241) we had a great conversation about the need for being able to "lock" a stream for exclusive use. This would be done implicitly while piping, but could also be useful for building user-facing abstractions, as we'll see below.
-
-What emerged was the idea of a "stream reader," which has most of the readable stream interface, but while it exists you cannot read from the stream except through that reader.
-
-This document represents some formative rationales for the design of the reader concept, approached from the perspective of a developer that uses increasingly complex features of the streams ecosystem.
-
-## Developer usage
-
-### Level 0: no reader usage
-
-If the developer knows nothing about readers, they can continue using the stream just fine.
-
-- `read()`, `state`, and `ready` all behave as they do now if used without `pipeTo`.
-- `pipeTo` will cause the following side effects:
- - `read()` will throw an informative error
- - `state` will return `"waiting"` until the pipe completes (successfully or otherwise)
- - `ready` will return a promise that remains pending until the pipe completes
-
-### Level 1: using readers directly
-
-The developer might want to create their own abstractions that require exclusive access to the stream. For example, a read-to-end function would probably want to avoid others being able to call `.read()` in the middle.
-
-Example code:
-
-```js
-function readAsJson(rs) {
- let string = "";
- const reader = rs.getReader();
-
- pump();
-
- // These lines would be simpler with `Promise.prototype.finally` (or async functions).
- return reader.closed.then(
- () => {
- reader.releaseLock();
- return JSON.parse(string);
- },
- e => {
- reader.releaseLock();
- throw e;
- }
- );
-
- function pump() {
- while (reader.state === "readable") {
- string += reader.read();
- }
- if (reader.state === "waiting") {
- reader.ready.then(pump);
- }
- }
-}
-```
-
-The stream would have the same behaviors after being passed to `readAsJson` that it would have after calling its `pipeTo` method.
-
-The reader should have all of the non-piping-related public interface of the stream. This includes:
-
-- `closed` getter, which is a pass-through
-- `state` and `ready` getters, which reveal the "true" state and state transitions of the stream which the stream itself no longer reveals
-- `read()` method, which has the same behavior as that of the stream's except that it works while the stream is locked
-- `cancel()` method, which first calls `this.releaseLock()` before the pass-through
-
-While a stream is locked, it is indistinguishable from a stream that has been drained of all chunks and is not getting any more enqueued. We could consider adding some kind of test, like `stream.isLocked`, to distinguish. However, it's not clear there's a compelling reason for doing so (let us know if so?), and the indistinguishability is kind of a nice property from the perspective of the principle of least authority.
-
-For readers, you should be able to tell if they're still active (i.e. have not been released) via `reader.isActive`.
-
-Once a reader is released, it behaves like a closed stream (unless the encapsulated stream has already errored, in which case it behaves like the errored stream).
-
-Note that with this setup, all the same invariants apply to readable streams as they do to readers. For example, when `ready` is fulfilled, the reader's `state` property will no longer return `"waiting"`, and `read()` will return a chunk, just like with a stream.
-
-### Level 2: subclassers of `ReadableStream`
-
-Subclasses of `ReadableStream` should get locking support "for free," within reason. The same mechanisms for acquiring and using a lock should work flawlessly.
-
-However, if the subclasser starts overriding `read()`, `state`, or `ready`, they will be in trouble. These are delicate operations that reflect the state of the internal queue. The point of the `ExclusiveStreamReader` is to bypass the developer's ability to directly inspect the internal queue.
-
-As such, we design `read()`, `state`, and `ready` for readers to bypass the public API of the readable stream and go directly to its internal queue. That means any subclass customizations to `read()` et al. will be bypassed when using `ExclusiveStreamReader`.
-
-In most cases we can imagine, this will be exactly what you want. For example, let's say you were trying to do something like Node.js streams, which emit a `"data"` event whenever a chunk is read. One way of doing this (perhaps not the best way) would be to subclass `ReadableStream` and replace the `read` method with something like `this.emit("data", chunk); super.read(chunk);`. But this is exactly the kind of code an exclusive reader should bypass!
-
-However, if you really need your exclusive reader to work with customized `read()`, `state`, or `ready`, there's an escape hatch! You can just implement your own reader class, and return it from your overridden `getReader()` method. It can coordinate with your overridden `read()`/`state`/`ready` all it wants.
-
-### Level 3: custom readable stream implementations
-
-A custom readable stream implementation is a class that intends to behave like a readable stream, but does not subclass `ReadableStream` per se. A good example is the experimental `ReadableByteStream` we have in this repo.
-
-As explained above, readers are coupled fairly tightly to the internal queue of the stream. Since custom readable streams can have an arbitrary internal structure, custom readable streams will need to implement their own readers and `getReader()` method.
-
-Note that if they implement a `getReader()` that returns something conforming to the exclusive stream reader API, along with `state`, `ready`, `closed`, `read()`, and `cancel()`, then `ReadableStream.prototype.pipeTo` (and `pipeThrough`) will still work when applied generically to the custom readable stream.
diff --git a/index.bs b/index.bs
index 9844d75af..b875dc0a1 100644
--- a/index.bs
+++ b/index.bs
@@ -88,6 +88,10 @@ time via the stream's public interface.
Code that reads from a readable stream using its public interface is known as a consumer.
+Consumers also have the ability to cancel a readable stream. Doing so indicates
+that the consumer has lost interest in the stream; it will immediately close the stream, throw away any queued
+chunks, and execute any cancellation mechanism of the underlying source.
+
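As a non-normative sketch of what cancellation looks like from the consumer side (the underlying source shown here is purely illustrative):

```js
const stream = new ReadableStream({
  start(enqueue, close, error) {
    enqueue("a");
    enqueue("b");
  },
  cancel(reason) {
    // Release access to the underlying source here.
    console.log("canceled because:", reason);
  }
});

// The consumer loses interest: the stream closes immediately, the queued
// chunks ("a" and "b") are thrown away, and the source's cancel() runs.
stream.cancel(new Error("no longer needed")).then(() => {
  console.log("cancellation finished");
});
```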
\[[underlyingSource]]
@@ -328,9 +330,9 @@ Instances of ReadableStream are created with the internal slots des
source by setting up relevant event listeners, or to acquire access to a pull source. If this process
is asynchronous, it can return a promise to signal success or failure.
pull(enqueue, close) is called when the stream's internal queue of chunks is depleted, and the
- consumer has signaled that they wish to consume more data. If pull returns a promise, then it will
- not be called again until the promise returned by the previous call has fulfilled; if the promise rejects, the
- stream will become errored.
+ consumer has signaled that they wish to consume more data. If pull returns a promise, then
+ pull will not be called again until that promise fulfills; if the promise rejects, the stream will
+ become errored.
cancel(reason) is called when the consumer signals that they are no longer interested in the
stream. It should perform any actions necessary to release access to the underlying source. If this
process is asynchronous, it can return a promise to signal success or failure.
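For example, a pull source might be adapted along these lines (a non-normative sketch; readChunkFromSource and releaseSource are hypothetical helpers, not part of this specification):

```js
const stream = new ReadableStream({
  pull(enqueue, close) {
    // Called only when the internal queue is depleted and the consumer wants
    // more data. Returning a promise delays the next pull until it fulfills;
    // if the promise rejects, the stream becomes errored.
    return readChunkFromSource().then(chunk => {
      if (chunk === null) {
        close();        // the underlying source is exhausted
      } else {
        enqueue(chunk); // queue the chunk for the consumer
      }
    });
  },
  cancel(reason) {
    // Release access to the underlying source; may return a promise.
    return releaseSource(reason);
  }
});
```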
@@ -349,11 +351,12 @@ Instances of ReadableStream are created with the internal slots des
- Set this@\[[underlyingSource]] to underlyingSource.
-
- Set this@\[[readyPromise]] and this@\[[closedPromise]] to new promises.
+
- Set this@\[[closedPromise]] to a new promise.
- Set this@\[[queue]] to a new empty List.
-
- Set this@\[[state]] to
"waiting" .
+ - Set this@\[[state]] to
"readable" .
- Set this@\[[started]], this@\[[draining]], and this@\[[pullScheduled]] to false.
-
- Set this@\[[readableStreamReader]] to undefined.
+
- Set this@\[[reader]], this@\[[pullingPromise]], and this@\[[storedError]] to
+ undefined.
- Set this@\[[enqueue]] to CreateReadableStreamEnqueueFunction(this).
- Set this@\[[close]] to CreateReadableStreamCloseFunction(this).
- Set this@\[[error]] to CreateReadableStreamErrorFunction(this).
@@ -386,109 +389,58 @@ Instances of
ReadableStream are created with the internal slots des
- Return this@\[[closedPromise]].
-get ready
-
-
- The ready getter returns a promise that will be fulfilled when the stream transitions away from the
- "waiting" state to any other state. Once the stream transitions back to "waiting" , the
- getter will return a new promise that stays pending until the next state transition.
-
-
-
- - If IsReadableStream(this) is false, return a promise rejected with a TypeError exception.
-
- Return this@\[[readyPromise]].
-
-
-get state
-
-
- The state getter returns the state of the stream, which will be one of the following:
-
-
- "waiting"
- - The stream's internal queue is empty; use
.ready to be notified of any changes.
-
- "readable"
- - The stream's internal queue has chunks available; call
.read() to retrieve the next one.
-
- "closed"
- - All data has been successfully read from the stream, and no more is available.
-
-
"errored"
- - An error occurred interacting with the underlying source, and so the stream is now dead.
-
-
- If the stream is locked to a reader, the stream will appear to be "waiting" .
-
- The way in which the stream will transition between states is summarized in more detail in [[#rs-state-diagram]].
-
-
-
- - If IsReadableStream(this) is false, throw a TypeError exception.
-
- If IsReadableStreamLocked(this) is true, return
"waiting" .
- - Return this@\[[state]].
-
-
cancel(reason)
- The cancel method signals a loss of interest in the stream by a consumer. Calling it will immediately
- move the stream to a "closed" state, throwing away any queued data, as well as executing any
- cancellation mechanism of the underlying source.
-
- Readable streams cannot be cancelled while locked to a reader; this method will return a rejected promise.
+ The cancel method cancels the stream, signaling a loss of interest
+ in the stream by a consumer. The supplied reason argument will be given to the underlying source, which
+ may or may not use it.
- If IsReadableStream(this) is false, return a promise rejected with a TypeError exception.
-
- If IsReadableStreamLocked(this) is true, return a new promise rejected with a TypeError
- exception.
- Return CancelReadableStream(this, reason).
getReader()
- The getReader method creates an exclusive stream reader and
- locks the stream to the the new reader. While the stream is locked, it cannot be
- manipulated directly, and will appear to be an inert, empty stream waiting for new chunks to be enqueued.
- Instead, the returned reader object can be used to read from or cancel the stream, or to discern its state and state
- transitions. If or when the lock is released, the stream can be used again as
- normal.
+ The getReader method creates a readable stream reader and
+ locks the stream to the new reader. While the stream is locked, no other reader
+ can be acquired until this one is released.
- This functionality is especially useful for creating abstractions that desire the ability to consume a stream in its
- entirety. By getting a reader for the stream, you can ensure nobody else can interleave reads with yours, interfering
- with your abstraction or observing its side-effects.
+ The returned reader provides the ability to directly read individual chunks from the stream via the reader's
+ read method. This design ensures that if you control the reader, nobody else can interleave reads with
+ yours, interfering with your code or observing its side-effects.
Note that when a stream is closed or errors, any reader it is locked to is automatically released.
- If IsReadableStream(this) is false, throw a TypeError exception.
-
- Return AcquireExclusiveStreamReader(this).
+
- Return AcquireReadableStreamReader(this).
- An example of an abstraction that might benefit from using an exclusive reader is a function like the following,
- which is designed to read an entire readable stream into memory as an array of chunks.
+ An example of an abstraction that might benefit from using a reader is a function like the following, which is
+ designed to read an entire readable stream into memory as an array of chunks.
function readAllChunks(readableStream) {
const reader = readableStream.getReader();
const chunks = [];
- pump();
-
- return reader.closed.then(() => chunks);
+ return pump();
function pump() {
- while (reader.state === "readable") {
- chunks.push(reader.read());
- }
+ return reader.read().then(({ value, done }) => {
+ if (done) {
+ return chunks;
+ }
- if (reader.state === "waiting") {
- reader.ready.then(pump);
- }
+ chunks.push(value);
+ return pump();
+ });
}
}
@@ -503,6 +455,9 @@ Instances of ReadableStream are created with the internal slots des
through a transform stream (or any other { writable, readable } pair). It simply pipes the stream
into the writable side of the supplied pair, and returns the readable side for further use.
+ Piping a stream will generally lock it for the duration of the pipe, preventing any
+ other consumer from acquiring a reader.
+
This method is intentionally generic; it does not require that its this value be a ReadableStream
object. It also does not require that its writable argument be a WritableStream instance,
or that its readable argument be a ReadableStream instance.
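For example (non-normative), assuming a hypothetical createDecompressTransform() that returns such a { writable, readable } pair:

```js
// pipeThrough pipes this stream into the transform's writable side and
// returns its readable side, so calls can be chained.
const decompressedStream = compressedStream.pipeThrough(createDecompressTransform());

decompressedStream.getReader().read().then(({ value, done }) => {
  if (!done) {
    console.log("First decompressed chunk:", value);
  }
});
```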
@@ -532,6 +487,9 @@ Instances of ReadableStream are created with the internal slots des
number of passed options. It returns a promise that fulfills when the piping process completes successfully, or
rejects if any errors were encountered.
+ Piping a stream will generally lock it for the duration of the pipe, preventing any
+ other consumer from acquiring a reader.
+
This method is intentionally generic; it does not require that its this value be a ReadableStream
object.
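For example (non-normative), piping one stream into another and observing the result might look like:

```js
readableStream
  .pipeTo(writableStream)
  .then(() => console.log("All data successfully piped!"))
  .catch(e => console.error("Piping failed:", e));
```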
@@ -543,36 +501,24 @@ For now, please consider the reference implementation normative:
reference-implementation/lib/readable-stream.js,
look for the pipeTo method.
-read()
+Class ReadableStreamReader
-
- The read method will return the next chunk from the stream's internal queue, if available. If
- this causes the queue to become empty, more data will be pulled from the underlying source.
-
-
-
- - If IsReadableStream(this) is false, throw a TypeError exception.
-
- If IsReadableStreamLocked(this) is true, throw a TypeError exception.
-
- Return ReadFromReadableStream(this).
-
-
-Class ExclusiveStreamReader
+The ReadableStreamReader class represents a readable stream reader designed to be vended by a
+ReadableStream instance.
Class Definition
This section is non-normative.
-If one were to write the ExclusiveStreamReader class in something close to the syntax of [[!ECMASCRIPT]],
+If one were to write the ReadableStreamReader class in something close to the syntax of [[!ECMASCRIPT]],
it would look like
- class ExclusiveStreamReader {
+ class ReadableStreamReader {
constructor(stream)
get closed()
get isActive()
- get ready()
- get state()
cancel(reason)
read()
@@ -582,7 +528,7 @@ it would look like
Internal Slots
-Instances of ExclusiveStreamReader are created with the internal slots described in the following table:
+Instances of ReadableStreamReader are created with the internal slots described in the following table:
@@ -596,48 +542,47 @@ Instances of ExclusiveStreamReader are created with the internal sl
A promise returned by the reader's closed getter
|
- \[[encapsulatedReadableStream]]
- | A ReadableStream instance that this reader encapsulates; also used for the
- IsExclusiveStreamReader brand check
+ | \[[ownerReadableStream]]
+ | A ReadableStream instance that owns this reader; also used for the
+ IsReadableStreamReader brand check
|
- \[[readyPromise]]
- | A promise returned by the reader's ready getter
- |
-
- \[[state]]
- | A string returned by the reader's state getter
+ | \[[readRequests]]
+ | A List of promises returned by calls to the reader's read() method that have not yet been resolved,
+ due to the consumer requesting chunks sooner than they are available.
|
-new ExclusiveStreamReader(stream)
+new ReadableStreamReader(stream)
+
+
+ The ReadableStreamReader constructor is generally not meant to be used directly; instead, a stream's
+ getReader() method should be used. This allows different classes of readable streams to vend different
+ classes of readers without the consumer needing to know which goes with which.
+
- - If stream does not have a \[[readableStreamReader]] internal slot, throw a TypeError exception.
+
- If IsReadableStream(stream) is false, throw a TypeError exception.
+
- If stream@\[[state]] is
"closed" , throw a TypeError exception.
+ - If stream@\[[state]] is
"errored" , throw stream@\[[storedError]].
- If IsReadableStreamLocked(stream) is true, throw a TypeError exception.
-
- Assert: stream@\[[state]] is
"waiting" or "readable" .
- - If stream@\[[state]] is
"readable" , set stream@\[[readyPromise]] to a new
- promise.
- - Set stream@\[[readableStreamReader]] to this.
-
- Set this@\[[state]] to stream@\[[state]].
-
- If stream@\[[state]] is
"waiting" , set this@\[[readyPromise]] to a new promise.
- - Otherwise, set this@\[[readyPromise]] to a new promise resolved with undefined.
+
- Set stream@\[[reader]] to this.
+
- Set this@\[[ownerReadableStream]] to stream.
+
- Set this@\[[readRequests]] to a new empty List.
- Set this@\[[closedPromise]] to a new promise.
-
- Set this@\[[encapsulatedReadableStream]] to stream.
-Properties of the ExclusiveStreamReader Prototype
+Properties of the ReadableStreamReader Prototype
get closed
- While the reader is active, the closed getter for a stream reader simply
- delegates to the encapsulated stream, to allow consumers to use the reader interface as they would the readable
- stream interface.
+ While the reader is active, the promise returned by the closed getter for a
+ stream reader will behave the same as that for the original stream, for convenience.
- - If IsExclusiveStreamReader(this) is false, return a promise rejected with a TypeError
+
- If IsReadableStreamReader(this) is false, return a promise rejected with a TypeError
exception.
- Return this@\[[closedPromise]].
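A brief non-normative illustration:

```js
const reader = readableStream.getReader();

// reader.closed settles just as readableStream.closed would:
reader.closed.then(
  () => console.log("The stream, and thus this reader, is now closed."),
  e => console.error("The stream errored:", e)
);
```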
@@ -650,105 +595,100 @@ Instances of ExclusiveStreamReader are created with the internal sl
- - If IsExclusiveStreamReader(this) is false, throw a TypeError exception.
-
- Return SameValue(this@\[[encapsulatedReadableStream]]@\[[readableStreamReader]], this).
-
-
-get ready
-
-
- The ready getter behaves the same as that for the readable stream encapsulated by this reader, except
- that while the reader is active, the promise returned will reveal the stream's true
- state transitions. (In contrast, the stream itself does not signal any state transitions while
- locked, giving off the appearance of being "waiting" for the
- duration.)
-
-
-
- - If IsExclusiveStreamReader(this) is false, return a promise rejected with a TypeError
- exception.
-
- Return this@\[[readyPromise]].
-
-
-get state
-
-
- The state getter behaves the same as that for the readable stream encapsulated by this reader, except
- that while the reader is active, it will reveal the stream's true state. (In contrast,
- the stream itself gives off the appearance of being "waiting" while it is
- locked.)
-
-
-
- - If IsExclusiveStreamReader(this) is false, throw a TypeError exception.
-
- Return this@\[[state]].
+
- If IsReadableStreamReader(this) is false, throw a TypeError exception.
+
- If this@\[[ownerReadableStream]] is undefined, return false; otherwise, return
+ true.
cancel(reason)
If the reader is active, the cancel method behaves the same as that for the
- encapsulated stream. When done, it automatically releases the lock.
+ associated stream. When done, it automatically releases the lock.
- - If IsExclusiveStreamReader(this) is false, return a promise rejected with a TypeError
+
- If IsReadableStreamReader(this) is false, return a promise rejected with a TypeError
exception.
-
- If SameValue(this@\[[encapsulatedReadableStream]]@\[[readableStreamReader]], this) is
- false, return this@\[[closedPromise]].
-
- Call-with-rethrow CancelReadableStream(this@\[[encapsulatedReadableStream]], reason).
+
- If this@\[[ownerReadableStream]] is undefined, return a new promise resolved with
+ undefined.
+
- Return CancelReadableStream(this@\[[ownerReadableStream]], reason).
read()
+
- If the reader is active, the read method behaves the same as that for the
- encapsulated stream, except that the reader will be able to use its exclusive access to the stream to retrieve
- chunks. (In contrast, the stream itself will not allow any chunks to be read from it while it is
- locked.)
+ The read method will return a promise that allows access to the next chunk from the stream's
+ internal queue, if available.
+
+
+ - If a chunk becomes available, the promise will be fulfilled with an object of the form
+
{ value: theChunk, done: false } .
+ - If the stream becomes closed, the promise will be fulfilled with an object of the form
+
{ value: undefined, done: true } .
+ - If the stream becomes errored, the promise will be rejected with the relevant error.
+
+
+ If reading a chunk causes the queue to become empty, more data will be pulled from the underlying source.
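A non-normative sketch of consuming these results:

```js
const reader = readableStream.getReader();

function readNext() {
  return reader.read().then(({ value, done }) => {
    if (done) {
      // { value: undefined, done: true }: the stream has closed.
      console.log("No more chunks.");
      return;
    }
    // { value: theChunk, done: false }: a chunk was (or became) available.
    console.log("Read a chunk:", value);
    return readNext();
  });
}

// A rejection here means the stream became errored.
readNext().catch(e => console.error("The stream errored:", e));
```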
- - If IsExclusiveStreamReader(this) is false, throw a TypeError exception.
-
- If SameValue(this@\[[encapsulatedReadableStream]]@\[[readableStreamReader]], this) is
- false, throw a TypeError exception.
-
- Return ReadFromReadableStream(this@\[[encapsulatedReadableStream]]).
+
- If IsReadableStreamReader(this) is false, throw a TypeError exception.
+
- If this@\[[ownerReadableStream]] is undefined or
+ this@\[[ownerReadableStream]]@\[[state]] is
"closed" , return a new promise resolved with
+ CreateIterResultObject(undefined, true).
+ - If this@\[[ownerReadableStream]]@\[[state]] is
"errored" , return a new promise
+ rejected with this@\[[ownerReadableStream]]@\[[storedError]].
+ - If this@\[[ownerReadableStream]]@\[[queue]] is not empty,
+
+ - Let chunk be DequeueValue(this@\[[ownerReadableStream]]@\[[queue]]).
+
- If this@\[[ownerReadableStream]]@\[[draining]] is true and
+ this@\[[ownerReadableStream]]@\[[queue]] is now empty, call-with-rethrow
+ CloseReadableStream(this@\[[ownerReadableStream]]).
+
- Otherwise, call-with-rethrow CallReadableStreamPull(this@\[[ownerReadableStream]]).
+
- Return a new promise resolved with CreateIterResultObject(chunk, false).
+
+ - Otherwise,
+
+ - Let readRequestPromise be a new promise.
+
- Append readRequestPromise as the last element of this@\[[readRequests]].
+
- Return readRequestPromise.
+
releaseLock()
- The releaseLock method releases the reader's lock on the encapsulated
- stream. After the lock is released, the reader is no longer active. If the encapsulated
+ The releaseLock method releases the reader's lock on the corresponding
+ stream. After the lock is released, the reader is no longer active. If the associated
stream is errored when the lock is released, the reader will appear errored in the same way from now on; otherwise,
the reader will appear closed.
+
+ A reader's lock cannot be released while it still has a pending read request, i.e., if a promise returned by the
+ reader's read() method has not yet been settled. Attempting to do so will throw a TypeError
+ and leave the reader locked to the stream.
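A non-normative sketch of releasing the lock only after any outstanding read has settled:

```js
const reader = readableStream.getReader();

reader.read().then(({ value, done }) => {
  // By the time this handler runs, the read request has settled, so the
  // lock can be released safely.
  reader.releaseLock();

  // From here on the stream can be locked to a fresh reader (e.g. by a pipe).
});

// Note: calling reader.releaseLock() while a read request is still pending
// would throw a TypeError and leave the reader locked to the stream.
```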
- - If IsExclusiveStreamReader(this) is false, throw a TypeError exception.
-
- If SameValue(this@\[[encapsulatedReadableStream]]@\[[readableStreamReader]], this) is
- false, return undefined.
-
- Assert: this@\[[state]] is
"waiting" or "readable" .
- - Call-with-rethrow CloseReadableStreamReader(this).
-
- If this@\[[encapsulatedReadableStream]]@\[[state]] is
"readable" , resolve
- this@\[[encapsulatedReadableStream]]@\[[readyPromise]] with undefined.
- - Set this@\[[encapsulatedReadableStream]]@\[[readableStreamReader]] to undefined.
+
- If IsReadableStreamReader(this) is false, throw a TypeError exception.
+
- If this@\[[ownerReadableStream]] is undefined, return undefined.
+
- If this@\[[readRequests]] is not empty, throw a TypeError exception.
+
- Return ReleaseReadableStreamReader(this).
Readable Stream Abstract Operations
-AcquireExclusiveStreamReader ( stream )
+AcquireReadableStreamReader ( stream )
- - If stream@\[[state]] is
"closed" , throw a TypeError exception.
- - If stream@\[[state]] is
"errored" , throw stream@\[[storedError]].
- - Return Construct(
ExclusiveStreamReader , «stream»).
+ - Return Construct(
ReadableStreamReader , «stream»).
CallReadableStreamPull ( stream )
@@ -778,14 +718,15 @@ Instances of ExclusiveStreamReader are created with the internal sl
Return undefined.
-CancelReadableStream ( stream )
+CancelReadableStream ( stream, reason )
- - If this@\[[state]] is
"closed" or "errored" , return
- this@\[[closedPromise]].
- - Let this@\[[queue]] be a new empty List.
+
- If stream@\[[state]] is
"closed" , return a new promise resolved with undefined.
+ - If stream@\[[state]] is
"errored" , return a new promise rejected with
+ stream@\[[storedError]].
+ - Set stream@\[[queue]] to a new empty List.
- Call-with-rethrow CloseReadableStream(stream).
-
- Let sourceCancelPromise be PromiseInvokeOrNoop(this@\[[underlyingSource]],
+
- Let sourceCancelPromise be PromiseInvokeOrNoop(stream@\[[underlyingSource]],
"cancel" , «reason»).
- Return the result of transforming sourceCancelPromise by a fulfillment handler that returns
undefined.
@@ -794,31 +735,14 @@ Instances of
ExclusiveStreamReader are created with the internal sl
CloseReadableStream ( stream )
- - If IsReadableStreamLocked(stream) is true,
-
- - Call-with-rethrow CloseReadableStreamReader(stream@\[[readableStreamReader]]).
-
- Set stream@\[[readableStreamReader]] to undefined
-
- Resolve stream@\[[readyPromise]] with undefined.
-
- - Otherwise,
-
- - If stream@\[[state]] is
"waiting" , resolve stream@\[[readyPromise]] with
- undefined.
-
+ - Assert: stream@\[[state]] is
"readable" .
- Resolve stream@\[[closedPromise]] with undefined.
- Set stream@\[[state]] to
"closed" .
+ - If IsReadableStreamLocked(stream) is true, return
+ ReleaseReadableStreamReader(stream).
- Return undefined.
-CloseReadableStreamReader ( reader )
-
-
- - If reader@\[[state]] is
"waiting" , resolve reader@\[[readyPromise]] with
- undefined.
- - Resolve reader@\[[closedPromise]] with undefined.
-
- Set reader@\[[state]] to
"closed" .
-
-
CreateReadableStreamCloseFunction ( stream )
@@ -829,14 +753,9 @@ A Readable Stream Close Function is a built-in anonymous function of
stream, that performs the following steps:
- - If stream@\[[state]] is
"waiting" ,
-
- - Call-with-rethrow CloseReadableStream(this).
-
- - If stream@\[[state]] is
"readable" ,
-
- - Set stream@\[[draining]] to true.
-
+ - If stream@\[[state]] is not
"readable" , return undefined.
+ - If stream@\[[queue]] is empty, return CloseReadableStream(stream).
+
- Set stream@\[[draining]] to true.
CreateReadableStreamEnqueueFunction ( stream )
@@ -852,39 +771,48 @@ closing over a variable stream, that performs the following steps:
- If stream@\[[state]] is
"errored" , throw stream@\[[storedError]].
- If stream@\[[state]] is
"closed" , throw a TypeError exception.
- If stream@\[[draining]] is true, throw a TypeError exception.
-
- Let chunkSize be 1.
-
- Let strategy be Get(stream@\[[underlyingSource]],
"strategy" ).
- - If strategy is an abrupt completion,
+
- If IsReadableStreamLocked(stream) is true and stream@\[[reader]]@\[[readRequests]]
+ is not empty,
- - Call-with-rethrow Call(stream@\[[error]], undefined, «strategy.\[[value]]»).
-
- Return strategy.
+
- Let readRequestPromise be the first element of stream@\[[reader]]@\[[readRequests]].
+
- Remove readRequestPromise from stream@\[[reader]]@\[[readRequests]], shifting all other
+ elements downward (so that the second becomes the first, and so on).
+
- Resolve readRequestPromise with CreateIterResultObject(chunk, false).
- - Let strategy be strategy.\[[value]].
-
- If strategy is not undefined, then
+
- Otherwise,
- - Set chunkSize to Invoke(strategy,
"size" , «chunk»).
- - If chunkSize is an abrupt completion,
+
- Let chunkSize be 1.
+
- Let strategy be Get(stream@\[[underlyingSource]],
"strategy" ).
+ - If strategy is an abrupt completion,
+
+ - Call-with-rethrow Call(stream@\[[error]], undefined, «strategy.\[[value]]»).
+
- Return strategy.
+
+ - Let strategy be strategy.\[[value]].
+
- If strategy is not undefined, then
+
+ - Set chunkSize to Invoke(strategy,
"size" , «chunk»).
+ - If chunkSize is an abrupt completion,
+
+ - Call-with-rethrow Call(stream@\[[error]], undefined,
+ «chunkSize.\[[value]]»).
+
- Return chunkSize.
+
+ - Let chunkSize be chunkSize.\[[value]].
+
+ - Let enqueueResult be EnqueueValueWithSize(stream@\[[queue]], chunk,
+ chunkSize.\[[value]]).
+
- If enqueueResult is an abrupt completion,
- Call-with-rethrow Call(stream@\[[error]], undefined,
- «chunkSize.\[[value]]»).
-
- Return chunkSize.
+ «enqueueResult.\[[value]]»).
+
- Return enqueueResult.
- - Let chunkSize be chunkSize.\[[value]].
-
- - Let enqueueResult be EnqueueValueWithSize(stream@\[[queue]], chunk,
- chunkSize.\[[value]]).
-
- If enqueueResult is an abrupt completion,
-
- - Call-with-rethrow Call(stream@\[[error]], undefined,
- «enqueueResult.\[[value]]»).
-
- Return enqueueResult.
+ - Call-with-rethrow CallReadableStreamPull(stream).
- Let shouldApplyBackpressure be ShouldReadableStreamApplyBackpressure(stream).
-
- If stream@\[[state]] is
"waiting" ,
-
- - Call-with-rethrow MarkReadableStreamReadable(stream).
-
- - If shouldApplyBackpressure.\[[value]] is true, return false.
+
- ReturnIfAbrupt(shouldApplyBackpressure).
+
- If shouldApplyBackpressure is true, return false.
- Return true.
@@ -898,33 +826,21 @@ A Readable Stream Error Function is a built-in anonymous function of
a variable stream, that performs the following steps:
- - If stream@\[[state]] is
"closed" or "errored" , return undefined.
- - If stream@\[[state]] is
"readable" , let stream@\[[queue]] be a new empty List.
- - If IsReadableStreamLocked(stream) is true,
-
- - If stream@\[[state]] is
"waiting" , resolve
- stream@\[[readableStreamReader]]@\[[readyPromise]] with undefined.
- - Resolve stream@\[[readyPromise]] with undefined.
-
- Reject stream@\[[readableStreamReader]]@\[[closedPromise]] with e.
-
- Set stream@\[[readableStreamReader]]@\[[state]] to
"errored" .
- - Set stream@\[[readableStreamReader]] to undefined.
-
- - Otherwise,
-
- - If stream@\[[state]] is
"waiting" , resolve stream@\[[readyPromise]] with
- undefined.
-
+ - If stream@\[[state]] is not
"readable" return undefined.
+ - Let stream@\[[queue]] be a new empty List.
- Reject stream@\[[closedPromise]] with e.
- Set stream@\[[storedError]] to e.
- Set stream@\[[state]] to
"errored" .
-
-
-IsExclusiveStreamReader ( x )
-
-
- - If Type(x) is not Object, return false.
-
- If x does not have a \[[encapsulatedReadableStream]] internal slot, return false.
-
- Return true.
+
- If IsReadableStreamLocked(stream) is true,
+
+ - Reject stream@\[[reader]]@\[[closedPromise]] with e.
+
- Repeat for each readRequestPromise that is an element of
+ stream@\[[reader]]@\[[readRequests]],
+
+ - Reject readRequestPromise with e.
+
+ - Set stream@\[[reader]]@\[[readRequests]] to a new empty List.
+
IsReadableStream ( x )
@@ -944,59 +860,30 @@ a variable stream, that performs the following steps:
- Assert: IsReadableStream(stream) is true.
-
- If stream@\[[readableStreamReader]] is undefined, return false.
+
- If stream@\[[reader]] is undefined, return false.
- Return true.
-MarkReadableStreamReadable ( stream )
-
-
- - If IsReadableStreamLocked(stream) is true,
-
- - Resolve stream@\[[readableStreamReader]]@\[[readyPromise]] with undefined.
-
- Set stream@\[[readableStreamReader]]@\[[state]] to
"readable" .
-
- - Otherwise,
-
- - Resolve stream@\[[readyPromise]] with undefined.
-
- - Set stream@\[[state]] to
"readable" .
- - Return undefined.
-
-
-MarkReadableStreamWaiting ( stream )
+IsReadableStreamReader ( x )
- - If IsReadableStreamLocked(stream) is true,
-
- - Set stream@\[[readableStreamReader]]@\[[readyPromise]] to a new promise.
-
- Set stream@\[[readableStreamReader]]@\[[state]] to
"waiting" .
-
- - Otherwise,
-
- - Set stream@\[[readyPromise]] to a new promise.
-
- - Set stream@\[[state]] to
"waiting" .
- - Return undefined.
+
- If Type(x) is not Object, return false.
+
- If x does not have a \[[ownerReadableStream]] internal slot, return false.
+
- Return true.
-ReadFromReadableStream ( stream )
+ReleaseReadableStreamReader ( reader )
- - If stream@\[[state]] is
"waiting" or "closed" , throw a TypeError
- exception.
- - If stream@\[[state]] is
"errored" , throw stream@\[[storedError]].
- - Assert: stream@\[[state]] is
"readable" .
- - Assert: stream@\[[queue]] is not empty.
-
- Let chunk be DequeueValue(stream@\[[queue]]).
-
- If stream@\[[queue]] is now empty,
+
- Assert: reader@\[[ownerReadableStream]] is not undefined.
+
- Repeat for each readRequestPromise that is an element of reader@\[[readRequests]],
- - If stream@\[[draining]] is true, call-with-rethrow CloseReadableStream(stream).
-
- If stream@\[[draining]] is false, call-with-rethrow
- MarkReadableStreamWaiting(stream).
+
- Resolve readRequestPromise with CreateIterResultObject(undefined, true).
- - Call-with-rethrow CallReadableStreamPull(stream).
-
- Return chunk.
+
- Set reader@\[[readRequests]] to a new empty List.
+
- Set reader@\[[ownerReadableStream]]@\[[reader]] to undefined.
+
- Set reader@\[[ownerReadableStream]] to undefined.
+
- Resolve reader@\[[closedPromise]] with undefined.
ShouldReadableStreamApplyBackpressure ( stream )
@@ -1871,7 +1758,7 @@ standard, and other attributes { \[[Writable]]: true, \[[Enumerable]]: true }.
- The ExclusiveStreamReader class is specifically not exposed, as while it does have a
+ The ReadableStreamReader class is specifically not exposed, as while it does have a
functioning constructor, instances should instead be created through the getReader method of a
ReadableStream instance.
@@ -1881,28 +1768,35 @@ standard, and other attributes { \[[Writable]]: true, \[[Enumerable]]:
-Subclassing Streams
+Other Stream Implementations
This section is non-normative.
-Specific APIs may wish to subclass ReadableStream or WritableStream in order to provide
-additional functionality. Examples would include:
+The ReadableStream and WritableStream classes defined in this specification are not expected
+to be the only manifestations of the corresponding readable stream and writable stream concepts. They are
+explicitly meant to cooperate with other stream instances that behave similarly. Those instances could be e.g.
+platform- or developer-created subclasses of these classes, or they could be anything else that obeys the same public
+API contract.
+
+For example, we are already prototyping and planning an additional ReadableByteStream class, which will be
+a readable stream while not being a subclass of ReadableStream . It will have the same set of
+methods as a baseline ReadableStream , and can be used in the same way by most consumers, who will
+be agnostic to which type of readable stream they are using. However, specialized consumers who know they are dealing
+with a ReadableByteStream will be able to take advantage of extra APIs it provides for extremely efficient
+"bring-your-own-buffer" memory management.
+
+This kind of ecosystem is largely enabled by the genericness of the pipeTo method. Any object which has
+the appropriate public writable stream APIs will work with ReadableStream.prototype.pipeTo . We strongly
+ recommend that any readable stream class you create maintain this property for its pipeTo
+method as well. Even if they use specialized algorithms when given a specific type of writable stream, they should
+always fall back to an algorithm that works with any object obeying the writable stream contract.
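As a rough, non-normative illustration of that genericness (this is a toy object, not a complete or conforming writable stream; the members shown are those the reference implementation's pipeTo touches):

```js
// A duck-typed "writable stream" that just logs chunks.
const loggingSink = {
  state: "writable",
  ready: Promise.resolve(undefined),
  closed: new Promise(() => {}), // never settles in this toy example
  write(chunk) {
    console.log("chunk:", chunk);
    return Promise.resolve(undefined);
  },
  close() {
    this.state = "closed";
    return Promise.resolve(undefined);
  },
  abort(reason) {
    this.state = "errored";
    return Promise.resolve(undefined);
  }
};

readableStream.pipeTo(loggingSink).then(() => console.log("done piping"));
```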
-
- - A file stream that is constructed from a filename, and includes file metadata
- - A HTTP stream that is constructed from a URL, and includes header-accessing APIs
- - A TCP stream that overrides
read , ready , cancel , state , and
- closed to reflect and manipulate a kernel-level TCP buffer
-
-
-The first two examples here could be expressed by allowing their constructors to call super with
-appropriate functions that manipulate the stream's internal state. The latter takes a different approach, which gives
-both more power and more responsibility: it requires that the author of that stream manually implement many of the
-complex requirements baked into the readable stream state machine and the contract it presents to users.
-
-Because streams only interact through their public API, all streams—whether subclassed or not—can coexist and
-interoperate. For example, you can pipe to or from any of the above streams, without worrying what type of
-implementation is under the covers, since they all provide the appropriate properties and methods.
+
+ The keen reader may be asking for more precise definitions of the "contracts" in play here, for both readable and
+ writable streams. We're working on a test suite that should do the trick! For now, please check out our
+ templated tests
+ to see initial progress in that direction.
+
Examples of Creating Streams
@@ -2206,8 +2100,8 @@ APIs:
streamyWS.writable.write("Hello");
streamyWS.writable.write("web socket!");
- streamyWS.readable.ready.then(() => {
- console.log("The web socket says: ", streamyWS.readable.read());
+ streamyWS.readable.read().then(chunk => {
+ console.log("The web socket says: ", chunk);
});
@@ -2288,7 +2182,6 @@ itself will evolve in these ways.
The editor would like to thank
Tab Atkins,
-Bert Belder,
Marcos Caceres,
Tim Caswell,
Brian di Palma,
@@ -2305,9 +2198,11 @@ tzik
for their contributions to this specification.
Special thanks to:
+Bert Belder for bringing up implementation concerns that led
+ to crucial API changes,
Will Chan for his help ensuring that the API allows high-performance network streaming,
Janessa Det for the logo,
-Yutaka Hirano for his help with the exclusive reader design,
+Yutaka Hirano for his help with the readable stream reader design,
Gorgi Kosev for his breakthrough idea of separating piping into two methods, thus resolving
a major sticking point;
Forrest Norvell for his enthusiastic iteration on the reference implementation;
diff --git a/readable-stream.svg b/readable-stream.svg
index b35d2e7d0..dc579591d 100644
--- a/readable-stream.svg
+++ b/readable-stream.svg
@@ -43,11 +43,11 @@
-
+
- Waiting
+ Readable, empty
([[started]] = false)
@@ -90,11 +90,11 @@
-
+
- Waiting
+ Readable, empty
([[started]] = true)
@@ -123,11 +123,11 @@
-
+
- Readable
+ Readable, nonempty
([[draining]] = false)
@@ -166,11 +166,11 @@
-
+
- Readable
+ Readable, nonempty
([[draining]] = true)
diff --git a/reference-implementation/lib/exclusive-stream-reader.js b/reference-implementation/lib/exclusive-stream-reader.js
deleted file mode 100644
index 5fc7cfb01..000000000
--- a/reference-implementation/lib/exclusive-stream-reader.js
+++ /dev/null
@@ -1,148 +0,0 @@
-const assert = require('assert');
-import { ReadFromReadableStream, CancelReadableStream, CloseReadableStreamReader, IsExclusiveStreamReader,
- IsReadableStreamLocked } from './readable-stream-abstract-ops';
-
-export default class ExclusiveStreamReader {
- constructor(stream) {
- if (!('_readableStreamReader' in stream)) {
- throw new TypeError('ExclusiveStreamReader can only be used with ReadableStream objects or subclasses');
- }
-
- if (IsReadableStreamLocked(stream)) {
- throw new TypeError('This stream has already been locked for exclusive reading by another reader');
- }
-
- assert(stream._state === 'waiting' || stream._state === 'readable');
-
- // Update the states of the encapsulated stream to represent a locked stream.
- if (stream._state === 'readable') {
- stream._initReadyPromise();
- }
- stream._readableStreamReader = this;
-
- // Sync the states of this reader with the encapsulated stream.
- this._state = stream._state;
- if (stream._state === 'waiting') {
- this._initReadyPromise();
- } else {
- this._readyPromise = Promise.resolve(undefined);
- }
- this._initClosedPromise();
-
- this._encapsulatedReadableStream = stream;
- }
-
- get ready() {
- if (!IsExclusiveStreamReader(this)) {
- return Promise.reject(new TypeError('ExclusiveStreamReader.prototype.ready can only be used on a ' +
- 'ExclusiveStreamReader'));
- }
-
- return this._readyPromise;
- }
-
- get state() {
- if (!IsExclusiveStreamReader(this)) {
- throw new TypeError('ExclusiveStreamReader.prototype.state can only be used on a ExclusiveStreamReader');
- }
-
- return this._state;
- }
-
- get closed() {
- if (!IsExclusiveStreamReader(this)) {
- return Promise.reject(new TypeError('ExclusiveStreamReader.prototype.closed can only be used on a ' +
- 'ExclusiveStreamReader'));
- }
-
- return this._closedPromise;
- }
-
- get isActive() {
- if (!IsExclusiveStreamReader(this)) {
- throw new TypeError('ExclusiveStreamReader.prototype.isActive can only be used on a ExclusiveStreamReader');
- }
-
- return this._encapsulatedReadableStream._readableStreamReader === this;
- }
-
- read() {
- if (!IsExclusiveStreamReader(this)) {
- throw new TypeError('ExclusiveStreamReader.prototype.read can only be used on a ExclusiveStreamReader');
- }
-
- if (this._encapsulatedReadableStream._readableStreamReader !== this) {
- throw new TypeError('This stream reader has released its lock on the stream and can no longer be used');
- }
-
- // Bypass lock check.
- return ReadFromReadableStream(this._encapsulatedReadableStream);
- }
-
- cancel(reason) {
- if (!IsExclusiveStreamReader(this)) {
- return Promise.reject(new TypeError('ExclusiveStreamReader.prototype.cancel can only be used on a ' +
- 'ExclusiveStreamReader'));
- }
-
- if (this._encapsulatedReadableStream._readableStreamReader !== this) {
- return this._closedPromise;
- }
-
- // Bypass lock check.
- return CancelReadableStream(this._encapsulatedReadableStream, reason);
- }
-
- releaseLock() {
- if (!IsExclusiveStreamReader(this)) {
- throw new TypeError('ExclusiveStreamReader.prototype.releaseLock can only be used on a ExclusiveStreamReader');
- }
-
- if (this._encapsulatedReadableStream._readableStreamReader !== this) {
- return undefined;
- }
-
- // When the stream is errored or closed, the reader is released automatically. So, here, this._state is neither
- // 'closed' nor 'errored'.
- assert(this._state === 'waiting' || this._state === 'readable');
-
- CloseReadableStreamReader(this);
-
- if (this._encapsulatedReadableStream._state === 'readable') {
- this._encapsulatedReadableStream._resolveReadyPromise(undefined);
- }
- this._encapsulatedReadableStream._readableStreamReader = undefined;
- }
-
- // Utility functions
-
- _initReadyPromise() {
- this._readyPromise = new Promise((resolve, reject) => {
- this._readyPromise_resolve = resolve;
- });
- }
-
- _initClosedPromise() {
- this._closedPromise = new Promise((resolve, reject) => {
- this._closedPromise_resolve = resolve;
- this._closedPromise_reject = reject;
- });
- }
-
- _resolveReadyPromise(value) {
- this._readyPromise_resolve(value);
- this._readyPromise_resolve = null;
- }
-
- _resolveClosedPromise(value) {
- this._closedPromise_resolve(value);
- this._closedPromise_resolve = null;
- this._closedPromise_reject = null;
- }
-
- _rejectClosedPromise(reason) {
- this._closedPromise_reject(reason);
- this._closedPromise_resolve = null;
- this._closedPromise_reject = null;
- }
-}
diff --git a/reference-implementation/lib/helpers.js b/reference-implementation/lib/helpers.js
index 320fa8221..e34c0dc39 100644
--- a/reference-implementation/lib/helpers.js
+++ b/reference-implementation/lib/helpers.js
@@ -1,3 +1,5 @@
+const assert = require('assert');
+
export function promiseCall(func, ...args) {
try {
return Promise.resolve(func(...args));
@@ -23,6 +25,14 @@ export function toInteger(v) {
return Math.floor(Math.abs(v));
}
+export function CreateIterResultObject(value, done) {
+ assert(typeof done === 'boolean');
+ const obj = {};
+ Object.defineProperty(obj, 'value', { value: value, enumerable: true, writable: true, configurable: true });
+ Object.defineProperty(obj, 'done', { value: done, enumerable: true, writable: true, configurable: true });
+ return obj;
+}
+
export function InvokeOrNoop(O, P, args) {
const method = O[P];
if (method === undefined) {
diff --git a/reference-implementation/lib/readable-stream-abstract-ops.js b/reference-implementation/lib/readable-stream-abstract-ops.js
deleted file mode 100644
index 65abcb44d..000000000
--- a/reference-implementation/lib/readable-stream-abstract-ops.js
+++ /dev/null
@@ -1,299 +0,0 @@
-const assert = require('assert');
-import ExclusiveStreamReader from './exclusive-stream-reader';
-import { DequeueValue, EnqueueValueWithSize, GetTotalQueueSize } from './queue-with-sizes';
-import { PromiseInvokeOrNoop, typeIsObject } from './helpers';
-
-export function AcquireExclusiveStreamReader(stream) {
- if (stream._state === 'closed') {
- throw new TypeError('The stream has already been closed, so a reader cannot be acquired.');
- }
- if (stream._state === 'errored') {
- throw stream._storedError;
- }
-
- return new ExclusiveStreamReader(stream);
-}
-
-export function CallReadableStreamPull(stream) {
- if (stream._draining === true || stream._started === false ||
- stream._state === 'closed' || stream._state === 'errored' ||
- stream._pullScheduled === true) {
- return undefined;
- }
-
- if (stream._pullingPromise !== undefined) {
- stream._pullScheduled = true;
- stream._pullingPromise.then(() => {
- stream._pullScheduled = false;
- CallReadableStreamPull(stream);
- });
- return undefined;
- }
-
- const shouldApplyBackpressure = ShouldReadableStreamApplyBackpressure(stream);
- if (shouldApplyBackpressure === true) {
- return undefined;
- }
-
- stream._pullingPromise = PromiseInvokeOrNoop(stream._underlyingSource, 'pull', [stream._enqueue, stream._close]);
- stream._pullingPromise.then(
- () => { stream._pullingPromise = undefined; },
- e => { stream._error(e); }
- );
-
- return undefined;
-}
-
-export function CancelReadableStream(stream, reason) {
- if (stream._state === 'closed' || stream._state === 'errored') {
- return stream._closedPromise;
- }
-
- stream._queue = [];
- CloseReadableStream(stream);
-
- const sourceCancelPromise = PromiseInvokeOrNoop(stream._underlyingSource, 'cancel', [reason]);
- return sourceCancelPromise.then(() => undefined);
-}
-
-function CloseReadableStream(stream) {
- if (IsReadableStreamLocked(stream)) {
- CloseReadableStreamReader(stream._readableStreamReader);
-
- stream._readableStreamReader = undefined;
-
- // rs.ready() was pending because there was a reader.
- stream._resolveReadyPromise(undefined);
- } else if (stream._state === 'waiting') {
- stream._resolveReadyPromise(undefined);
- }
-
- stream._resolveClosedPromise(undefined);
-
- stream._state = 'closed';
-
- return undefined;
-}
-
-export function CloseReadableStreamReader(reader) {
- if (reader._state === 'waiting') {
- reader._resolveReadyPromise(undefined);
- }
- reader._resolveClosedPromise(undefined);
- reader._state = 'closed';
-}
-
-export function CreateReadableStreamCloseFunction(stream) {
- return () => {
- if (stream._state === 'waiting') {
- CloseReadableStream(stream);
- }
- if (stream._state === 'readable') {
- stream._draining = true;
- }
- };
-}
-
-export function CreateReadableStreamEnqueueFunction(stream) {
- return chunk => {
- if (stream._state === 'errored') {
- throw stream._storedError;
- }
-
- if (stream._state === 'closed') {
- throw new TypeError('stream is closed');
- }
-
- if (stream._draining === true) {
- throw new TypeError('stream is draining');
- }
-
- let chunkSize = 1;
-
- let strategy;
- try {
- strategy = stream._underlyingSource.strategy;
- } catch (strategyE) {
- stream._error(strategyE);
- throw strategyE;
- }
-
- if (strategy !== undefined) {
- try {
- chunkSize = strategy.size(chunk);
- } catch (chunkSizeE) {
- stream._error(chunkSizeE);
- throw chunkSizeE;
- }
- }
-
- try {
- EnqueueValueWithSize(stream._queue, chunk, chunkSize);
- } catch (enqueueE) {
- stream._error(enqueueE);
- throw enqueueE;
- }
-
-
- const shouldApplyBackpressure = ShouldReadableStreamApplyBackpressure(stream);
-
- if (stream._state === 'waiting') {
- MarkReadableStreamReadable(stream);
- }
-
- if (shouldApplyBackpressure === true) {
- return false;
- }
- return true;
- };
-}
-
-export function CreateReadableStreamErrorFunction(stream) {
- return e => {
- if (stream._state === 'closed' || stream._state === 'errored') {
- return;
- }
-
- if (stream._state === 'readable') {
- stream._queue = [];
- }
-
- if (IsReadableStreamLocked(stream)) {
- if (stream._state === 'waiting') {
- stream._readableStreamReader._resolveReadyPromise(undefined);
- }
-
- // rs.ready() was pending because there was a reader.
- stream._resolveReadyPromise(undefined);
-
- stream._readableStreamReader._rejectClosedPromise(e);
-
- stream._readableStreamReader._state = 'errored';
-
- stream._readableStreamReader = undefined;
- } else if (stream._state === 'waiting') {
- stream._resolveReadyPromise(undefined);
- }
- stream._rejectClosedPromise(e);
-
- stream._storedError = e;
- stream._state = 'errored';
-
- return undefined;
- };
-}
-
-export function IsExclusiveStreamReader(x) {
- if (!typeIsObject(x)) {
- return false;
- }
-
- if (!Object.prototype.hasOwnProperty.call(x, '_encapsulatedReadableStream')) {
- return false;
- }
-
- return true;
-}
-
-export function IsReadableStreamLocked(stream) {
- assert(IsReadableStream(stream) === true, 'IsReadableStreamLocked should only be used on known readable streams');
-
- if (stream._readableStreamReader === undefined) {
- return false;
- }
-
- return true;
-}
-
-export function IsReadableStream(x) {
- if (!typeIsObject(x)) {
- return false;
- }
-
- if (!Object.prototype.hasOwnProperty.call(x, '_underlyingSource')) {
- return false;
- }
-
- return true;
-}
-
-function MarkReadableStreamReadable(stream) {
- if (IsReadableStreamLocked(stream)) {
- stream._readableStreamReader._resolveReadyPromise(undefined);
-
- stream._readableStreamReader._state = 'readable';
- } else {
- stream._resolveReadyPromise(undefined);
- }
-
- stream._state = 'readable';
-
- return undefined;
-}
-
-function MarkReadableStreamWaiting(stream) {
- if (IsReadableStreamLocked(stream)) {
- stream._readableStreamReader._initReadyPromise();
-
- stream._readableStreamReader._state = 'waiting';
- } else {
- stream._initReadyPromise();
- }
-
- stream._state = 'waiting';
-
- return undefined;
-}
-
-export function ReadFromReadableStream(stream) {
- if (stream._state === 'waiting') {
- throw new TypeError('no chunks available (yet)');
- }
- if (stream._state === 'closed') {
- throw new TypeError('stream has already been consumed');
- }
- if (stream._state === 'errored') {
- throw stream._storedError;
- }
-
- assert(stream._state === 'readable', `stream state ${stream._state} is invalid`);
- assert(stream._queue.length > 0, 'there must be chunks available to read');
-
- const chunk = DequeueValue(stream._queue);
-
- if (stream._queue.length === 0) {
- if (stream._draining === true) {
- CloseReadableStream(stream);
- } else {
- MarkReadableStreamWaiting(stream);
- }
- }
-
- CallReadableStreamPull(stream);
-
- return chunk;
-}
-
-export function ShouldReadableStreamApplyBackpressure(stream) {
- const queueSize = GetTotalQueueSize(stream._queue);
- let shouldApplyBackpressure = queueSize > 1;
-
- let strategy;
- try {
- strategy = stream._underlyingSource.strategy;
- } catch (strategyE) {
- stream._error(strategyE);
- throw strategyE;
- }
-
- if (strategy !== undefined) {
- try {
- shouldApplyBackpressure = Boolean(strategy.shouldApplyBackpressure(queueSize));
- } catch (shouldApplyBackpressureE) {
- stream._error(shouldApplyBackpressureE);
- throw shouldApplyBackpressureE;
- }
- }
-
- return shouldApplyBackpressure;
-}
diff --git a/reference-implementation/lib/readable-stream.js b/reference-implementation/lib/readable-stream.js
index 1ab33e700..4b716a34f 100644
--- a/reference-implementation/lib/readable-stream.js
+++ b/reference-implementation/lib/readable-stream.js
@@ -1,27 +1,25 @@
const assert = require('assert');
-import * as helpers from './helpers';
-import { AcquireExclusiveStreamReader, CallReadableStreamPull, CancelReadableStream, CreateReadableStreamCloseFunction,
- CreateReadableStreamEnqueueFunction, CreateReadableStreamErrorFunction, IsReadableStream, IsReadableStreamLocked,
- ReadFromReadableStream, ShouldReadableStreamApplyBackpressure } from './readable-stream-abstract-ops';
+import { CreateIterResultObject, InvokeOrNoop, PromiseInvokeOrNoop, typeIsObject } from './helpers';
+import { DequeueValue, EnqueueValueWithSize, GetTotalQueueSize } from './queue-with-sizes';
export default class ReadableStream {
constructor(underlyingSource = {}) {
this._underlyingSource = underlyingSource;
- this._initReadyPromise();
this._initClosedPromise();
this._queue = [];
- this._state = 'waiting';
+ this._state = 'readable';
this._started = false;
this._draining = false;
this._pullScheduled = false;
+ this._reader = undefined;
this._pullingPromise = undefined;
- this._readableStreamReader = undefined;
+ this._storedError = undefined;
this._enqueue = CreateReadableStreamEnqueueFunction(this);
this._close = CreateReadableStreamCloseFunction(this);
this._error = CreateReadableStreamErrorFunction(this);
- const startResult = helpers.InvokeOrNoop(underlyingSource, 'start', [this._enqueue, this._close, this._error]);
+ const startResult = InvokeOrNoop(underlyingSource, 'start', [this._enqueue, this._close, this._error]);
Promise.resolve(startResult).then(
() => {
this._started = true;
@@ -32,55 +30,30 @@ export default class ReadableStream {
}
get closed() {
- if (!IsReadableStream(this)) {
+ if (IsReadableStream(this) === false) {
return Promise.reject(new TypeError('ReadableStream.prototype.closed can only be used on a ReadableStream'));
}
return this._closedPromise;
}
- get state() {
- if (!IsReadableStream(this)) {
- throw new TypeError('ReadableStream.prototype.state can only be used on a ReadableStream');
- }
-
- if (IsReadableStreamLocked(this)) {
- return 'waiting';
- }
-
- return this._state;
- }
-
cancel(reason) {
- if (!IsReadableStream(this)) {
+ if (IsReadableStream(this) === false) {
return Promise.reject(new TypeError('ReadableStream.prototype.cancel can only be used on a ReadableStream'));
}
- if (IsReadableStreamLocked(this)) {
- return Promise.reject(
- new TypeError('This stream is locked to a single exclusive reader and cannot be cancelled directly'));
- }
-
return CancelReadableStream(this, reason);
}
getReader() {
- if (!IsReadableStream(this)) {
+ if (IsReadableStream(this) === false) {
throw new TypeError('ReadableStream.prototype.getReader can only be used on a ReadableStream');
}
- return AcquireExclusiveStreamReader(this);
+ return AcquireReadableStreamReader(this);
}
pipeThrough({ writable, readable }, options) {
- if (!helpers.typeIsObject(writable)) {
- throw new TypeError('A transform stream must have an writable property that is an object.');
- }
-
- if (!helpers.typeIsObject(readable)) {
- throw new TypeError('A transform stream must have a readable property that is an object.');
- }
-
this.pipeTo(writable, options);
return readable;
}
@@ -90,7 +63,11 @@ export default class ReadableStream {
preventAbort = Boolean(preventAbort);
preventCancel = Boolean(preventCancel);
- let source;
+ const source = this;
+
+ let reader;
+ let lastRead;
+ let closedPurposefully = false;
let resolvePipeToPromise;
let rejectPipeToPromise;
@@ -98,57 +75,60 @@ export default class ReadableStream {
resolvePipeToPromise = resolve;
rejectPipeToPromise = reject;
- source = this.getReader();
+ reader = source.getReader();
+
+ source.closed.catch(abortDest);
+ dest.closed.then(
+ () => {
+ if (!closedPurposefully) {
+ cancelSource(new TypeError('destination is closing or closed and cannot be piped to anymore'));
+ }
+ },
+ cancelSource
+ );
+
doPipe();
});
function doPipe() {
- for (;;) {
- const ds = dest.state;
- if (ds === 'writable') {
- if (source.state === 'readable') {
- dest.write(source.read());
- continue;
- } else if (source.state === 'waiting') {
- Promise.race([source.ready, dest.closed]).then(doPipe, doPipe);
- } else if (source.state === 'errored') {
- source.closed.catch(abortDest);
- } else if (source.state === 'closed') {
- closeDest();
- }
- } else if (ds === 'waiting') {
- if (source.state === 'readable') {
- Promise.race([source.closed, dest.ready]).then(doPipe, doPipe);
- } else if (source.state === 'waiting') {
- Promise.race([source.ready, dest.ready]).then(doPipe);
- } else if (source.state === 'errored') {
- source.closed.catch(abortDest);
- } else if (source.state === 'closed') {
- closeDest();
- }
- } else if (ds === 'errored' && (source.state === 'readable' || source.state === 'waiting')) {
- dest.closed.catch(cancelSource);
- } else if ((ds === 'closing' || ds === 'closed') &&
- (source.state === 'readable' || source.state === 'waiting')) {
- cancelSource(new TypeError('destination is closing or closed and cannot be piped to anymore'));
+ lastRead = reader.read();
+ Promise.all([lastRead, dest.ready]).then(([{ value, done }]) => {
+ if (Boolean(done) === true) {
+ closeDest();
+ } else if (dest.state === 'writable') {
+ dest.write(value);
+ doPipe();
}
- return;
- }
+ });
+
+ // Any failures will be handled by listening to source.closed and dest.closed above.
+ // TODO: handle malicious dest.write/dest.close?
}
function cancelSource(reason) {
if (preventCancel === false) {
- // implicitly releases the lock
+ // cancelling automatically releases the lock (and that doesn't fail, since source is then closed)
source.cancel(reason);
+ rejectPipeToPromise(reason);
} else {
- source.releaseLock();
+ // If we don't cancel, we need to wait for lastRead to finish before we're allowed to release.
+ // We don't need to handle lastRead failing because that will trigger abortDest which takes care of
+ // both of these.
+ lastRead.then(() => {
+ reader.releaseLock();
+ rejectPipeToPromise(reason);
+ });
}
- rejectPipeToPromise(reason);
}
function closeDest() {
- source.releaseLock();
- if (preventClose === false) {
+ // Does not need to wait for lastRead since it occurs only on source closed.
+
+ reader.releaseLock();
+
+ const destState = dest.state;
+ if (preventClose === false && (destState === 'waiting' || destState === 'writable')) {
+ closedPurposefully = true;
dest.close().then(resolvePipeToPromise, rejectPipeToPromise);
} else {
resolvePipeToPromise();
@@ -156,7 +136,10 @@ export default class ReadableStream {
}
function abortDest(reason) {
- source.releaseLock();
+ // Does not need to wait for lastRead since it only occurs on source errored.
+
+ reader.releaseLock();
+
if (preventAbort === false) {
dest.abort(reason);
}
@@ -164,59 +147,360 @@ export default class ReadableStream {
}
}
+
+ // Note: The resolve function and reject function are cleared when the corresponding promise is resolved or rejected.
+ // This is for debugging. This makes extra resolve/reject calls for the same promise fail so that we can detect
+ // unexpected extra resolve/reject calls that may be caused by bugs in the algorithm.
+
+ _initClosedPromise() {
+ this._closedPromise = new Promise((resolve, reject) => {
+ this._closedPromise_resolve = resolve;
+ this._closedPromise_reject = reject;
+ });
+ }
+
+ _resolveClosedPromise(value) {
+ this._closedPromise_resolve(value);
+ this._closedPromise_resolve = null;
+ this._closedPromise_reject = null;
+ }
+
+ _rejectClosedPromise(reason) {
+ this._closedPromise_reject(reason);
+ this._closedPromise_resolve = null;
+ this._closedPromise_reject = null;
+ }
+};
+
+class ReadableStreamReader {
+ constructor(stream) {
+ if (IsReadableStream(stream) === false) {
+ throw new TypeError('ReadableStreamReader can only be constructed with a ReadableStream instance');
+ }
+ if (stream._state === 'closed') {
+ throw new TypeError('The stream has already been closed, so a reader cannot be acquired');
+ }
+ if (stream._state === 'errored') {
+ throw stream._storedError;
+ }
+ if (IsReadableStreamLocked(stream) === true) {
+ throw new TypeError('This stream has already been locked for exclusive reading by another reader');
+ }
+
+ stream._reader = this;
+ this._ownerReadableStream = stream;
+
+ this._readRequests = [];
+
+ this._closedPromise = new Promise((resolve, reject) => {
+ this._closedPromise_resolve = resolve;
+ this._closedPromise_reject = reject;
+ });
+ }
+
+ get closed() {
+ if (IsReadableStreamReader(this) === false) {
+ return Promise.reject(
+ new TypeError('ReadableStreamReader.prototype.closed can only be used on a ReadableStreamReader'));
+ }
+
+ return this._closedPromise;
+ }
+
+ get isActive() {
+ if (IsReadableStreamReader(this) === false) {
+ throw new TypeError('ReadableStreamReader.prototype.isActive can only be used on a ReadableStreamReader');
+ }
+
+ return this._ownerReadableStream !== undefined;
+ }
+
+ cancel(reason) {
+ if (IsReadableStreamReader(this) === false) {
+ return Promise.reject(
+ new TypeError('ReadableStreamReader.prototype.cancel can only be used on a ReadableStreamReader'));
+ }
+
+ if (this._ownerReadableStream === undefined) {
+ return Promise.resolve(undefined);
+ }
+
+ return CancelReadableStream(this._ownerReadableStream, reason);
+ }
+
read() {
- if (!IsReadableStream(this)) {
- throw new TypeError('ReadableStream.prototype.read can only be used on a ReadableStream');
+ if (IsReadableStreamReader(this) === false) {
+ return Promise.reject(
+ new TypeError('ReadableStreamReader.prototype.read can only be used on a ReadableStreamReader'));
+ }
+
+ if (this._ownerReadableStream === undefined || this._ownerReadableStream._state === 'closed') {
+ return Promise.resolve(CreateIterResultObject(undefined, true));
}
- if (IsReadableStreamLocked(this)) {
- throw new TypeError('This stream is locked to a single exclusive reader and cannot be read from directly');
+ if (this._ownerReadableStream._state === 'errored') {
+ return Promise.reject(this._ownerReadableStream._storedError);
}
- return ReadFromReadableStream(this);
+ if (this._ownerReadableStream._queue.length > 0) {
+ const chunk = DequeueValue(this._ownerReadableStream._queue);
+
+ if (this._ownerReadableStream._draining === true && this._ownerReadableStream._queue.length === 0) {
+ CloseReadableStream(this._ownerReadableStream);
+ } else {
+ CallReadableStreamPull(this._ownerReadableStream);
+ }
+
+ return Promise.resolve(CreateIterResultObject(chunk, false));
+ } else {
+ const readRequest = {};
+ readRequest.promise = new Promise((resolve, reject) => {
+ readRequest._resolve = resolve;
+ readRequest._reject = reject;
+ });
+
+ this._readRequests.push(readRequest);
+ return readRequest.promise;
+ }
}
- get ready() {
- if (!IsReadableStream(this)) {
- return Promise.reject(new TypeError('ReadableStream.prototype.ready can only be used on a ReadableStream'));
+ releaseLock() {
+ if (IsReadableStreamReader(this) === false) {
+ throw new TypeError('ReadableStreamReader.prototype.releaseLock can only be used on a ReadableStreamReader');
+ }
+
+ if (this._ownerReadableStream === undefined) {
+ return undefined;
}
- return this._readyPromise;
+ if (this._readRequests.length > 0) {
+ throw new TypeError('Tried to release a reader lock when that reader has pending read() calls un-settled');
+ }
+
+ ReleaseReadableStreamReader(this);
}
+}
- _initReadyPromise() {
- this._readyPromise = new Promise((resolve, reject) => {
- this._readyPromise_resolve = resolve;
- });
+function AcquireReadableStreamReader(stream) {
+ return new ReadableStreamReader(stream);
+}
+
+function CallReadableStreamPull(stream) {
+ if (stream._draining === true || stream._started === false ||
+ stream._state === 'closed' || stream._state === 'errored' ||
+ stream._pullScheduled === true) {
+ return undefined;
}
- _initClosedPromise() {
- this._closedPromise = new Promise((resolve, reject) => {
- this._closedPromise_resolve = resolve;
- this._closedPromise_reject = reject;
+ if (stream._pullingPromise !== undefined) {
+ stream._pullScheduled = true;
+ stream._pullingPromise.then(() => {
+ stream._pullScheduled = false;
+ CallReadableStreamPull(stream);
});
+ return undefined;
+ }
+
+ const shouldApplyBackpressure = ShouldReadableStreamApplyBackpressure(stream);
+ if (shouldApplyBackpressure === true) {
+ return undefined;
}
- // Note: The resolve function and reject function are cleared when the
- // corresponding promise is resolved or rejected. This is for debugging. This
- // makes extra resolve/reject calls for the same promise fail so that we can
- // detect unexpected extra resolve/reject calls that may be caused by bugs in
- // the algorithm.
+ stream._pullingPromise = PromiseInvokeOrNoop(stream._underlyingSource, 'pull', [stream._enqueue, stream._close]);
+ stream._pullingPromise.then(
+ () => { stream._pullingPromise = undefined; },
+ e => { stream._error(e); }
+ );
- _resolveReadyPromise(value) {
- this._readyPromise_resolve(value);
- this._readyPromise_resolve = null;
+ return undefined;
+}
+
+function CancelReadableStream(stream, reason) {
+ if (stream._state === 'closed') {
+ return Promise.resolve(undefined);
+ }
+ if (stream._state === 'errored') {
+ return Promise.reject(stream._storedError);
}
- _resolveClosedPromise(value) {
- this._closedPromise_resolve(value);
- this._closedPromise_resolve = null;
- this._closedPromise_reject = null;
+ stream._queue = [];
+ CloseReadableStream(stream);
+
+ const sourceCancelPromise = PromiseInvokeOrNoop(stream._underlyingSource, 'cancel', [reason]);
+ return sourceCancelPromise.then(() => undefined);
+}
+
+function CloseReadableStream(stream) {
+ assert(stream._state === 'readable');
+
+ stream._resolveClosedPromise(undefined);
+ stream._state = 'closed';
+
+ if (IsReadableStreamLocked(stream) === true) {
+ return ReleaseReadableStreamReader(stream._reader);
}
- _rejectClosedPromise(reason) {
- this._closedPromise_reject(reason);
- this._closedPromise_resolve = null;
- this._closedPromise_reject = null;
+ return undefined;
+}
+
+function CreateReadableStreamCloseFunction(stream) {
+ return () => {
+ if (stream._state !== 'readable') {
+ return undefined;
+ }
+
+ if (stream._queue.length === 0) {
+ return CloseReadableStream(stream);
+ }
+
+ stream._draining = true;
+ };
+}
+
+function CreateReadableStreamEnqueueFunction(stream) {
+ return chunk => {
+ if (stream._state === 'errored') {
+ throw stream._storedError;
+ }
+
+ if (stream._state === 'closed') {
+ throw new TypeError('stream is closed');
+ }
+
+ if (stream._draining === true) {
+ throw new TypeError('stream is draining');
+ }
+
+ if (IsReadableStreamLocked(stream) === true && stream._reader._readRequests.length > 0) {
+ const readRequest = stream._reader._readRequests.shift();
+ readRequest._resolve(CreateIterResultObject(chunk, false));
+ } else {
+ let chunkSize = 1;
+
+ let strategy;
+ try {
+ strategy = stream._underlyingSource.strategy;
+ } catch (strategyE) {
+ stream._error(strategyE);
+ throw strategyE;
+ }
+
+ if (strategy !== undefined) {
+ try {
+ chunkSize = strategy.size(chunk);
+ } catch (chunkSizeE) {
+ stream._error(chunkSizeE);
+ throw chunkSizeE;
+ }
+ }
+
+ try {
+ EnqueueValueWithSize(stream._queue, chunk, chunkSize);
+ } catch (enqueueE) {
+ stream._error(enqueueE);
+ throw enqueueE;
+ }
+ }
+
+ CallReadableStreamPull(stream);
+
+ const shouldApplyBackpressure = ShouldReadableStreamApplyBackpressure(stream);
+ if (shouldApplyBackpressure === true) {
+ return false;
+ }
+ return true;
+ };
+}
+
+function CreateReadableStreamErrorFunction(stream) {
+ return e => {
+ if (stream._state !== 'readable') {
+ return;
+ }
+
+ stream._queue = [];
+ stream._rejectClosedPromise(e);
+ stream._storedError = e;
+ stream._state = 'errored';
+
+ if (IsReadableStreamLocked(stream) === true) {
+ stream._reader._closedPromise_reject(e);
+
+ for (const { _reject } of stream._reader._readRequests) {
+ _reject(e);
+ }
+ stream._reader._readRequests = [];
+ }
+ };
+}
+
+function IsReadableStream(x) {
+ if (!typeIsObject(x)) {
+ return false;
+ }
+
+ if (!Object.prototype.hasOwnProperty.call(x, '_underlyingSource')) {
+ return false;
+ }
+
+ return true;
+}
+
+function IsReadableStreamLocked(stream) {
+ assert(IsReadableStream(stream) === true, 'IsReadableStreamLocked should only be used on known readable streams');
+
+ if (stream._reader === undefined) {
+ return false;
+ }
+
+ return true;
+}
+
+function IsReadableStreamReader(x) {
+ if (!typeIsObject(x)) {
+ return false;
+ }
+
+ if (!Object.prototype.hasOwnProperty.call(x, '_ownerReadableStream')) {
+ return false;
+ }
+
+ return true;
+}
+
+function ReleaseReadableStreamReader(reader) {
+ assert(reader._ownerReadableStream !== undefined);
+
+ for (const { _resolve } of reader._readRequests) {
+ _resolve(CreateIterResultObject(undefined, true));
}
+ reader._readRequests = [];
+
+ reader._ownerReadableStream._reader = undefined;
+ reader._ownerReadableStream = undefined;
+ reader._closedPromise_resolve(undefined);
+}
+
+function ShouldReadableStreamApplyBackpressure(stream) {
+ const queueSize = GetTotalQueueSize(stream._queue);
+ let shouldApplyBackpressure = queueSize > 1;
+
+ let strategy;
+ try {
+ strategy = stream._underlyingSource.strategy;
+ } catch (strategyE) {
+ stream._error(strategyE);
+ throw strategyE;
+ }
+
+ if (strategy !== undefined) {
+ try {
+ shouldApplyBackpressure = Boolean(strategy.shouldApplyBackpressure(queueSize));
+ } catch (shouldApplyBackpressureE) {
+ stream._error(shouldApplyBackpressureE);
+ throw shouldApplyBackpressureE;
+ }
+ }
+
+ return shouldApplyBackpressure;
}
diff --git a/reference-implementation/lib/transform-stream.js b/reference-implementation/lib/transform-stream.js
index 6ad82b6a7..c3ef2bff6 100644
--- a/reference-implementation/lib/transform-stream.js
+++ b/reference-implementation/lib/transform-stream.js
@@ -19,9 +19,7 @@ export default class TransformStream {
chunkWrittenButNotYetTransformed = true;
const p = new Promise(resolve => writeDone = resolve);
- if (readable.state === 'waiting') {
- maybeDoTransform();
- }
+ maybeDoTransform();
return p;
},
close() {
diff --git a/reference-implementation/run-tests.js b/reference-implementation/run-tests.js
index 7d6e1463d..c68ed73b7 100644
--- a/reference-implementation/run-tests.js
+++ b/reference-implementation/run-tests.js
@@ -16,6 +16,12 @@ global.CountQueuingStrategy = CountQueuingStrategy;
global.TransformStream = TransformStream;
-const tests = glob.sync(path.resolve(__dirname, 'test/*.js'));
-const experimentalTests = glob.sync(path.resolve(__dirname, 'test/experimental/*.js'));
-tests.concat(experimentalTests).forEach(require);
+if (process.argv.length === 3) {
+ const tests = glob.sync(path.resolve(__dirname, 'test/*.js'));
+
+ // disable experimental tests while we figure out impact of async read on ReadableByteStream
+ const experimentalTests = []; // glob.sync(path.resolve(__dirname, 'test/experimental/*.js'));
+ tests.concat(experimentalTests).forEach(require);
+} else {
+ glob.sync(path.resolve(process.argv[3])).forEach(require);
+}
diff --git a/reference-implementation/test/bad-underlying-sources.js b/reference-implementation/test/bad-underlying-sources.js
index aecdf6f91..27562d725 100644
--- a/reference-implementation/test/bad-underlying-sources.js
+++ b/reference-implementation/test/bad-underlying-sources.js
@@ -1,6 +1,6 @@
const test = require('tape-catch');
-test('Throwing underlying source start getter', t => {
+test('Underlying source start: throwing getter', t => {
const theError = new Error('a unique string');
t.throws(() => {
@@ -9,11 +9,11 @@ test('Throwing underlying source start getter', t => {
throw theError;
}
});
- }, /a unique string/);
+ }, /a unique string/, 'constructing the stream should re-throw the error');
t.end();
});
-test('Throwing underlying source start method', t => {
+test('Underlying source start: throwing method', t => {
const theError = new Error('a unique string');
t.throws(() => {
@@ -22,11 +22,11 @@ test('Throwing underlying source start method', t => {
throw theError;
}
});
- }, /a unique string/);
+ }, /a unique string/, 'constructing the stream should re-throw the error');
t.end();
});
-test('Throwing underlying source pull getter (initial pull)', t => {
+test('Underlying source: throwing pull getter (initial pull)', t => {
t.plan(1);
const theError = new Error('a unique string');
@@ -42,7 +42,7 @@ test('Throwing underlying source pull getter (initial pull)', t => {
);
});
-test('Throwing underlying source pull method (initial pull)', t => {
+test('Underlying source: throwing pull method (initial pull)', t => {
t.plan(1);
const theError = new Error('a unique string');
@@ -58,8 +58,8 @@ test('Throwing underlying source pull method (initial pull)', t => {
);
});
-test('Throwing underlying source pull getter (second pull)', t => {
- t.plan(3);
+test('Underlying source: throwing pull getter (second pull)', t => {
+ t.plan(2);
const theError = new Error('a unique string');
let counter = 0;
@@ -74,10 +74,7 @@ test('Throwing underlying source pull getter (second pull)', t => {
}
});
- rs.ready.then(() => {
- t.equal(rs.state, 'readable', 'sanity check: the stream becomes readable without issue');
- t.equal(rs.read(), 'a', 'the initially-enqueued chunk can be read from the stream');
- });
+ rs.getReader().read().then(r => t.deepEqual(r, { value: 'a', done: false }, 'the chunk read should be correct'));
rs.closed.then(
() => t.fail('closed should not fulfill'),
@@ -85,8 +82,8 @@ test('Throwing underlying source pull getter (second pull)', t => {
);
});
-test('Throwing underlying source pull method (second pull)', t => {
- t.plan(3);
+test('Underlying source: throwing pull method (second pull)', t => {
+ t.plan(2);
const theError = new Error('a unique string');
let counter = 0;
@@ -101,10 +98,7 @@ test('Throwing underlying source pull method (second pull)', t => {
}
});
- rs.ready.then(() => {
- t.equal(rs.state, 'readable', 'sanity check: the stream becomes readable without issue');
- t.equal(rs.read(), 'a', 'the initially-enqueued chunk can be read from the stream');
- });
+ rs.getReader().read().then(r => t.deepEqual(r, { value: 'a', done: false }, 'the chunk read should be correct'));
rs.closed.then(
() => t.fail('closed should not fulfill'),
@@ -112,7 +106,7 @@ test('Throwing underlying source pull method (second pull)', t => {
);
});
-test('Throwing underlying source cancel getter', t => {
+test('Underlying source: throwing cancel getter', t => {
t.plan(1);
const theError = new Error('a unique string');
@@ -128,7 +122,7 @@ test('Throwing underlying source cancel getter', t => {
);
});
-test('Throwing underlying source cancel method', t => {
+test('Underlying source: throwing cancel method', t => {
t.plan(1);
const theError = new Error('a unique string');
@@ -144,30 +138,30 @@ test('Throwing underlying source cancel method', t => {
);
});
-test('Throwing underlying source strategy getter', t => {
+test('Underlying source: throwing strategy getter', t => {
t.plan(2);
const theError = new Error('a unique string');
const rs = new ReadableStream({
start(enqueue) {
- t.throws(() => enqueue('a'), /a unique string/);
+ t.throws(() => enqueue('a'), /a unique string/, 'enqueue should throw the error');
},
get strategy() {
throw theError;
}
});
- t.equal(rs.state, 'errored', 'state should be errored');
+ rs.closed.catch(e => t.equal(e, theError, 'closed should reject with the error'));
});
-test('Throwing underlying source strategy.size getter', t => {
+test('Underlying source: throwing strategy.size getter', t => {
t.plan(2);
const theError = new Error('a unique string');
const rs = new ReadableStream({
start(enqueue) {
- t.throws(() => enqueue('a'), /a unique string/);
+ t.throws(() => enqueue('a'), /a unique string/, 'enqueue should throw the error');
},
strategy: {
get size() {
@@ -179,16 +173,16 @@ test('Throwing underlying source strategy.size getter', t => {
}
});
- t.equal(rs.state, 'errored', 'state should be errored');
+ rs.closed.catch(e => t.equal(e, theError, 'closed should reject with the error'));
});
-test('Throwing underlying source strategy.size method', t => {
+test('Underlying source: throwing strategy.size method', t => {
t.plan(2);
const theError = new Error('a unique string');
const rs = new ReadableStream({
start(enqueue) {
- t.throws(() => enqueue('a'), /a unique string/);
+ t.throws(() => enqueue('a'), /a unique string/, 'enqueue should throw the error');
},
strategy: {
size() {
@@ -200,16 +194,16 @@ test('Throwing underlying source strategy.size method', t => {
}
});
- t.equal(rs.state, 'errored', 'state should be errored');
+ rs.closed.catch(e => t.equal(e, theError, 'closed should reject with the error'));
});
-test('Throwing underlying source strategy.shouldApplyBackpressure getter', t => {
+test('Underlying source: throwing strategy.shouldApplyBackpressure getter', t => {
t.plan(2);
const theError = new Error('a unique string');
const rs = new ReadableStream({
start(enqueue) {
- t.throws(() => enqueue('a'), /a unique string/);
+ t.throws(() => enqueue('a'), /a unique string/, 'enqueue should throw the error');
},
strategy: {
size() {
@@ -221,16 +215,16 @@ test('Throwing underlying source strategy.shouldApplyBackpressure getter', t =>
}
});
- t.equal(rs.state, 'errored', 'state should be errored');
+ rs.closed.catch(e => t.equal(e, theError, 'closed should reject with the error'));
});
-test('Throwing underlying source strategy.shouldApplyBackpressure method', t => {
+test('Underlying source: throwing strategy.shouldApplyBackpressure method', t => {
t.plan(2);
const theError = new Error('a unique string');
const rs = new ReadableStream({
start(enqueue) {
- t.throws(() => enqueue('a'), /a unique string/);
+ t.throws(() => enqueue('a'), /a unique string/, 'enqueue should throw the error');
},
strategy: {
size() {
@@ -242,5 +236,86 @@ test('Throwing underlying source strategy.shouldApplyBackpressure method', t =>
}
});
- t.equal(rs.state, 'errored', 'state should be errored');
+ rs.closed.catch(e => t.equal(e, theError, 'closed should reject with the error'));
+});
+
+test('Underlying source: strategy.size returning NaN', t => {
+ t.plan(2);
+
+ let theError;
+ const rs = new ReadableStream({
+ start(enqueue) {
+ try {
+ enqueue('hi');
+ t.fail('enqueue didn\'t throw');
+ } catch (error) {
+ t.equal(error.constructor, RangeError, 'enqueue should throw a RangeError');
+ theError = error;
+ }
+ },
+ strategy: {
+ size() {
+ return NaN;
+ },
+ shouldApplyBackpressure() {
+ return true;
+ }
+ }
+ });
+
+ rs.closed.catch(e => t.equal(e, theError, 'closed should reject with the error'));
+});
+
+test('Underlying source: strategy.size returning -Infinity', t => {
+ t.plan(2);
+
+ let theError;
+ const rs = new ReadableStream({
+ start(enqueue) {
+ try {
+ enqueue('hi');
+ t.fail('enqueue didn\'t throw');
+ } catch (error) {
+ t.equal(error.constructor, RangeError, 'enqueue should throw a RangeError');
+ theError = error;
+ }
+ },
+ strategy: {
+ size() {
+ return -Infinity;
+ },
+ shouldApplyBackpressure() {
+ return true;
+ }
+ }
+ });
+
+ rs.closed.catch(e => t.equal(e, theError, 'closed should reject with the error'));
+});
+
+test('Underlying source: strategy.size returning +Infinity', t => {
+ t.plan(2);
+
+ let theError;
+ const rs = new ReadableStream({
+ start(enqueue) {
+ try {
+ enqueue('hi');
+ t.fail('enqueue didn\'t throw');
+ } catch (error) {
+ t.equal(error.constructor, RangeError, 'enqueue should throw a RangeError');
+ theError = error;
+ }
+ },
+ strategy: {
+ size() {
+ return +Infinity;
+ },
+ shouldApplyBackpressure() {
+ return true;
+ }
+ }
+ });
+
+ rs.closed.catch(e => t.equal(e, theError, 'closed should reject with the error'));
});
diff --git a/reference-implementation/test/brand-checks.js b/reference-implementation/test/brand-checks.js
index fb4103bb5..f94ab50c6 100644
--- a/reference-implementation/test/brand-checks.js
+++ b/reference-implementation/test/brand-checks.js
@@ -1,11 +1,11 @@
const test = require('tape-catch');
-let ExclusiveStreamReader;
+let ReadableStreamReader;
-test('Can get the ExclusiveStreamReader constructor indirectly', t => {
+test('Can get the ReadableStreamReader constructor indirectly', t => {
t.doesNotThrow(() => {
// It's not exposed globally, but we test a few of its properties here.
- ExclusiveStreamReader = (new ReadableStream()).getReader().constructor;
+ ReadableStreamReader = (new ReadableStream()).getReader().constructor;
});
t.end();
});
@@ -13,13 +13,10 @@ test('Can get the ExclusiveStreamReader constructor indirectly', t => {
function fakeReadableStream() {
return {
get closed() { return Promise.resolve(); },
- get ready() { return Promise.resolve(); },
- get state() { return 'closed' },
cancel(reason) { return Promise.resolve(); },
- getReader() { return new ExclusiveStreamReader(new ReadableStream()); },
pipeThrough({ writable, readable }, options) { return readable; },
pipeTo(dest, { preventClose, preventAbort, preventCancel } = {}) { return Promise.resolve(); },
- read() { return ''; }
+    getReader() { return new ReadableStreamReader(new ReadableStream()); }
};
}
@@ -42,14 +39,12 @@ function realWritableStream() {
return new WritableStream();
}
-function fakeExclusiveStreamReader() {
+function fakeReadableStreamReader() {
return {
get closed() { return Promise.resolve(); },
get isActive() { return false; },
- get ready() { return Promise.resolve(); },
- get state() { return 'closed' },
cancel(reason) { return Promise.resolve(); },
- read() { return ''; },
+ read() { return Promise.resolve({ value: undefined, done: true }); },
releaseLock() { return; }
};
}
@@ -120,18 +115,6 @@ test('ReadableStream.prototype.closed enforces a brand check', t => {
getterRejects(t, ReadableStream.prototype, 'closed', realWritableStream());
});
-test('ReadableStream.prototype.ready enforces a brand check', t => {
- t.plan(2);
- getterRejects(t, ReadableStream.prototype, 'ready', fakeReadableStream());
- getterRejects(t, ReadableStream.prototype, 'ready', realWritableStream());
-});
-
-test('ReadableStream.prototype.state enforces a brand check', t => {
- t.plan(2);
- getterThrows(t, ReadableStream.prototype, 'state', fakeReadableStream());
- getterThrows(t, ReadableStream.prototype, 'state', realWritableStream());
-});
-
test('ReadableStream.prototype.cancel enforces a brand check', t => {
t.plan(2);
methodRejects(t, ReadableStream.prototype, 'cancel', fakeReadableStream());
@@ -170,52 +153,43 @@ test('ReadableStream.prototype.pipeTo works generically on its this and its argu
t.doesNotThrow(() => ReadableStream.prototype.pipeTo.call(fakeReadableStream(), fakeWritableStream()));
});
-test('ReadableStream.prototype.read enforces a brand check', t => {
- t.plan(2);
- methodThrows(t, ReadableStream.prototype, 'read', fakeReadableStream());
- methodThrows(t, ReadableStream.prototype, 'read', realWritableStream());
-});
-test('ExclusiveStreamReader enforces a brand check on its argument', t => {
- t.plan(1);
- t.throws(() => new ExclusiveStreamReader(fakeReadableStream()), /TypeError/, 'Contructing an ExclusiveStreamReader ' +
- 'should throw');
-});
-test('ExclusiveStreamReader.prototype.closed enforces a brand check', t => {
+test('ReadableStreamReader enforces a brand check on its argument', t => {
t.plan(1);
- getterRejects(t, ExclusiveStreamReader.prototype, 'closed', fakeExclusiveStreamReader());
-});
-
-test('ExclusiveStreamReader.prototype.isActive enforces a brand check', t => {
- t.plan(1);
- getterThrows(t, ExclusiveStreamReader.prototype, 'isActive', fakeExclusiveStreamReader());
+  t.throws(() => new ReadableStreamReader(fakeReadableStream()), /TypeError/, 'Constructing a ReadableStreamReader ' +
+ 'should throw');
});
-test('ExclusiveStreamReader.prototype.ready enforces a brand check', t => {
- t.plan(1);
- getterRejects(t, ExclusiveStreamReader.prototype, 'ready', fakeExclusiveStreamReader());
+test('ReadableStreamReader.prototype.closed enforces a brand check', t => {
+ t.plan(2);
+ getterRejects(t, ReadableStreamReader.prototype, 'closed', fakeReadableStreamReader());
+ getterRejects(t, ReadableStreamReader.prototype, 'closed', realReadableStream());
});
-test('ExclusiveStreamReader.prototype.state enforces a brand check', t => {
- t.plan(1);
- getterThrows(t, ExclusiveStreamReader.prototype, 'state', fakeExclusiveStreamReader());
+test('ReadableStreamReader.prototype.isActive enforces a brand check', t => {
+ t.plan(2);
+ getterThrows(t, ReadableStreamReader.prototype, 'isActive', fakeReadableStreamReader());
+ getterThrows(t, ReadableStreamReader.prototype, 'isActive', realReadableStream());
});
-test('ExclusiveStreamReader.prototype.cancel enforces a brand check', t => {
- t.plan(1);
- methodRejects(t, ExclusiveStreamReader.prototype, 'cancel', fakeExclusiveStreamReader());
+test('ReadableStreamReader.prototype.cancel enforces a brand check', t => {
+ t.plan(2);
+ methodRejects(t, ReadableStreamReader.prototype, 'cancel', fakeReadableStreamReader());
+ methodRejects(t, ReadableStreamReader.prototype, 'cancel', realReadableStream());
});
-test('ExclusiveStreamReader.prototype.read enforces a brand check', t => {
- t.plan(1);
- methodThrows(t, ExclusiveStreamReader.prototype, 'read', fakeExclusiveStreamReader());
+test('ReadableStreamReader.prototype.read enforces a brand check', t => {
+ t.plan(2);
+ methodRejects(t, ReadableStreamReader.prototype, 'read', fakeReadableStreamReader());
+ methodRejects(t, ReadableStreamReader.prototype, 'read', realReadableStream());
});
-test('ExclusiveStreamReader.prototype.releaseLock enforces a brand check', t => {
- t.plan(1);
- methodThrows(t, ExclusiveStreamReader.prototype, 'releaseLock', fakeExclusiveStreamReader());
+test('ReadableStreamReader.prototype.releaseLock enforces a brand check', t => {
+ t.plan(2);
+ methodThrows(t, ReadableStreamReader.prototype, 'releaseLock', fakeReadableStreamReader());
+ methodThrows(t, ReadableStreamReader.prototype, 'releaseLock', realReadableStream());
});
diff --git a/reference-implementation/test/count-queuing-strategy.js b/reference-implementation/test/count-queuing-strategy.js
index c68035218..f2bf98798 100644
--- a/reference-implementation/test/count-queuing-strategy.js
+++ b/reference-implementation/test/count-queuing-strategy.js
@@ -30,25 +30,42 @@ test('Correctly governs the return value of a ReadableStream\'s enqueue function
start(enqueue_) { enqueue = enqueue_; },
strategy: new CountQueuingStrategy({ highWaterMark: 0 })
});
+ const reader = rs.getReader();
t.equal(enqueue('a'), false, 'After 0 reads, 1st enqueue should return false (queue now contains 1 chunk)');
t.equal(enqueue('b'), false, 'After 0 reads, 2nd enqueue should return false (queue now contains 2 chunks)');
t.equal(enqueue('c'), false, 'After 0 reads, 3rd enqueue should return false (queue now contains 3 chunks)');
t.equal(enqueue('d'), false, 'After 0 reads, 4th enqueue should return false (queue now contains 4 chunks)');
- t.equal(rs.read(), 'a', '1st read gives back the 1st chunk enqueued (queue now contains 3 chunks)');
- t.equal(rs.read(), 'b', '2nd read gives back the 2nd chunk enqueued (queue now contains 2 chunks)');
- t.equal(rs.read(), 'c', '3rd read gives back the 2nd chunk enqueued (queue now contains 1 chunk)');
-
- t.equal(enqueue('e'), false, 'After 3 reads, 5th enqueue should return false (queue now contains 2 chunks)');
-
- t.equal(rs.read(), 'd', '4th read gives back the 3rd chunk enqueued (queue now contains 1 chunks)');
- t.equal(rs.read(), 'e', '5th read gives back the 4th chunk enqueued (queue now contains 0 chunks)');
-
- t.equal(enqueue('f'), false, 'After 5 reads, 6th enqueue should return false (queue now contains 1 chunk)');
- t.equal(enqueue('g'), false, 'After 5 reads, 7th enqueue should return false (queue now contains 2 chunks)');
-
- t.end();
+ reader.read().then(result => {
+ t.deepEqual(result, { value: 'a', done: false },
+ '1st read gives back the 1st chunk enqueued (queue now contains 3 chunks)');
+ return reader.read();
+ })
+ .then(result => {
+ t.deepEqual(result, { value: 'b', done: false },
+ '2nd read gives back the 2nd chunk enqueued (queue now contains 2 chunks)');
+ return reader.read();
+ })
+ .then(result => {
+ t.deepEqual(result, { value: 'c', done: false },
+ '3rd read gives back the 3rd chunk enqueued (queue now contains 1 chunk)');
+ t.equal(enqueue('e'), false, 'After 3 reads, 5th enqueue should return false (queue now contains 2 chunks)');
+ return reader.read();
+ })
+ .then(result => {
+ t.deepEqual(result, { value: 'd', done: false },
+      '4th read gives back the 4th chunk enqueued (queue now contains 1 chunk)');
+ return reader.read();
+ })
+ .then(result => {
+ t.deepEqual(result, { value: 'e', done: false },
+ '5th read gives back the 5th chunk enqueued (queue now contains 0 chunks)');
+ t.equal(enqueue('f'), false, 'After 5 reads, 6th enqueue should return false (queue now contains 1 chunk)');
+ t.equal(enqueue('g'), false, 'After 5 reads, 7th enqueue should return false (queue now contains 2 chunks)');
+ t.end();
+ })
+ .catch(e => t.error(e));
});
test('Correctly governs the return value of a ReadableStream\'s enqueue function (HWM = 1)', t => {
@@ -57,25 +74,42 @@ test('Correctly governs the return value of a ReadableStream\'s enqueue function
start(enqueue_) { enqueue = enqueue_; },
strategy: new CountQueuingStrategy({ highWaterMark: 1 })
});
+ const reader = rs.getReader();
t.equal(enqueue('a'), true, 'After 0 reads, 1st enqueue should return true (queue now contains 1 chunk)');
t.equal(enqueue('b'), false, 'After 0 reads, 2nd enqueue should return false (queue now contains 2 chunks)');
t.equal(enqueue('c'), false, 'After 0 reads, 3rd enqueue should return false (queue now contains 3 chunks)');
t.equal(enqueue('d'), false, 'After 0 reads, 4th enqueue should return false (queue now contains 4 chunks)');
- t.equal(rs.read(), 'a', '1st read gives back the 1st chunk enqueued (queue now contains 3 chunks)');
- t.equal(rs.read(), 'b', '2nd read gives back the 2nd chunk enqueued (queue now contains 2 chunks)');
- t.equal(rs.read(), 'c', '3rd read gives back the 2nd chunk enqueued (queue now contains 1 chunk)');
-
- t.equal(enqueue('e'), false, 'After 3 reads, 5th enqueue should return false (queue now contains 2 chunks)');
-
- t.equal(rs.read(), 'd', '4th read gives back the 3rd chunk enqueued (queue now contains 1 chunks)');
- t.equal(rs.read(), 'e', '5th read gives back the 4th chunk enqueued (queue now contains 0 chunks)');
-
- t.equal(enqueue('f'), true, 'After 5 reads, 6th enqueue should return true (queue now contains 1 chunk)');
- t.equal(enqueue('g'), false, 'After 5 reads, 7th enqueue should return false (queue now contains 2 chunks)');
-
- t.end();
+ reader.read().then(result => {
+ t.deepEqual(result, { value: 'a', done: false },
+ '1st read gives back the 1st chunk enqueued (queue now contains 3 chunks)');
+ return reader.read();
+ })
+ .then(result => {
+ t.deepEqual(result, { value: 'b', done: false },
+ '2nd read gives back the 2nd chunk enqueued (queue now contains 2 chunks)');
+ return reader.read();
+ })
+ .then(result => {
+ t.deepEqual(result, { value: 'c', done: false },
+ '3rd read gives back the 3rd chunk enqueued (queue now contains 1 chunk)');
+ t.equal(enqueue('e'), false, 'After 3 reads, 5th enqueue should return false (queue now contains 2 chunks)');
+ return reader.read();
+ })
+ .then(result => {
+ t.deepEqual(result, { value: 'd', done: false },
+      '4th read gives back the 4th chunk enqueued (queue now contains 1 chunk)');
+ return reader.read();
+ })
+ .then(result => {
+ t.deepEqual(result, { value: 'e', done: false },
+ '5th read gives back the 5th chunk enqueued (queue now contains 0 chunks)');
+ t.equal(enqueue('f'), true, 'After 5 reads, 6th enqueue should return true (queue now contains 1 chunk)');
+ t.equal(enqueue('g'), false, 'After 5 reads, 7th enqueue should return false (queue now contains 2 chunks)');
+ t.end();
+ })
+ .catch(e => t.error(e));
});
test('Correctly governs the return value of a ReadableStream\'s enqueue function (HWM = 4)', t => {
@@ -84,6 +118,7 @@ test('Correctly governs the return value of a ReadableStream\'s enqueue function
start(enqueue_) { enqueue = enqueue_; },
strategy: new CountQueuingStrategy({ highWaterMark: 4 })
});
+ const reader = rs.getReader();
t.equal(enqueue('a'), true, 'After 0 reads, 1st enqueue should return true (queue now contains 1 chunk)');
t.equal(enqueue('b'), true, 'After 0 reads, 2nd enqueue should return true (queue now contains 2 chunks)');
@@ -92,22 +127,42 @@ test('Correctly governs the return value of a ReadableStream\'s enqueue function
t.equal(enqueue('e'), false, 'After 0 reads, 5th enqueue should return false (queue now contains 5 chunks)');
t.equal(enqueue('f'), false, 'After 0 reads, 6th enqueue should return false (queue now contains 6 chunks)');
- t.equal(rs.read(), 'a', '1st read gives back the 1st chunk enqueued (queue now contains 5 chunks)');
- t.equal(rs.read(), 'b', '2nd read gives back the 2nd chunk enqueued (queue now contains 4 chunks)');
-
- t.equal(enqueue('g'), false, 'After 2 reads, 7th enqueue should return false (queue now contains 5 chunks)');
-
- t.equal(rs.read(), 'c', '3rd read gives back the 3rd chunk enqueued (queue now contains 4 chunks)');
- t.equal(rs.read(), 'd', '4th read gives back the 4th chunk enqueued (queue now contains 3 chunks)');
- t.equal(rs.read(), 'e', '5th read gives back the 5th chunk enqueued (queue now contains 2 chunks)');
- t.equal(rs.read(), 'f', '6th read gives back the 6th chunk enqueued (queue now contains 1 chunk)');
-
- t.equal(enqueue('h'), true, 'After 6 reads, 8th enqueue should return true (queue now contains 2 chunks)');
- t.equal(enqueue('i'), true, 'After 6 reads, 9th enqueue should return true (queue now contains 3 chunks)');
- t.equal(enqueue('j'), true, 'After 6 reads, 10th enqueue should return true (queue now contains 4 chunks)');
- t.equal(enqueue('k'), false, 'After 6 reads, 11th enqueue should return false (queue now contains 5 chunks)');
-
- t.end();
+ reader.read().then(result => {
+ t.deepEqual(result, { value: 'a', done: false },
+ '1st read gives back the 1st chunk enqueued (queue now contains 5 chunks)');
+ return reader.read();
+ })
+ .then(result => {
+ t.deepEqual(result, { value: 'b', done: false },
+ '2nd read gives back the 2nd chunk enqueued (queue now contains 4 chunks)');
+ t.equal(enqueue('g'), false, 'After 2 reads, 7th enqueue should return false (queue now contains 5 chunks)');
+ return reader.read();
+ })
+ .then(result => {
+ t.deepEqual(result, { value: 'c', done: false },
+ '3rd read gives back the 3rd chunk enqueued (queue now contains 4 chunks)');
+ return reader.read();
+ })
+ .then(result => {
+ t.deepEqual(result, { value: 'd', done: false },
+ '4th read gives back the 4th chunk enqueued (queue now contains 3 chunks)');
+ return reader.read();
+ })
+ .then(result => {
+ t.deepEqual(result, { value: 'e', done: false },
+ '5th read gives back the 5th chunk enqueued (queue now contains 2 chunks)');
+ return reader.read();
+ })
+ .then(result => {
+ t.deepEqual(result, { value: 'f', done: false },
+ '6th read gives back the 6th chunk enqueued (queue now contains 1 chunk)');
+ t.equal(enqueue('h'), true, 'After 6 reads, 8th enqueue should return true (queue now contains 2 chunks)');
+ t.equal(enqueue('i'), true, 'After 6 reads, 9th enqueue should return true (queue now contains 3 chunks)');
+ t.equal(enqueue('j'), true, 'After 6 reads, 10th enqueue should return true (queue now contains 4 chunks)');
+ t.equal(enqueue('k'), false, 'After 6 reads, 11th enqueue should return false (queue now contains 5 chunks)');
+ t.end();
+ })
+ .catch(e => t.error(e));
});
test('Can construct a writable stream with a valid CountQueuingStrategy', t => {
diff --git a/reference-implementation/test/exclusive-stream-reader.js b/reference-implementation/test/exclusive-stream-reader.js
deleted file mode 100644
index a596e4495..000000000
--- a/reference-implementation/test/exclusive-stream-reader.js
+++ /dev/null
@@ -1,531 +0,0 @@
-const test = require('tape-catch');
-
-test('Using the reader directly on a mundane stream', t => {
- t.plan(22);
-
- const rs = new ReadableStream({
- start(enqueue, close) {
- enqueue('a');
- setTimeout(() => enqueue('b'), 30);
- setTimeout(close, 60);
- }
- });
-
- t.equal(rs.state, 'readable', 'stream starts out readable');
-
- const reader = rs.getReader();
-
- t.equal(reader.isActive, true, 'reader isActive is true');
-
- t.equal(rs.state, 'waiting', 'after getting a reader, the stream state is waiting');
- t.equal(reader.state, 'readable', 'the reader state is readable');
-
- t.throws(() => rs.read(), /TypeError/, 'trying to read from the stream directly throws a TypeError');
- t.equal(reader.read(), 'a', 'trying to read from the reader works and gives back the first enqueued value');
- t.equal(reader.state, 'waiting', 'the reader state is now waiting since the queue has been drained');
- rs.cancel().then(
- () => t.fail('cancel() should not be fulfilled'),
- e => t.equal(e.constructor, TypeError, 'cancel() should be rejected with a TypeError')
- );
-
- reader.ready.then(() => {
- t.equal(reader.state, 'readable', 'ready for reader is fulfilled when second chunk is enqueued');
- t.equal(rs.state, 'waiting', 'the stream state is still waiting');
- t.equal(reader.read(), 'b', 'you can read the second chunk from the reader');
- });
-
- reader.closed.then(() => {
- t.pass('closed for the reader is fulfilled');
- t.equal(reader.state, 'closed', 'the reader state is closed');
- t.equal(rs.state, 'closed', 'the stream state is closed');
- t.equal(reader.isActive, false, 'the reader is no longer active');
-
- t.doesNotThrow(() => reader.releaseLock(), 'trying to release the lock twice does nothing');
- });
-
- rs.ready.then(() => {
- t.equal(rs.state, 'closed', 'ready for stream is not fulfilled until the stream closes');
- t.equal(reader.isActive, false, 'the reader is no longer active after the stream has closed');
- });
-
- rs.closed.then(() => {
- t.pass('closed for the stream is fulfilled');
- t.equal(rs.state, 'closed', 'the stream state is closed');
- t.equal(reader.state, 'closed', 'the reader state is closed');
- t.equal(reader.isActive, false, 'the reader is no longer active');
- });
-});
-
-test('Reading from a reader for an empty stream throws but doesn\'t break anything', t => {
- let enqueue;
- const rs = new ReadableStream({
- start(e) {
- enqueue = e;
- }
- });
- const reader = rs.getReader();
-
- t.equal(reader.isActive, true, 'reader is active to start with');
- t.equal(reader.state, 'waiting', 'reader state is waiting to start with');
- t.throws(() => reader.read(), /TypeError/, 'calling reader.read() throws a TypeError');
- t.equal(reader.isActive, true, 'reader is still active');
- t.equal(reader.state, 'waiting', 'reader state is still waiting');
-
- enqueue('a');
-
- reader.ready.then(() => {
- t.equal(reader.state, 'readable', 'after enqueuing the reader state is readable');
- t.equal(reader.read(), 'a', 'the enqueued chunk can be read back through the reader');
- t.end();
- });
-});
-
-test('A released reader should present like a closed stream', t => {
- t.plan(7);
-
- const rs = new ReadableStream();
- const reader = rs.getReader();
- reader.releaseLock();
-
- t.equal(reader.isActive, false, 'isActive returns false');
- t.equal(reader.state, 'closed', 'reader.state returns closed');
- t.equal(rs.state, 'waiting', 'rs.state returns waiting');
-
- t.throws(() => reader.read(), /TypeError/, 'trying to read gives a TypeError');
- reader.cancel().then(
- v => t.equal(v, undefined, 'reader.cancel() should fulfill with undefined'),
- e => t.fail('reader.cancel() should not reject')
- );
-
- reader.ready.then(() => t.pass('reader.ready should be fulfilled'));
- reader.closed.then(() => t.pass('reader.closed should be fulfilled'));
-});
-
-test('cancel() on a reader implicitly releases the reader before calling through', t => {
- t.plan(3);
-
- const passedReason = new Error('it wasn\'t the right time, sorry');
- const rs = new ReadableStream({
- cancel(reason) {
- t.equal(reader.isActive, false, 'canceling via the reader should release the reader\'s lock');
- t.equal(reason, passedReason, 'the cancellation reason is passed through to the underlying source');
- }
- });
-
- const reader = rs.getReader();
- reader.cancel(passedReason).then(
- () => t.pass('reader.cancel() should fulfill'),
- e => t.fail('reader.cancel() should not reject')
- );
-});
-
-test('getReader() on a closed stream should fail', t => {
- const rs = new ReadableStream({
- start(enqueue, close) {
- close();
- }
- });
-
- t.equal(rs.state, 'closed', 'the stream should be closed');
- t.throws(() => rs.getReader(), /TypeError/, 'getReader() threw a TypeError');
- t.end();
-});
-
-test('getReader() on a cancelled stream should fail (since cancelling closes)', t => {
- const rs = new ReadableStream();
- rs.cancel(new Error('fun time is over'));
-
- t.equal(rs.state, 'closed', 'the stream should be closed');
- t.throws(() => rs.getReader(), /TypeError/, 'getReader() threw a TypeError');
- t.end();
-});
-
-test('getReader() on an errored stream should rethrow the error', t => {
- const theError = new Error('don\'t say i didn\'t warn ya');
- const rs = new ReadableStream({
- start(enqueue, close, error) {
- error(theError);
- }
- });
-
- t.equal(rs.state, 'errored', 'the stream should be errored');
- t.throws(() => rs.getReader(), /don't say i didn't warn ya/, 'getReader() threw the error');
- t.end();
-});
-
-test('closed should be fulfilled after stream is closed (both .closed accesses after acquiring)', t => {
- t.plan(2);
-
- let doClose;
- const rs = new ReadableStream({
- start(enqueue, close) {
- doClose = close;
- }
- });
-
- const reader = rs.getReader();
- doClose();
-
- reader.closed.then(() => {
- t.equal(reader.isActive, false, 'reader is no longer active when reader closed is fulfilled');
- });
-
- rs.closed.then(() => {
- t.equal(reader.isActive, false, 'reader is no longer active when stream closed is fulfilled');
- });
-});
-
-test('closed should be fulfilled after stream is closed (stream .closed access before acquiring)', t => {
- t.plan(2);
-
- let doClose;
- const rs = new ReadableStream({
- start(enqueue, close) {
- doClose = close;
- }
- });
-
- rs.closed.then(() => {
- t.equal(reader.isActive, false, 'reader is no longer active when stream closed is fulfilled');
- });
-
- const reader = rs.getReader();
- doClose();
-
- reader.closed.then(() => {
- t.equal(reader.isActive, false, 'reader is no longer active when reader closed is fulfilled');
- });
-});
-
-test('reader.closed should be fulfilled after reader releases its lock (.closed access before release)', t => {
- const rs = new ReadableStream();
- const reader = rs.getReader();
- reader.closed.then(() => t.end());
- reader.releaseLock();
-});
-
-test('reader.closed should be fulfilled after reader releases its lock (.closed access after release)', t => {
- const rs = new ReadableStream();
- const reader = rs.getReader();
- reader.releaseLock();
- reader.closed.then(() => t.end());
-});
-
-test('closed should be fulfilled after reader releases its lock (multiple stream locks)', t => {
- t.plan(6);
-
- let doClose;
- const rs = new ReadableStream({
- start(enqueue, close) {
- doClose = close;
- }
- });
-
- const reader1 = rs.getReader();
-
- rs.closed.then(() => {
- t.equal(reader1.isActive, false, 'reader1 is no longer active when stream closed is fulfilled');
- t.equal(reader2.isActive, false, 'reader2 is no longer active when stream closed is fulfilled');
- });
-
- reader1.releaseLock();
-
- const reader2 = rs.getReader();
- doClose();
-
- reader1.closed.then(() => {
- t.equal(reader1.isActive, false, 'reader1 is no longer active when reader1 closed is fulfilled');
- t.equal(reader2.isActive, false, 'reader2 is no longer active when reader1 closed is fulfilled');
- });
-
- reader2.closed.then(() => {
- t.equal(reader1.isActive, false, 'reader1 is no longer active when reader2 closed is fulfilled');
- t.equal(reader2.isActive, false, 'reader2 is no longer active when reader2 closed is fulfilled');
- });
-});
-
-test('ready should fulfill after reader releases its lock and stream is waiting (.ready access before releasing)',
- t => {
- t.plan(5);
-
- const rs = new ReadableStream();
- const reader = rs.getReader();
-
- t.equal(rs.state, 'waiting', 'the stream\'s state is initially waiting');
- t.equal(reader.state, 'waiting', 'the reader\'s state is initially waiting');
- reader.ready.then(() => {
- t.pass('reader ready should be fulfilled');
- t.equal(rs.state, 'waiting', 'the stream\'s state is still waiting');
- t.equal(reader.state, 'closed', 'the reader\'s state is now closed');
- });
- reader.releaseLock();
-});
-
-test('ready should fulfill after reader releases its lock and stream is waiting (.ready access after releasing)',
- t => {
- t.plan(5);
-
- const rs = new ReadableStream();
- const reader = rs.getReader();
-
- t.equal(rs.state, 'waiting', 'the stream\'s state is initially waiting');
- t.equal(reader.state, 'waiting', 'the reader\'s state is initially waiting');
- reader.releaseLock();
- reader.ready.then(() => {
- t.pass('reader ready should be fulfilled');
- t.equal(rs.state, 'waiting', 'the stream\'s state is still waiting');
- t.equal(reader.state, 'closed', 'the reader\'s state is now closed');
- });
-});
-
-test('stream\'s ready should not fulfill when acquiring, then releasing, a reader', t => {
- const rs = new ReadableStream();
- const reader = rs.getReader();
-
- rs.ready.then(() => t.fail('stream ready should not be fulfilled'));
- reader.releaseLock();
-
- setTimeout(() => t.end(), 20);
-});
-
-test('stream\'s ready should not fulfill while locked, even if accessed before locking', t => {
- let doEnqueue;
- const rs = new ReadableStream({
- start(enqueue) {
- doEnqueue = enqueue;
- }
- });
- const ready = rs.ready;
-
- const reader = rs.getReader();
-
- ready.then(() => {
- t.equal(rs.state, 'waiting', 'ready fulfilled but the state was waiting; next assert will fail');
- t.fail('stream ready should not be fulfilled');
- });
-
- doEnqueue();
- setTimeout(() => t.end(), 20);
-});
-
-test('stream\'s ready accessed before locking should not fulfill if stream becomes readable while locked, becomes ' +
- 'waiting again and then is released', t => {
- let doEnqueue;
- const rs = new ReadableStream({
- start(enqueue) {
- doEnqueue = enqueue;
- }
- });
- const ready = rs.ready;
-
- const reader = rs.getReader();
-
- ready.then(() => {
- t.fail('stream ready should not be fulfilled');
- });
-
- doEnqueue();
- t.equal(reader.state, 'readable', 'reader should be readable after enqueue');
- reader.read();
- t.equal(reader.state, 'waiting', 'reader should be waiting again after read');
- reader.releaseLock();
- t.equal(rs.state, 'waiting', 'stream should be waiting again after read');
- setTimeout(() => t.end(), 20);
-});
-
-test('stream\'s ready accessed before locking should not fulfill if stream becomes readable while locked, becomes ' +
- 'waiting again and then is released in another microtask', t => {
- let doEnqueue;
- const rs = new ReadableStream({
- start(enqueue) {
- doEnqueue = enqueue;
- }
- });
- const ready = rs.ready;
-
- const reader = rs.getReader();
-
- ready.then(() => {
- t.fail('stream ready should not be fulfilled');
- });
-
- doEnqueue();
- t.equal(reader.state, 'readable', 'reader should be readable after enqueue');
- reader.read();
- t.equal(reader.state, 'waiting', 'reader should be waiting again after read');
-
- // Let the fulfillment callback used in the algorithm of rs.ready run. This
- // covers the code path in rs.ready which is run when
- // this._readableStreamReader is not undefined.
- Promise.resolve().then(() => {
- reader.releaseLock();
- t.equal(rs.state, 'waiting', 'stream should be waiting again after read');
- setTimeout(() => t.end(), 20);
- });
-});
-
-test('stream\'s ready should not fulfill when acquiring a reader, accessing ready, releasing the reader, acquiring ' +
- 'another reader, then enqueuing a chunk', t => {
- // https://github.com/whatwg/streams/pull/262#discussion_r22990833
-
- let doEnqueue;
- const rs = new ReadableStream({
- start(enqueue) {
- doEnqueue = enqueue;
- }
- });
-
- const reader = rs.getReader();
- rs.ready.then(() => {
- t.equal(rs.state, 'waiting', 'ready fulfilled but the state was waiting; next assert will fail');
- t.fail('stream ready should not be fulfilled')
- });
-
- reader.releaseLock();
- rs.getReader();
- doEnqueue('a');
-
- setTimeout(() => t.end(), 20);
-});
-
-test('Multiple readers can access the stream in sequence', t => {
- const rs = new ReadableStream({
- start(enqueue, close) {
- enqueue('a');
- enqueue('b');
- enqueue('c');
- enqueue('d');
- enqueue('e');
- close();
- }
- });
-
- t.equal(rs.read(), 'a', 'reading the first chunk directly from the stream works');
-
- const reader1 = rs.getReader();
- t.equal(reader1.read(), 'b', 'reading the second chunk from reader1 works');
- reader1.releaseLock();
- t.equal(reader1.state, 'closed', 'reader1 is closed after being released');
-
- t.equal(rs.read(), 'c', 'reading the third chunk from the stream after releasing reader1 works');
-
- const reader2 = rs.getReader();
- t.equal(reader2.read(), 'd', 'reading the fourth chunk from reader2 works');
- reader2.releaseLock();
- t.equal(reader2.state, 'closed', 'reader2 is closed after being released');
-
- t.equal(rs.read(), 'e', 'reading the fifth chunk from the stream after releasing reader2 works');
-
- t.end();
-});
-
-test('A stream that errors has that reflected in the reader and the stream', t => {
- t.plan(9);
-
- let error;
- const rs = new ReadableStream({
- start(enqueue, close, error_) {
- error = error_;
- }
- });
-
- const reader = rs.getReader();
-
- const passedError = new Error('too exclusive');
- error(passedError);
-
- t.equal(reader.isActive, false, 'the reader should have lost its lock');
- t.throws(() => reader.read(), /TypeError/,
- 'reader.read() should throw a TypeError since the reader no longer has a lock');
- t.equal(reader.state, 'errored', 'the reader\'s state should be errored');
- reader.ready.then(() => t.pass('reader.ready should fulfill'));
- reader.closed.then(
- () => t.fail('reader.closed should not be fulfilled'),
- e => t.equal(e, passedError, 'reader.closed should be rejected with the stream error')
- );
-
- t.throws(() => rs.read(), /too exclusive/, 'rs.read() should throw the stream error');
- t.equal(rs.state, 'errored', 'the stream\'s state should be errored');
- rs.ready.then(() => t.pass('rs.ready should fulfill'));
- rs.closed.then(
- () => t.fail('rs.closed should not be fulfilled'),
- e => t.equal(e, passedError, 'rs.closed should be rejected with the stream error')
- );
-});
-
-test('Cannot use an already-released reader to unlock a stream again', t => {
- t.plan(2);
-
- const rs = new ReadableStream();
-
- const reader1 = rs.getReader();
- reader1.releaseLock();
-
- const reader2 = rs.getReader();
- t.equal(reader2.isActive, true, 'reader2 state is active before releasing reader1');
-
- reader1.releaseLock();
- t.equal(reader2.isActive, true, 'reader2 state is still active after releasing reader1 again');
-});
-
-test('stream\'s ready returns the same instance as long as there\'s no state transition visible on stream even ' +
- 'if the reader became readable while the stream was locked', t => {
- let enqueue;
- const rs = new ReadableStream({
- start(enqueue_) {
- enqueue = enqueue_
- }
- });
-
- const ready = rs.ready;
-
- const reader = rs.getReader();
-
- enqueue('a');
- t.equal(reader.state, 'readable', 'reader should be readable after enqueuing');
- t.equal(reader.read(), 'a', 'the enqueued data should be read');
-
- reader.releaseLock();
-
- t.equal(ready, rs.ready, 'rs.ready should return the same instance as before locking');
- t.end();
-});
-
-test('reader\'s ready and close returns the same instance as long as there\'s no state transition',
- t => {
- const rs = new ReadableStream();
- const reader = rs.getReader();
-
- const ready = reader.ready;
- const closed = reader.closed;
-
- reader.releaseLock();
-
- t.equal(ready, reader.ready, 'reader.ready should return the same instance as before releasing');
- t.equal(closed, reader.closed, 'reader.ready should return the same instance as before releasing');
- t.end();
-});
-
-test('reader\'s ready and close returns the same instance as long as there\'s no state transition to waiting',
- t => {
- let enqueue;
- const rs = new ReadableStream({
- start(enqueue_) {
- enqueue = enqueue_
- }
- });
-
- const reader = rs.getReader();
-
- const ready = reader.ready;
- const closed = reader.closed;
-
- enqueue('a');
- t.equal(reader.state, 'readable', 'reader should be readable after enqueuing');
-
- reader.releaseLock();
-
- t.equal(ready, reader.ready, 'reader.ready should return the same instance as before releasing');
- t.equal(closed, reader.closed, 'reader.ready should return the same instance as before releasing');
- t.end();
-});
diff --git a/reference-implementation/test/pipe-through.js b/reference-implementation/test/pipe-through.js
index 84ec86433..079052e86 100644
--- a/reference-implementation/test/pipe-through.js
+++ b/reference-implementation/test/pipe-through.js
@@ -13,7 +13,7 @@ test('Piping through a duck-typed pass-through transform stream works', t => {
});
test('Piping through an identity transform stream will close the destination when the source closes', t => {
- t.plan(2);
+ t.plan(1);
const rs = new ReadableStream({
start(enqueue, close) {
@@ -34,9 +34,9 @@ test('Piping through an identity transform stream will close the destination whe
const ws = new WritableStream();
rs.pipeThrough(ts).pipeTo(ws).then(() => {
- t.equal(rs.state, 'closed', 'the readable stream was closed');
t.equal(ws.state, 'closed', 'the writable stream was closed');
- });
+ })
+ .catch(e => t.error(e));
});
// FIXME: expected results here will probably change as we fix https://github.com/whatwg/streams/issues/190
diff --git a/reference-implementation/test/pipe-to-options.js b/reference-implementation/test/pipe-to-options.js
index b0d511c57..dc50e7635 100644
--- a/reference-implementation/test/pipe-to-options.js
+++ b/reference-implementation/test/pipe-to-options.js
@@ -1,142 +1,6 @@
const test = require('tape-catch');
-import sequentialReadableStream from './utils/sequential-rs';
-
-test('Piping with no options and no errors', t => {
- const rs = sequentialReadableStream(5, { async: true });
- const ws = new WritableStream({
- abort() {
- t.fail('unexpected abort call');
- }
- });
-
- rs.pipeTo(ws);
-
- rs.closed.then(() => {
- setTimeout(() => {
- t.equal(ws.state, 'closed', 'destination should be closed');
- t.end();
- }, 0);
- });
-});
-
-test('Piping with { preventClose: false } and no errors', t => {
- const rs = sequentialReadableStream(5, { async: true });
- const ws = new WritableStream({
- abort() {
- t.fail('unexpected abort call');
- }
- });
-
- rs.pipeTo(ws, { preventClose: false });
-
- rs.closed.then(() => {
- setTimeout(() => {
- t.equal(ws.state, 'closed', 'destination should be closed');
- t.end();
- }, 0);
- });
-});
-
-test('Piping with { preventClose: true } and no errors', t => {
- const rs = sequentialReadableStream(5, { async: true });
- const ws = new WritableStream({
- close() {
- t.fail('unexpected close call');
- t.end();
- },
- abort() {
- t.fail('unexpected abort call');
- }
- });
-
- const pipeToPromise = rs.pipeTo(ws, { preventClose: true });
-
- rs.closed.then(() => {
- setTimeout(() => {
- t.equal(ws.state, 'writable', 'destination should be writable');
-
- pipeToPromise.then(
- v => {
- t.equal(v, undefined);
- t.end();
- },
- r => {
- t.fail('pipeToPromise is rejected');
- t.end();
- }
- );
- }, 0);
- });
-});
-
-test('Piping with no options and a source error', t => {
- const theError = new Error('source error');
- const rs = new ReadableStream({
- start() {
- return Promise.reject(theError);
- }
- });
- const ws = new WritableStream({
- abort(r) {
- t.equal(r, theError, 'reason passed to abort equals the source error');
- t.end();
- }
- });
-
- rs.pipeTo(ws);
-});
-
-test('Piping with { preventAbort: false } and a source error', t => {
- const theError = new Error('source error');
- const rs = new ReadableStream({
- start() {
- return Promise.reject(theError);
- }
- });
- const ws = new WritableStream({
- abort(r) {
- t.equal(r, theError, 'reason passed to abort equals the source error');
- t.end();
- }
- });
-
- rs.pipeTo(ws, { preventAbort: false });
-});
-
-test('Piping with { preventAbort: true } and a source error', t => {
- const theError = new Error('source error');
- const rs = new ReadableStream({
- start() {
- return Promise.reject(theError);
- }
- });
- const ws = new WritableStream({
- abort(r) {
- t.fail('unexpected call to abort');
- t.end();
- }
- });
-
- const pipeToPromise = rs.pipeTo(ws, { preventAbort: true });
-
- rs.closed.catch(() => {
- setTimeout(() => {
- t.equal(ws.state, 'writable', 'destination should remain writable');
-
- pipeToPromise.then(
- () => {
- t.fail('pipeToPromise is fulfilled');
- t.end();
- },
- r => {
- t.equal(r, theError, 'rejection reason of pipeToPromise is the source error');
- t.end();
- }
- );
- }, 0);
- })
-});
+// Many other pipeTo-with-options tests have been templated.
test('Piping with no options and a destination error', t => {
t.plan(2);
@@ -197,14 +61,14 @@ test('Piping with { preventCancel: false } and a destination error', t => {
test('Piping with { preventCancel: true } and a destination error', t => {
const theError = new Error('destination error');
const rs = new ReadableStream({
- start(enqueue, close) {
+ start(enqueue) {
enqueue('a');
setTimeout(() => enqueue('b'), 10);
setTimeout(() => enqueue('c'), 20);
+ setTimeout(() => enqueue('d'), 30);
},
cancel(r) {
t.fail('unexpected call to cancel');
- t.end();
}
});
@@ -216,22 +80,18 @@ test('Piping with { preventCancel: true } and a destination error', t => {
}
});
- const pipeToPromise = rs.pipeTo(ws, { preventCancel: true });
+ rs.pipeTo(ws, { preventCancel: true }).catch(e => {
+ t.equal(e, theError, 'rejection reason of pipeTo promise is the sink error');
- ws.closed.catch(() => {
- setTimeout(() => {
- t.equal(rs.state, 'readable', 'source should remain readable');
+ let reader;
+ t.doesNotThrow(() => { reader = rs.getReader(); }, 'should be able to get a stream reader after pipeTo completes');
- pipeToPromise.then(
- () => {
- t.fail('pipeToPromise is fulfilled');
- t.end();
- },
- r => {
- t.equal(r, theError, 'rejection reason of pipeToPromise is the sink error');
- t.end();
- }
- );
- }, 30);
- });
+ // { value: 'c', done: false } gets consumed before we know that ws has errored, and so is lost.
+
+ return reader.read().then(result => {
+ t.deepEqual(result, { value: 'd', done: false }, 'should be able to read the remaining chunk from the reader');
+ t.end();
+ });
+ })
+ .catch(e => t.error(e));
});
diff --git a/reference-implementation/test/pipe-to.js b/reference-implementation/test/pipe-to.js
index 0732543ae..5b7427e10 100644
--- a/reference-implementation/test/pipe-to.js
+++ b/reference-implementation/test/pipe-to.js
@@ -2,7 +2,11 @@ const test = require('tape-catch');
import sequentialReadableStream from './utils/sequential-rs';
+// TODO: many asserts in this file are unlabeled; we should label them.
+
test('Piping from a ReadableStream from which lots of data are readable synchronously', t => {
+ t.plan(4);
+
const rs = new ReadableStream({
start(enqueue, close) {
for (let i = 0; i < 1000; ++i) {
@@ -11,57 +15,74 @@ test('Piping from a ReadableStream from which lots of data are readable synchron
close();
}
});
- t.equal(rs.state, 'readable');
const ws = new WritableStream({
strategy: new CountQueuingStrategy({
highWaterMark: 1000
})
});
- t.equal(ws.state, 'writable');
- rs.pipeTo(ws);
- t.equal(rs.state, 'closed', 'all data must be read out from rs');
- t.equal(ws.state, 'closing', 'close must have been called after accepting all data from rs');
+ t.equal(ws.state, 'writable', 'writable stream state should start out writable');
+
+ let rsClosed = false;
+ rs.closed.then(() => {
+ rsClosed = true;
+ });
- t.end();
+ let pipeFinished = false;
+ rs.pipeTo(ws).then(
+ () => {
+ pipeFinished = true;
+ t.equal(rsClosed, true, 'readable stream should be closed after pipe finishes');
+ t.equal(ws.state, 'closed', 'writable stream state should be closed after pipe finishes');
+ },
+ e => t.error(e)
+ );
+
+ setTimeout(() => {
+    t.equal(pipeFinished, true, 'pipe should have finished before a setTimeout(, 0) callback runs, since piping should involve only microtasks');
+ }, 0);
});
test('Piping from a ReadableStream in readable state to a WritableStream in closing state', t => {
- let pullCount = 0;
- let cancelCalled = false;
+ t.plan(4);
+
+ let cancelReason;
const rs = new ReadableStream({
start(enqueue, close) {
- enqueue("Hello");
- },
- pull() {
- ++pullCount;
+ enqueue('Hello');
},
- cancel() {
- t.assert(!cancelCalled);
- cancelCalled = true;
+ cancel(reason) {
+ t.equal(reason.constructor, TypeError, 'underlying source cancel should have been called with a TypeError');
+ cancelReason = reason;
}
});
- t.equal(rs.state, 'readable');
const ws = new WritableStream({
write() {
t.fail('Unexpected write call');
- t.end();
},
abort() {
t.fail('Unexpected abort call');
- t.end();
}
});
ws.close();
- t.equal(ws.state, 'closing');
+ t.equal(ws.state, 'closing', 'writable stream should be closing immediately after closing it');
- rs.pipeTo(ws);
- t.assert(cancelCalled);
- t.equal(rs.state, 'closed');
- t.end();
+ let rsClosed = false;
+ rs.closed.then(() => {
+ rsClosed = true;
+ });
+
+ rs.pipeTo(ws).then(
+ () => t.fail('promise returned by pipeTo should not fulfill'),
+ r => {
+ t.equal(r, cancelReason,
+ 'the pipeTo promise should reject with the same error as the underlying source cancel was called with');
+ t.equal(rsClosed, true, 'readable stream should be closed after pipe finishes');
+ }
+ );
});
test('Piping from a ReadableStream in readable state to a WritableStream in errored state', t => {
@@ -70,7 +91,7 @@ test('Piping from a ReadableStream in readable state to a WritableStream in erro
const passedError = new Error('horrible things');
const rs = new ReadableStream({
start(enqueue, close) {
- enqueue("Hello");
+ enqueue('Hello');
},
pull() {
++pullCount;
@@ -82,10 +103,13 @@ test('Piping from a ReadableStream in readable state to a WritableStream in erro
t.equal(reason, passedError);
}
});
- t.equal(rs.state, 'readable');
let writeCalled = false;
+ const startPromise = Promise.resolve();
const ws = new WritableStream({
+ start() {
+ return startPromise;
+ },
write(chunk) {
t.assert(!writeCalled, 'write must not be called more than once');
writeCalled = true;
@@ -104,107 +128,26 @@ test('Piping from a ReadableStream in readable state to a WritableStream in erro
}
});
- // Wait for ws to start.
- setTimeout(() => {
+ startPromise.then(() => {
ws.write('Hello');
t.assert(writeCalled, 'write must be called');
ws.ready.then(() => {
t.equal(ws.state, 'errored', 'as a result of rejected promise, ws must be in errored state');
- rs.pipeTo(ws);
-
- // Need to delay because pipeTo retrieves error from dest using ready.
- setTimeout(() => {
- t.assert(cancelCalled);
- t.equal(rs.state, 'closed');
+ rs.pipeTo(ws).catch(e => {
+ t.equal(e, passedError, 'pipeTo promise should be rejected with the error');
+ t.assert(cancelCalled, 'cancel should have been called');
t.end();
- }, 0);
+ });
});
- }, 0);
-});
-
-test('Piping from a ReadableStream in closed state to a WritableStream in writable state', t => {
- t.plan(3);
-
- const rs = new ReadableStream({
- start(enqueue, close) {
- close();
- },
- pull() {
- t.fail('Unexpected pull call');
- },
- cancel(reason) {
- t.fail('Unexpected cancel call');
- }
});
- t.equal(rs.state, 'closed');
-
- const ws = new WritableStream({
- write() {
- t.fail('Unexpected write call');
- },
- close() {
- t.fail('Unexpected close call');
- },
- abort() {
- t.fail('Unexpected abort call');
- }
- });
-
- // Wait for ws to start.
- setTimeout(() => {
- t.equal(ws.state, 'writable');
-
- rs.pipeTo(ws).then(
- () => t.fail('pipeTo promise should not be fulfilled'),
- e => t.equal(e.constructor, TypeError, 'pipeTo promise should be rejected with a TypeError')
- );
- }, 0);
});
-test('Piping from a ReadableStream in errored state to a WritableStream in writable state', t => {
- t.plan(3);
-
- const theError = new Error('piping is too hard today');
- const rs = new ReadableStream({
- start(enqueue, close, error) {
- error(theError);
- },
- pull() {
- t.fail('Unexpected pull call');
- },
- cancel(reason) {
- t.fail('Unexpected cancel call');
- }
- });
- t.equal(rs.state, 'errored');
-
- const ws = new WritableStream({
- write() {
- t.fail('Unexpected write call');
- },
- close() {
- t.fail('Unexpected close call');
- },
- abort() {
- t.fail('Unexpected abort call');
- }
- });
-
- // Wait for ws to start.
- setTimeout(() => {
- t.equal(ws.state, 'writable');
-
- rs.pipeTo(ws).then(
- () => t.fail('pipeTo promise should not be fulfilled'),
- e => t.equal(e, theError, 'pipeTo promise should be rejected with the passed error')
- );
- }, 0);
-});
+test('Piping from a ReadableStream in the readable state which becomes closed after pipeTo call to a WritableStream ' +
+ 'in the writable state', t => {
+ t.plan(5);
-test('Piping from a ReadableStream in readable state which becomes closed after pipeTo call to a WritableStream in ' +
- 'writable state', t => {
let closeReadableStream;
let pullCount = 0;
const rs = new ReadableStream({
@@ -217,51 +160,52 @@ test('Piping from a ReadableStream in readable state which becomes closed after
},
cancel() {
t.fail('Unexpected cancel call');
- t.end();
}
});
- t.equal(rs.state, 'readable');
let writeCalled = false;
+ const startPromise = Promise.resolve();
const ws = new WritableStream({
+ start() {
+ return startPromise;
+ },
write(chunk) {
if (!writeCalled) {
- t.equal(chunk, 'Hello');
+ t.equal(chunk, 'Hello', 'chunk written to writable stream should be the one enqueued into the readable stream');
writeCalled = true;
} else {
t.fail('Unexpected extra write call');
- t.end();
}
},
close() {
- t.assert(writeCalled);
- t.equal(pullCount, 2);
-
- t.end();
+ t.pass('underlying sink close should be called');
+ t.equal(pullCount, 1, 'underlying source pull should have been called once');
},
abort() {
t.fail('Unexpected abort call');
- t.end();
}
});
- // Wait for ws to start.
- setTimeout(() => {
- rs.pipeTo(ws);
- t.equal(rs.state, 'waiting', 'value must leave readable state synchronously');
- t.equal(ws.state, 'waiting', 'writable stream must be written to, entering a waiting state');
+ startPromise.then(() => {
+ rs.pipeTo(ws).then(() => {
+ t.equal(ws.state, 'closed', 'writable stream should be closed after pipeTo completes');
+ });
+
+ t.equal(ws.state, 'writable', 'writable stream should still be writable immediately after pipeTo');
closeReadableStream();
- }, 0);
+ });
});
-test('Piping from a ReadableStream in readable state which becomes errored after pipeTo call to a WritableStream in ' +
- 'writable state', t => {
+test('Piping from a ReadableStream in the readable state which becomes errored after pipeTo call to a WritableStream ' +
+ 'in the writable state', t => {
+ t.plan(5);
+
let errorReadableStream;
let pullCount = 0;
const rs = new ReadableStream({
start(enqueue, close, error) {
- enqueue("Hello");
+ enqueue('Hello');
errorReadableStream = error;
},
pull() {
@@ -269,48 +213,42 @@ test('Piping from a ReadableStream in readable state which becomes errored after
},
cancel() {
t.fail('Unexpected cancel call');
- t.end();
}
});
- t.equal(rs.state, 'readable');
- let writeCalled = false;
let passedError = new Error('horrible things');
+ const startPromise = Promise.resolve();
const ws = new WritableStream({
+ start() {
+ return startPromise;
+ },
write(chunk) {
- if (!writeCalled) {
- t.equal(chunk, 'Hello');
- writeCalled = true;
- } else {
- t.fail('Unexpected extra write call');
- t.end();
- }
+ t.fail('Unexpected extra write call');
},
close() {
t.fail('Unexpected close call');
- t.end();
},
abort(reason) {
- t.equal(reason, passedError);
- t.assert(writeCalled);
- t.equal(pullCount, 2);
-
- t.end();
+ t.equal(reason, passedError, 'underlying sink abort should receive the error from the readable stream');
+ t.equal(pullCount, 1, 'underlying source pull should have been called once');
}
});
- // Wait for ws to start.
- setTimeout(() => {
- rs.pipeTo(ws);
- t.equal(rs.state, 'waiting', 'value must leave readable state synchronously');
- t.equal(ws.state, 'waiting', 'writable stream must be written to, entering a waiting state');
+ startPromise.then(() => {
+ rs.pipeTo(ws).catch(e => {
+ t.equal(e, passedError, 'pipeTo should be rejected with the passed error');
+ t.equal(ws.state, 'errored', 'writable stream should be errored after pipeTo completes');
+ });
+
+ t.equal(ws.state, 'writable', 'writable stream should still be writable immediately after pipeTo');
errorReadableStream(passedError);
- }, 0);
+ });
});
-test('Piping from a ReadableStream in waiting state which becomes readable after pipeTo call to a WritableStream in ' +
- 'writable state', t => {
+test('Piping from an empty ReadableStream which becomes non-empty after pipeTo call to a WritableStream in the ' +
+ 'writable state', t => {
+ t.plan(3);
let enqueue;
let pullCount = 0;
const rs = new ReadableStream({
@@ -322,36 +260,31 @@ test('Piping from a ReadableStream in waiting state which becomes readable after
},
cancel() {
t.fail('Unexpected cancel call');
- t.end();
}
});
const ws = new WritableStream({
write(chunk) {
- t.equal(chunk, 'Hello');
- t.equal(pullCount, 2);
- t.end();
+ t.equal(chunk, 'Hello', 'underlying sink write should be called with the single chunk');
+ t.equal(pullCount, 1, 'pull should have been called once');
},
close() {
t.fail('Unexpected close call');
- t.end();
},
abort(reason) {
t.fail('Unexpected abort call');
- t.end();
}
});
- rs.pipeTo(ws);
- t.equal(rs.state, 'waiting');
- t.equal(ws.state, 'writable');
+ rs.pipeTo(ws).then(() => t.fail('pipeTo promise should not fulfill'));
+ t.equal(ws.state, 'writable', 'writable stream should start in writable state');
enqueue('Hello');
});
-test('Piping from a ReadableStream in waiting state which becomes errored after pipeTo call to a WritableStream in ' +
+test('Piping from an empty ReadableStream which becomes errored after pipeTo call to a WritableStream in the ' +
'writable state', t => {
- t.plan(4);
+ t.plan(3);
let errorReadableStream;
const rs = new ReadableStream({
@@ -360,11 +293,9 @@ test('Piping from a ReadableStream in waiting state which becomes errored after
},
pull() {
t.fail('Unexpected pull call');
- t.end();
},
cancel() {
t.fail('Unexpected cancel call');
- t.end();
}
});
@@ -372,79 +303,68 @@ test('Piping from a ReadableStream in waiting state which becomes errored after
const ws = new WritableStream({
write() {
t.fail('Unexpected write call');
- t.end();
},
close() {
t.fail('Unexpected close call');
- t.end();
},
abort(reason) {
- t.equal(reason, passedError);
+ t.equal(reason, passedError, 'underlying sink abort should receive the error from the readable stream');
}
});
- rs.pipeTo(ws);
- t.equal(rs.state, 'waiting');
- t.equal(ws.state, 'writable');
-
+ rs.pipeTo(ws).catch(e => t.equal(e, passedError, 'pipeTo should reject with the passed error'));
+ t.equal(ws.state, 'writable', 'writable stream should start out writable');
errorReadableStream(passedError);
- t.equal(rs.state, 'errored');
});
-test('Piping from a ReadableStream in waiting state to a WritableStream in writable state which becomes errored ' +
- 'after pipeTo call', t => {
- let writeCalled = false;
+test('Piping from an empty ReadableStream to a WritableStream in the writable state which becomes errored after a ' +
+ 'pipeTo call', t => {
+ t.plan(6);
+
+ const theError = new Error('cancel with me!');
let pullCount = 0;
const rs = new ReadableStream({
pull() {
++pullCount;
},
- cancel() {
+ cancel(reason) {
+ t.equal(reason, theError, 'underlying source cancellation reason should be the writable stream error');
t.equal(pullCount, 1, 'pull should have been called once by cancel-time');
- t.assert(writeCalled, 'write should have been called by cancel-time');
- t.end();
}
});
let errorWritableStream;
+ const startPromise = Promise.resolve();
const ws = new WritableStream({
start(error) {
errorWritableStream = error;
+ return startPromise;
},
write(chunk) {
- t.assert(!writeCalled, 'write should not have been called more than once');
- writeCalled = true;
-
- t.equal(chunk, 'Hello', 'the chunk passed to write should be the one written');
+ t.fail('Unexpected write call');
},
close() {
t.fail('Unexpected close call');
- t.end();
},
abort() {
t.fail('Unexpected abort call');
- t.end();
}
});
- // Needed to prepare errorWritableStream
- ws.write('Hello');
- // Wait for ws to start.
- setTimeout(() => {
+ startPromise.then(() => {
t.equal(ws.state, 'writable', 'ws should start writable');
- rs.pipeTo(ws);
- t.equal(rs.state, 'waiting', 'rs should be waiting after pipe');
+ rs.pipeTo(ws).catch(e => t.equal(e, theError, 'pipeTo should reject with the passed error'));
t.equal(ws.state, 'writable', 'ws should be writable after pipe');
- errorWritableStream();
+ errorWritableStream(theError);
t.equal(ws.state, 'errored', 'ws should be errored after erroring it');
- }, 0);
+ });
});
-test('Piping from a ReadableStream in readable state to a WritableStream in waiting state which becomes writable ' +
- 'after pipeTo call', t => {
+test('Piping from a non-empty ReadableStream to a WritableStream in the waiting state which becomes writable after a ' +
+ 'pipeTo call', t => {
let enqueue;
let pullCount = 0;
const rs = new ReadableStream({
@@ -456,54 +376,50 @@ test('Piping from a ReadableStream in readable state to a WritableStream in wait
},
cancel() {
t.fail('Unexpected cancel call');
- t.end();
}
});
- t.equal(rs.state, 'readable');
let resolveWritePromise;
+ const startPromise = Promise.resolve();
const ws = new WritableStream({
+ start() {
+ return startPromise;
+ },
write(chunk) {
if (!resolveWritePromise) {
t.equal(chunk, 'Hello');
return new Promise(resolve => resolveWritePromise = resolve);
} else {
t.equal(chunk, 'World');
-
t.equal(pullCount, 2);
-
t.end();
}
},
close() {
t.fail('Unexpected close call');
- t.end();
},
abort() {
t.fail('Unexpected abort call');
- t.end();
}
});
ws.write('Hello');
- // Wait for ws to start.
- setTimeout(() => {
+ startPromise.then(() => {
t.equal(ws.state, 'waiting');
rs.pipeTo(ws);
- t.equal(rs.state, 'waiting', 'readable stream must say it is waitable while piping (even with a nonempty queue)');
t.equal(ws.state, 'waiting');
resolveWritePromise();
ws.ready.then(() => {
t.equal(ws.state, 'writable');
})
- .catch(t.error);
- }, 0);
+ .catch(e => t.error(e));
+ });
});
-test('Piping from a ReadableStream in readable state to a WritableStream in waiting state which becomes errored ' +
- 'after pipeTo call', t => {
+test('Piping from a non-empty ReadableStream to a WritableStream in waiting state which becomes errored after a ' +
+ 'pipeTo call', t => {
let writeCalled = false;
let enqueue;
@@ -517,16 +433,17 @@ test('Piping from a ReadableStream in readable state to a WritableStream in wait
},
cancel() {
t.assert(writeCalled);
- t.equal(pullCount, 1);
+ t.equal(pullCount, 2);
t.end();
}
});
- t.equal(rs.state, 'readable');
let errorWritableStream;
+ const startPromise = Promise.resolve();
const ws = new WritableStream({
start(error) {
errorWritableStream = error;
+ return startPromise;
},
write(chunk) {
t.assert(!writeCalled);
@@ -536,32 +453,27 @@ test('Piping from a ReadableStream in readable state to a WritableStream in wait
},
close() {
t.fail('Unexpected close call');
- t.end();
},
abort() {
t.fail('Unexpected abort call');
- t.end();
}
});
ws.write('Hello');
- // Wait for ws to start.
- setTimeout(() => {
+ startPromise.then(() => {
t.equal(ws.state, 'waiting');
- t.equal(rs.state, 'readable', 'readable stream should be readable before piping starts');
rs.pipeTo(ws);
- t.equal(rs.state, 'waiting', 'readable stream must say it is waitable while piping (even with a nonempty queue)');
t.equal(ws.state, 'waiting');
errorWritableStream();
t.equal(ws.state, 'errored');
- }, 0);
+ });
});
-test('Piping from a ReadableStream in readable state which becomes errored after pipeTo call to a WritableStream in ' +
- 'waiting state', t => {
- t.plan(10);
+test('Piping from a non-empty ReadableStream which becomes errored after pipeTo call to a WritableStream in the ' +
+ 'waiting state', t => {
+ t.plan(6);
let errorReadableStream;
let pullCount = 0;
@@ -578,10 +490,13 @@ test('Piping from a ReadableStream in readable state which becomes errored after
t.end();
}
});
- t.equal(rs.state, 'readable');
let writeCalled = false;
+ const startPromise = Promise.resolve();
const ws = new WritableStream({
+ start() {
+ return startPromise;
+ },
write(chunk) {
t.assert(!writeCalled);
writeCalled = true;
@@ -599,23 +514,19 @@ test('Piping from a ReadableStream in readable state which becomes errored after
});
ws.write('Hello');
- // Wait for ws to start.
- setTimeout(() => {
+ startPromise.then(() => {
t.equal(ws.state, 'waiting');
t.equal(pullCount, 1);
- t.equal(rs.state, 'readable', 'readable stream should be readable before piping starts');
rs.pipeTo(ws);
- t.equal(rs.state, 'waiting', 'readable stream must say it is waitable while piping (even with a nonempty queue)');
t.equal(ws.state, 'waiting');
errorReadableStream();
- t.equal(rs.state, 'errored');
- }, 0);
+ });
});
-test('Piping from a ReadableStream in waiting state to a WritableStream in waiting state where both become ready ' +
- 'after pipeTo', t => {
+test('Piping from a non-empty ReadableStream to a WritableStream in the waiting state where both become ready ' +
+ 'after a pipeTo', t => {
let enqueue;
let pullCount = 0;
const rs = new ReadableStream({
@@ -627,14 +538,17 @@ test('Piping from a ReadableStream in waiting state to a WritableStream in waiti
},
cancel() {
t.fail('Unexpected cancel call');
- t.end();
}
});
let checkSecondWrite = false;
let resolveWritePromise;
+ const startPromise = Promise.resolve();
const ws = new WritableStream({
+ start() {
+ return startPromise;
+ },
write(chunk) {
if (checkSecondWrite) {
t.equal(chunk, 'Goodbye');
@@ -647,17 +561,14 @@ test('Piping from a ReadableStream in waiting state to a WritableStream in waiti
},
close() {
t.fail('Unexpected close call');
- t.end();
},
abort(reason) {
t.fail('Unexpected abort call');
- t.end();
}
});
ws.write('Hello');
- // Wait for ws to start.
- setTimeout(() => {
+ startPromise.then(() => {
t.assert(resolveWritePromise);
t.equal(ws.state, 'waiting');
@@ -668,17 +579,17 @@ test('Piping from a ReadableStream in waiting state to a WritableStream in waiti
// Check that nothing happens before calling done(), and then call done()
// to check that pipeTo is woken up.
setTimeout(() => {
- t.equal(pullCount, 1);
+ t.equal(pullCount, 2);
checkSecondWrite = true;
resolveWritePromise();
}, 100);
- }, 0);
+ });
});
-test('Piping from a ReadableStream in waiting state to a WritableStream in waiting state which becomes writable ' +
- 'after pipeTo call', t => {
+test('Piping from an empty ReadableStream to a WritableStream in the waiting state which becomes writable after a ' +
+ 'pipeTo call', t => {
let pullCount = 0;
const rs = new ReadableStream({
pull() {
@@ -691,7 +602,11 @@ test('Piping from a ReadableStream in waiting state to a WritableStream in waiti
});
let resolveWritePromise;
+ const startPromise = Promise.resolve();
const ws = new WritableStream({
+ start() {
+ return startPromise;
+ },
write(chunk) {
t.assert(!resolveWritePromise);
t.equal(chunk, 'Hello');
@@ -699,36 +614,32 @@ test('Piping from a ReadableStream in waiting state to a WritableStream in waiti
},
close() {
t.fail('Unexpected close call');
- t.end();
},
- abort(reason) {
+ abort() {
t.fail('Unexpected abort call');
- t.end();
}
});
ws.write('Hello');
- // Wait for ws to start.
- setTimeout(() => {
+ startPromise.then(() => {
t.equal(ws.state, 'waiting');
rs.pipeTo(ws);
- t.equal(rs.state, 'waiting');
t.equal(ws.state, 'waiting');
+ t.equal(pullCount, 1);
resolveWritePromise();
- // Check that nothing happens.
setTimeout(() => {
t.equal(pullCount, 1);
t.end();
}, 100);
- }, 0);
+ });
});
-test('Piping from a ReadableStream in waiting state which becomes closed after pipeTo call to a WritableStream in ' +
- 'waiting state', t => {
- t.plan(5);
+test('Piping from an empty ReadableStream which becomes closed after a pipeTo call to a WritableStream in the ' +
+ 'waiting state whose writes never complete', t => {
+ t.plan(4);
let closeReadableStream;
let pullCount = 0;
@@ -741,55 +652,49 @@ test('Piping from a ReadableStream in waiting state which becomes closed after p
},
cancel() {
t.fail('Unexpected cancel call');
- t.end();
}
});
let writeCalled = false;
+ const startPromise = Promise.resolve();
const ws = new WritableStream({
+ start() {
+ return startPromise;
+ },
write(chunk) {
if (!writeCalled) {
- t.equal(chunk, 'Hello');
+ t.equal(chunk, 'Hello', 'the chunk should be written to the writable stream');
writeCalled = true;
+ closeReadableStream();
} else {
t.fail('Unexpected extra write call');
- t.end();
}
return new Promise(() => {});
},
close() {
t.fail('Unexpected close call');
- t.end();
},
- abort(reason) {
+ abort() {
t.fail('Unexpected abort call');
- t.end();
}
});
ws.write('Hello');
- // Wait for ws to start.
- setTimeout(() => {
- t.equal(ws.state, 'waiting');
+ startPromise.then(() => {
+ t.equal(ws.state, 'waiting', 'the writable stream should be in the waiting state after starting');
rs.pipeTo(ws);
- closeReadableStream();
-
- t.equal(rs.state, 'closed');
-
- // Check that nothing happens.
setTimeout(() => {
- t.equal(ws.state, 'closing');
-
- t.equal(pullCount, 1);
- }, 100);
+ t.equal(ws.state, 'waiting', 'the writable stream should still be waiting since the write never completed');
+ t.equal(pullCount, 1, 'pull should have been called only once');
+ }, 50);
});
});
-test('Piping from a ReadableStream in waiting state which becomes errored after pipeTo call to a WritableStream in ' +
- 'waiting state', t => {
- t.plan(6);
+test('Piping from an empty ReadableStream which becomes errored after a pipeTo call to a WritableStream in the ' +
+ 'waiting state', t => {
+ t.plan(5);
let errorReadableStream;
let pullCount = 0;
@@ -802,26 +707,27 @@ test('Piping from a ReadableStream in waiting state which becomes errored after
},
cancel() {
t.fail('Unexpected cancel call');
- t.end();
}
});
let writeCalled = false;
const passedError = new Error('horrible things');
+ const startPromise = Promise.resolve();
const ws = new WritableStream({
+ start() {
+ return startPromise;
+ },
write(chunk) {
if (!writeCalled) {
t.equal(chunk, 'Hello');
writeCalled = true;
} else {
t.fail('Unexpected extra write call');
- t.end();
}
return new Promise(() => {});
},
close() {
t.fail('Unexpected close call');
- t.end();
},
abort(reason) {
t.equal(reason, passedError);
@@ -831,15 +737,12 @@ test('Piping from a ReadableStream in waiting state which becomes errored after
});
ws.write('Hello');
- // Wait for ws to start.
- setTimeout(() => {
+ startPromise.then(() => {
t.equal(ws.state, 'waiting');
rs.pipeTo(ws);
errorReadableStream(passedError);
-
- t.equal(rs.state, 'errored');
});
});
@@ -885,12 +788,11 @@ test('Piping to a stream that has been aborted passes through the error as the c
const passedReason = new Error('I don\'t like you.');
ws.abort(passedReason);
- rs.pipeTo(ws);
-
- setTimeout(() => {
+ rs.pipeTo(ws).catch(e => {
+ t.equal(e, passedReason, 'pipeTo rejection reason should be the cancellation reason');
t.equal(recordedReason, passedReason, 'the recorded cancellation reason must be the passed abort reason');
t.end();
- }, 10);
+ });
});
test('Piping to a stream and then aborting it passes through the error as the cancellation reason', t => {
@@ -904,13 +806,14 @@ test('Piping to a stream and then aborting it passes through the error as the ca
const ws = new WritableStream();
const passedReason = new Error('I don\'t like you.');
- rs.pipeTo(ws);
+ const pipeToPromise = rs.pipeTo(ws);
ws.abort(passedReason);
- setTimeout(() => {
+ pipeToPromise.catch(e => {
+    t.equal(e, passedReason, 'pipeTo rejection reason should be the abort reason');
t.equal(recordedReason, passedReason, 'the recorded cancellation reason must be the passed abort reason');
t.end();
- }, 10);
+ });
});
test('Piping to a stream that has been closed propagates a TypeError cancellation reason backward', t => {
@@ -924,12 +827,11 @@ test('Piping to a stream that has been closed propagates a TypeError cancellatio
const ws = new WritableStream();
ws.close();
- rs.pipeTo(ws);
-
- setTimeout(() => {
- t.equal(recordedReason.constructor, TypeError, 'the recorded cancellation reason must be a TypeError');
+ rs.pipeTo(ws).catch(e => {
+ t.equal(e.constructor, TypeError, 'the rejection reason for the pipeTo promise should be a TypeError');
+ t.equal(recordedReason.constructor, TypeError, 'the recorded cancellation reason should be a TypeError');
t.end();
- }, 10);
+ });
});
test('Piping to a stream and then closing it propagates a TypeError cancellation reason backward', t => {
@@ -942,26 +844,27 @@ test('Piping to a stream and then closing it propagates a TypeError cancellation
const ws = new WritableStream();
- rs.pipeTo(ws);
+ const pipeToPromise = rs.pipeTo(ws);
ws.close();
- setTimeout(() => {
- t.equal(recordedReason.constructor, TypeError, 'the recorded cancellation reason must be a TypeError');
+ pipeToPromise.catch(e => {
+ t.equal(e.constructor, TypeError, 'the rejection reason for the pipeTo promise should be a TypeError');
+ t.equal(recordedReason.constructor, TypeError, 'the recorded cancellation reason should be a TypeError');
t.end();
- }, 10);
+ });
});
-test('Piping to a stream that synchronously errors passes through the error as the cancellation reason', t => {
+test('Piping to a stream that errors on write should pass through the error as the cancellation reason', t => {
let recordedReason;
const rs = new ReadableStream({
start(enqueue, close) {
enqueue('a');
enqueue('b');
enqueue('c');
- close();
},
cancel(reason) {
- recordedReason = reason;
+ t.equal(reason, passedError, 'the recorded cancellation reason must be the passed error');
+ t.end();
}
});
@@ -980,15 +883,10 @@ test('Piping to a stream that synchronously errors passes through the error as t
});
rs.pipeTo(ws);
-
- setTimeout(() => {
- t.equal(recordedReason, passedError, 'the recorded cancellation reason must be the passed error');
- t.end();
- }, 10);
});
-test('Piping to a stream that asynchronously errors passes through the error as the cancellation reason', t => {
- let recordedReason;
+test('Piping to a stream that errors on write should not pass through the error if the stream is already closed', t => {
+ let cancelCalled = false;
const rs = new ReadableStream({
start(enqueue, close) {
enqueue('a');
@@ -996,8 +894,8 @@ test('Piping to a stream that asynchronously errors passes through the error as
enqueue('c');
close();
},
- cancel(reason) {
- recordedReason = reason;
+ cancel() {
+ cancelCalled = true;
}
});
@@ -1007,7 +905,7 @@ test('Piping to a stream that asynchronously errors passes through the error as
write(chunk) {
return new Promise((resolve, reject) => {
if (++written > 1) {
- setTimeout(() => reject(passedError), 10);
+ reject(passedError);
} else {
resolve();
}
@@ -1015,24 +913,27 @@ test('Piping to a stream that asynchronously errors passes through the error as
}
});
- rs.pipeTo(ws);
-
- setTimeout(() => {
- t.equal(recordedReason, passedError, 'the recorded cancellation reason must be the passed error');
- t.end();
- }, 20);
+ rs.pipeTo(ws).then(
+ () => t.fail('pipeTo should not fulfill'),
+ r => {
+ t.equal(r, passedError, 'pipeTo should reject with the same error as the write');
+ t.equal(cancelCalled, false, 'cancel should not have been called');
+ t.end();
+ }
+ );
});
-test('Piping to a stream that errors on the last chunk passes through the error to a non-closed producer', t => {
+test('Piping to a stream that errors soon after writing should pass through the error as the cancellation reason', t => {
let recordedReason;
const rs = new ReadableStream({
start(enqueue, close) {
enqueue('a');
enqueue('b');
- setTimeout(close, 10);
+ enqueue('c');
},
cancel(reason) {
- recordedReason = reason;
+ t.equal(reason, passedError, 'the recorded cancellation reason must be the passed error');
+ t.end();
}
});
@@ -1042,41 +943,7 @@ test('Piping to a stream that errors on the last chunk passes through the error
write(chunk) {
return new Promise((resolve, reject) => {
if (++written > 1) {
- reject(passedError);
- } else {
- resolve();
- }
- });
- }
- });
-
- rs.pipeTo(ws);
-
- setTimeout(() => {
- t.equal(recordedReason, passedError, 'the recorded cancellation reason must be the passed error');
- t.end();
- }, 20);
-});
-
-test('Piping to a stream that errors on the last chunk does not pass through the error to a closed producer', t => {
- let cancelCalled = false;
- const rs = new ReadableStream({
- start(enqueue, close) {
- enqueue('a');
- enqueue('b');
- close();
- },
- cancel() {
- cancelCalled = true;
- }
- });
-
- let written = 0;
- const ws = new WritableStream({
- write(chunk) {
- return new Promise((resolve, reject) => {
- if (++written > 1) {
- reject(new Error('producer will not see this'));
+ setTimeout(() => reject(passedError), 10);
} else {
resolve();
}
@@ -1085,44 +952,95 @@ test('Piping to a stream that errors on the last chunk does not pass through the
});
rs.pipeTo(ws);
-
- setTimeout(() => {
- t.equal(cancelCalled, false, 'cancel must not be called');
- t.equal(ws.state, 'errored');
- t.end();
- }, 20);
});
-test('Piping to a writable stream that does not consume the writes fast enough exerts backpressure on the source', t => {
- t.plan(2);
-
+test('Piping to a writable stream that does not consume the writes fast enough exerts backpressure on the source',
+ t => {
const enqueueReturnValues = [];
const rs = new ReadableStream({
start(enqueue, close) {
- setTimeout(() => enqueueReturnValues.push(enqueue('a')), 10);
- setTimeout(() => enqueueReturnValues.push(enqueue('b')), 20);
- setTimeout(() => enqueueReturnValues.push(enqueue('c')), 30);
- setTimeout(() => enqueueReturnValues.push(enqueue('d')), 40);
- setTimeout(() => close(), 50);
+ setTimeout(() => enqueueReturnValues.push(enqueue('a')), 100);
+ setTimeout(() => enqueueReturnValues.push(enqueue('b')), 200);
+ setTimeout(() => enqueueReturnValues.push(enqueue('c')), 300);
+ setTimeout(() => enqueueReturnValues.push(enqueue('d')), 400);
+ setTimeout(() => close(), 500);
}
});
- let writtenValues = [];
+ const chunksGivenToWrite = [];
+ const chunksFinishedWriting = [];
+ const startPromise = Promise.resolve();
const ws = new WritableStream({
+ start() {
+ return startPromise;
+ },
write(chunk) {
+ chunksGivenToWrite.push(chunk);
return new Promise(resolve => {
setTimeout(() => {
- writtenValues.push(chunk);
+ chunksFinishedWriting.push(chunk);
resolve();
- }, 25);
+ }, 350);
});
}
});
- setTimeout(() => {
+ startPromise.then(() => {
rs.pipeTo(ws).then(() => {
- t.deepEqual(enqueueReturnValues, [true, true, false, false], 'backpressure was correctly exerted at the source');
- t.deepEqual(writtenValues, ['a', 'b', 'c', 'd'], 'all chunks were written');
+ t.deepEqual(enqueueReturnValues, [true, true, true, false], 'backpressure was correctly exerted at the source');
+ t.deepEqual(chunksFinishedWriting, ['a', 'b', 'c', 'd'], 'all chunks were written');
+ t.end();
});
- }, 0);
+
+ t.equal(ws.state, 'writable', 'at t = 0 ms, ws should be writable');
+
+ setTimeout(() => {
+ t.equal(ws.state, 'waiting', 'at t = 125 ms, ws should be waiting');
+ t.deepEqual(chunksGivenToWrite, ['a'], 'at t = 125 ms, ws.write should have been called with one chunk');
+ t.deepEqual(chunksFinishedWriting, [], 'at t = 125 ms, no chunks should have finished writing');
+
+ // The queue was empty when 'a' (the very first chunk) was enqueued
+ t.deepEqual(enqueueReturnValues, [true],
+ 'at t = 125 ms, the one enqueued chunk in rs did not cause backpressure');
+ }, 125);
+
+ setTimeout(() => {
+ t.equal(ws.state, 'waiting', 'at t = 225 ms, ws should be waiting');
+ t.deepEqual(chunksGivenToWrite, ['a'], 'at t = 225 ms, ws.write should have been called with one chunk');
+ t.deepEqual(chunksFinishedWriting, [], 'at t = 225 ms, no chunks should have finished writing');
+
+ // When 'b' was enqueued at 200 ms, the queue was also empty, since immediately after enqueuing 'a' at
+ // t = 100 ms, it was dequeued in order to fulfill the read() call that was made at time t = 0.
+ t.deepEqual(enqueueReturnValues, [true, true],
+ 'at t = 225 ms, the two enqueued chunks in rs did not cause backpressure');
+ }, 225);
+
+ setTimeout(() => {
+ t.equal(ws.state, 'waiting', 'at t = 325 ms, ws should be waiting');
+ t.deepEqual(chunksGivenToWrite, ['a'], 'at t = 325 ms, ws.write should have been called with one chunk');
+ t.deepEqual(chunksFinishedWriting, [], 'at t = 325 ms, no chunks should have finished writing');
+
+ // When 'c' was enqueued at 300 ms, the queue was again empty, since at time t = 200 ms when 'b' was enqueued,
+ // it was immediately dequeued in order to fulfill the second read() call that was made at time t = 0.
+ t.deepEqual(enqueueReturnValues, [true, true, true],
+ 'at t = 325 ms, the three enqueued chunks in rs did not cause backpressure');
+ }, 325);
+
+ setTimeout(() => {
+ t.equal(ws.state, 'waiting', 'at t = 425 ms, ws should be waiting');
+ t.deepEqual(chunksGivenToWrite, ['a'], 'at t = 425 ms, ws.write should have been called with one chunk');
+ t.deepEqual(chunksFinishedWriting, [], 'at t = 425 ms, no chunks should have finished writing');
+
+      // When 'd' was enqueued at 400 ms, the queue was *not* empty. 'c' was still in it, since the write() of 'a' will
+ // not finish until t = 100 ms + 350 ms = 450 ms. Thus backpressure should have been exerted.
+ t.deepEqual(enqueueReturnValues, [true, true, true, false],
+        'at t = 425 ms, the fourth enqueued chunk in rs did cause backpressure');
+ }, 425);
+
+ setTimeout(() => {
+ t.equal(ws.state, 'waiting', 'at t = 475 ms, ws should be waiting');
+ t.deepEqual(chunksGivenToWrite, ['a', 'b'], 'at t = 475 ms, ws.write should have been called with two chunks');
+ t.deepEqual(chunksFinishedWriting, ['a'], 'at t = 475 ms, one chunk should have finished writing');
+ }, 475);
+ });
});
diff --git a/reference-implementation/test/readable-stream-cancel.js b/reference-implementation/test/readable-stream-cancel.js
index 59e7f3da3..6d192de56 100644
--- a/reference-implementation/test/readable-stream-cancel.js
+++ b/reference-implementation/test/readable-stream-cancel.js
@@ -4,10 +4,10 @@ import RandomPushSource from './utils/random-push-source';
import readableStreamToArray from './utils/readable-stream-to-array';
import sequentialReadableStream from './utils/sequential-rs';
-test('ReadableStream canceling an infinite stream', t => {
+test('ReadableStream cancellation: integration test on an infinite stream derived from a random push source', t => {
const randomSource = new RandomPushSource();
- let cancelationFinished = false;
+ let cancellationFinished = false;
const rs = new ReadableStream({
start(enqueue, close, error) {
randomSource.ondata = enqueue;
@@ -24,99 +24,33 @@ test('ReadableStream canceling an infinite stream', t => {
randomSource.onend();
return new Promise(resolve => setTimeout(() => {
- cancelationFinished = true;
+ cancellationFinished = true;
resolve();
}, 50));
}
});
readableStreamToArray(rs).then(
- storage => {
- t.equal(rs.state, 'closed', 'stream should be closed');
- t.equal(cancelationFinished, false, 'it did not wait for the cancellation process to finish before closing');
- t.ok(storage.length > 0, 'should have gotten some data written through the pipe');
- for (let i = 0; i < storage.length; i++) {
- t.equal(storage[i].length, 128, 'each chunk has 128 bytes');
+ chunks => {
+ t.equal(cancellationFinished, false, 'it did not wait for the cancellation process to finish before closing');
+ t.ok(chunks.length > 0, 'at least one chunk should be read');
+ for (let i = 0; i < chunks.length; i++) {
+ t.equal(chunks[i].length, 128, `chunk ${i + 1} should have 128 bytes`);
}
},
- () => {
- t.fail('the stream should be successfully read to the end');
- t.end();
- }
+ e => t.error(e)
);
setTimeout(() => {
rs.cancel().then(() => {
- t.equal(cancelationFinished, true, 'it returns a promise that is fulfilled when the cancellation finishes');
+ t.equal(cancellationFinished, true, 'it returns a promise that is fulfilled when the cancellation finishes');
t.end();
- });
+ })
+ .catch(e => t.error(e));
}, 150);
});
-test('ReadableStream cancellation puts the stream in a closed state (no chunks pulled yet)', t => {
- const rs = sequentialReadableStream(5);
-
- t.plan(5);
-
- rs.closed.then(
- () => t.assert(true, 'closed promise vended before the cancellation should fulfill'),
- () => t.fail('closed promise vended before the cancellation should not be rejected')
- );
-
- rs.ready.then(
- () => t.assert(true, 'ready promise vended before the cancellation should fulfill'),
- () => t.fail('ready promise vended before the cancellation should not be rejected')
- );
-
- rs.cancel();
-
- t.equal(rs.state, 'closed', 'state should be closed');
-
- rs.closed.then(
- () => t.assert(true, 'closed promise vended after the cancellation should fulfill'),
- () => t.fail('closed promise vended after the cancellation should not be rejected')
- );
- rs.ready.then(
- () => t.assert(true, 'ready promise vended after the cancellation should fulfill'),
- () => t.fail('ready promise vended after the cancellation should not be rejected')
- );
-});
-
-test('ReadableStream cancellation puts the stream in a closed state (after waiting for chunks)', t => {
- const rs = sequentialReadableStream(5);
-
- t.plan(5);
-
- rs.ready.then(
- () => {
- rs.closed.then(
- () => t.assert(true, 'closed promise vended before the cancellation should fulfill'),
- () => t.fail('closed promise vended before the cancellation should not be rejected')
- );
-
- rs.ready.then(
- () => t.assert(true, 'ready promise vended before the cancellation should fulfill'),
- () => t.fail('ready promise vended before the cancellation should not be rejected')
- );
-
- rs.cancel();
-
- t.equal(rs.state, 'closed', 'state should be closed');
-
- rs.closed.then(
- () => t.assert(true, 'closed promise vended after the cancellation should fulfill'),
- () => t.fail('closed promise vended after the cancellation should not be rejected')
- );
- rs.ready.then(
- () => t.assert(true, 'ready promise vended after the cancellation should fulfill'),
- () => t.fail('ready promise vended after the cancellation should not be rejected')
- );
- },
- r => t.ifError(r)
- );
-});
-
-test('ReadableStream explicit cancellation passes through the given reason', t => {
+test('ReadableStream cancellation: cancel(reason) should pass through the given reason to the underlying source', t => {
let recordedReason;
const rs = new ReadableStream({
cancel(reason) {
@@ -127,164 +61,109 @@ test('ReadableStream explicit cancellation passes through the given reason', t =
const passedReason = new Error('Sorry, it just wasn\'t meant to be.');
rs.cancel(passedReason);
- t.equal(recordedReason, passedReason);
+ t.equal(recordedReason, passedReason,
+ 'the error passed to the underlying source\'s cancel method should equal the one passed to the stream\'s cancel');
t.end();
});
-test('ReadableStream rs.cancel() on a closed stream returns a promise resolved with undefined', t => {
- const rs = new ReadableStream({
- start(enqueue, close) {
- close();
- }
- });
-
- t.equal(rs.state, 'closed');
- const cancelPromise = rs.cancel(undefined);
- cancelPromise.then(value => {
- t.equal(value, undefined, 'fulfillment value of cancelPromise must be undefined');
- t.end();
- }).catch(r => {
- t.fail('cancelPromise is rejected');
- t.end();
- });
-});
-
-test('ReadableStream rs.cancel() on an errored stream returns a promise rejected with the error', t => {
- const passedError = new Error('aaaugh!!');
-
- const rs = new ReadableStream({
- start(enqueue, close, error) {
- error(passedError);
- }
- });
-
- t.equal(rs.state, 'errored');
- const cancelPromise = rs.cancel(undefined);
- cancelPromise.then(() => {
- t.fail('cancelPromise is fulfilled');
- t.end();
- }).catch(r => {
- t.equal(r, passedError, 'cancelPromise must be rejected with passedError');
- t.end();
- });
-});
-
-test('ReadableStream the fulfillment value of the promise rs.cancel() returns must be undefined', t => {
- const rs = new ReadableStream({
- cancel(reason) {
- return "Hello";
- }
- });
+test('ReadableStream cancellation: returning a value from the underlying source\'s cancel should not affect the ' +
+ 'fulfillment value of the promise returned by the stream\'s cancel', t => {
+ t.plan(1);
- const cancelPromise = rs.cancel(undefined);
- cancelPromise.then(value => {
- t.equal(value, undefined, 'fulfillment value of cancelPromise must be undefined');
- t.end();
- }).catch(r => {
- t.fail('cancelPromise is rejected');
- t.end();
- });
-});
-
-test('ReadableStream if source\'s cancel throws, the promise returned by rs.cancel() rejects', t => {
- const errorInCancel = new Error('Sorry, it just wasn\'t meant to be.');
const rs = new ReadableStream({
cancel(reason) {
- throw errorInCancel;
+ return 'Hello';
}
});
- const cancelPromise = rs.cancel(undefined);
- cancelPromise.then(
- () => {
- t.fail('cancelPromise is fulfilled unexpectedly');
- t.end();
- },
- r => {
- t.equal(r, errorInCancel, 'rejection reason of cancelPromise must be errorInCancel');
- t.end();
- }
+ rs.cancel().then(
+ v => t.equal(v, undefined, 'cancel() return value should be fulfilled with undefined'),
+ () => t.fail('cancel() return value should not be rejected')
);
});
-test('ReadableStream onCancel returns a promise that will be resolved asynchronously', t => {
+test('ReadableStream cancellation: if the underlying source\'s cancel method returns a promise, the promise returned ' +
+ 'by the stream\'s cancel should fulfill when that one does', t => {
+
let resolveSourceCancelPromise;
+ let sourceCancelPromiseHasFulfilled = false;
const rs = new ReadableStream({
cancel() {
- return new Promise((resolve, reject) => {
+ const sourceCancelPromise = new Promise((resolve, reject) => {
resolveSourceCancelPromise = resolve;
});
+
+ sourceCancelPromise.then(() => {
+ sourceCancelPromiseHasFulfilled = true;
+ });
+
+ return sourceCancelPromise;
}
});
- let hasResolvedSourceCancelPromise = false;
- const cancelPromise = rs.cancel();
- cancelPromise.then(
+ rs.cancel().then(
value => {
- t.equal(hasResolvedSourceCancelPromise, true,
- 'cancelPromise must not be resolved before the promise returned by onCancel is resolved');
- t.equal(value, undefined, 'cancelPromise must be fulfilled with undefined');
+ t.equal(sourceCancelPromiseHasFulfilled, true,
+ 'cancel() return value should be fulfilled only after the promise returned by the underlying source\'s cancel');
+ t.equal(value, undefined, 'cancel() return value should be fulfilled with undefined');
t.end();
- }
- ).catch(
- r => {
- t.fail('cancelPromise is rejected');
- t.end();
- }
+ },
+ () => t.fail('cancel() return value should not be rejected')
);
setTimeout(() => {
- hasResolvedSourceCancelPromise = true;
resolveSourceCancelPromise('Hello');
- }, 0);
+ }, 30);
});
-test('ReadableStream onCancel returns a promise that will be rejected asynchronously', t => {
+test('ReadableStream cancellation: if the underlying source\'s cancel method returns a promise, the promise returned ' +
+ 'by the stream\'s cancel should reject when that one does', t => {
let rejectSourceCancelPromise;
+ let sourceCancelPromiseHasRejected = false;
const rs = new ReadableStream({
cancel() {
- return new Promise((resolve, reject) => {
+ const sourceCancelPromise = new Promise((resolve, reject) => {
rejectSourceCancelPromise = reject;
});
+
+ sourceCancelPromise.catch(() => {
+ sourceCancelPromiseHasRejected = true;
+ });
+
+ return sourceCancelPromise;
}
});
- let hasRejectedSourceCancelPromise = false;
const errorInCancel = new Error('Sorry, it just wasn\'t meant to be.');
- const cancelPromise = rs.cancel();
- cancelPromise.then(
- value => {
- t.fail('cancelPromise is fulfilled');
- t.end();
- },
+ rs.cancel().then(
+ () => t.fail('cancel() return value should not be rejected'),
r => {
- t.equal(hasRejectedSourceCancelPromise, true,
- 'cancelPromise must not be resolved before the promise returned by onCancel is resolved');
- t.equal(r, errorInCancel, 'cancelPromise must be rejected with errorInCancel');
+ t.equal(sourceCancelPromiseHasRejected, true,
+ 'cancel() return value should be rejected only after the promise returned by the underlying source\'s cancel');
+ t.equal(r, errorInCancel,
+ 'cancel() return value should be rejected with the underlying source\'s rejection reason');
t.end();
}
);
setTimeout(() => {
- hasRejectedSourceCancelPromise = true;
rejectSourceCancelPromise(errorInCancel);
- }, 0);
+ }, 30);
});
-test('ReadableStream cancelation before start finishes prevents pull() from being called', t => {
+test('ReadableStream cancellation: cancelling before start finishes should prevent pull() from being called', t => {
const rs = new ReadableStream({
pull() {
- t.fail('unexpected pull call');
+ t.fail('pull should not have been called');
t.end();
}
});
- rs.cancel();
-
- setTimeout(() => {
- t.pass('pull was never called');
+ Promise.all([rs.cancel(), rs.closed]).then(() => {
+ t.pass('pull should never have been called');
t.end();
- }, 0);
+ })
+ .catch(e => t.error(e));
});
diff --git a/reference-implementation/test/readable-stream-reader.js b/reference-implementation/test/readable-stream-reader.js
new file mode 100644
index 000000000..c886d36fc
--- /dev/null
+++ b/reference-implementation/test/readable-stream-reader.js
@@ -0,0 +1,208 @@
+const test = require('tape-catch');
+
+let ReadableStreamReader;
+
+test('Can get the ReadableStreamReader constructor indirectly', t => {
+ t.doesNotThrow(() => {
+ // It's not exposed globally, but we test a few of its properties here.
+ ReadableStreamReader = (new ReadableStream()).getReader().constructor;
+ });
+ t.end();
+});
+
+test('Constructing a ReadableStreamReader directly should fail if the stream is already locked (via direct ' +
+ 'construction)', t => {
+ const rs = new ReadableStream();
+ t.doesNotThrow(() => new ReadableStreamReader(rs), 'constructing directly the first time should be fine');
+ t.throws(() => new ReadableStreamReader(rs), /TypeError/, 'constructing directly the second time should fail');
+ t.end();
+});
+
+test('Getting a ReadableStreamReader via getReader should fail if the stream is already locked (via direct ' +
+  'construction)', t => {
+ const rs = new ReadableStream();
+ t.doesNotThrow(() => new ReadableStreamReader(rs), 'constructing directly should be fine');
+ t.throws(() => rs.getReader(), /TypeError/, 'getReader() should fail');
+ t.end();
+});
+
+test('Constructing a ReadableStreamReader directly should fail if the stream is already locked (via getReader)',
+ t => {
+ const rs = new ReadableStream();
+ t.doesNotThrow(() => rs.getReader(), 'getReader() should be fine');
+ t.throws(() => new ReadableStreamReader(rs), /TypeError/, 'constructing directly should fail');
+ t.end();
+});
+
+test('Constructing a ReadableStreamReader directly should fail if the stream is already closed',
+ t => {
+ const rs = new ReadableStream({
+ start(enqueue, close) {
+ close();
+ }
+ });
+
+ t.throws(() => new ReadableStreamReader(rs), /TypeError/, 'constructing directly should fail');
+ t.end();
+});
+
+test('Constructing a ReadableStreamReader directly should fail if the stream is already errored',
+ t => {
+ const theError = new Error('don\'t say i didn\'t warn ya');
+ const rs = new ReadableStream({
+ start(enqueue, close, error) {
+ error(theError);
+ }
+ });
+
+ t.throws(() => new ReadableStreamReader(rs), /don't say i didn't warn ya/, 'constructing directly should throw the stored error');
+ t.end();
+});
+
+test('Reading from a reader for an empty stream will wait until a chunk is available', t => {
+ let enqueue;
+ const rs = new ReadableStream({
+ start(e) {
+ enqueue = e;
+ }
+ });
+ const reader = rs.getReader();
+
+ t.equal(reader.isActive, true, 'reader is active to start with');
+
+ reader.read().then(result => {
+ t.deepEqual(result, { value: 'a', done: false }, 'read() should fulfill with the enqueued chunk');
+ t.equal(reader.isActive, true, 'reader is still active');
+ t.end();
+ });
+
+ enqueue('a');
+});
+
+test('cancel() on a reader releases the reader before calling through', t => {
+ t.plan(3);
+
+ const passedReason = new Error('it wasn\'t the right time, sorry');
+ const rs = new ReadableStream({
+ cancel(reason) {
+ t.equal(reader.isActive, false, 'reader should be released by the time underlying source cancel is called');
+ t.equal(reason, passedReason, 'the cancellation reason is passed through to the underlying source');
+ }
+ });
+
+ const reader = rs.getReader();
+ reader.cancel(passedReason).then(
+ () => t.pass('reader.cancel() should fulfill'),
+ e => t.fail('reader.cancel() should not reject')
+ );
+});
+
+test('closed should be fulfilled after stream is closed (stream .closed access before acquiring)', t => {
+ t.plan(2);
+
+ let doClose;
+ const rs = new ReadableStream({
+ start(enqueue, close) {
+ doClose = close;
+ }
+ });
+
+ rs.closed.then(() => {
+ t.equal(reader.isActive, false, 'reader is no longer active when stream closed is fulfilled');
+ });
+
+ const reader = rs.getReader();
+ doClose();
+
+ reader.closed.then(() => {
+ t.equal(reader.isActive, false, 'reader is no longer active when reader closed is fulfilled');
+ });
+});
+
+test('closed should be fulfilled after reader releases its lock (multiple stream locks)', t => {
+ t.plan(6);
+
+ let doClose;
+ const rs = new ReadableStream({
+ start(enqueue, close) {
+ doClose = close;
+ }
+ });
+
+ const reader1 = rs.getReader();
+
+ rs.closed.then(() => {
+ t.equal(reader1.isActive, false, 'reader1 is no longer active when stream closed is fulfilled');
+ t.equal(reader2.isActive, false, 'reader2 is no longer active when stream closed is fulfilled');
+ });
+
+ reader1.releaseLock();
+
+ const reader2 = rs.getReader();
+ doClose();
+
+ reader1.closed.then(() => {
+ t.equal(reader1.isActive, false, 'reader1 is no longer active when reader1 closed is fulfilled');
+ t.equal(reader2.isActive, false, 'reader2 is no longer active when reader1 closed is fulfilled');
+ });
+
+ reader2.closed.then(() => {
+ t.equal(reader1.isActive, false, 'reader1 is no longer active when reader2 closed is fulfilled');
+ t.equal(reader2.isActive, false, 'reader2 is no longer active when reader2 closed is fulfilled');
+ });
+});
+
+test('Multiple readers can access the stream in sequence', t => {
+ const rs = new ReadableStream({
+ start(enqueue, close) {
+ enqueue('a');
+ enqueue('b');
+ close();
+ }
+ });
+
+ const reader1 = rs.getReader();
+ reader1.read().then(r => t.deepEqual(r, { value: 'a', done: false }, 'reading the first chunk from reader1 works'));
+ reader1.releaseLock();
+
+ const reader2 = rs.getReader();
+ reader2.read().then(r => t.deepEqual(r, { value: 'b', done: false }, 'reading the second chunk from reader2 works'));
+ reader2.releaseLock();
+
+ t.end();
+});
+
+test('Cannot use an already-released reader to unlock a stream again', t => {
+ t.plan(2);
+
+ const rs = new ReadableStream();
+
+ const reader1 = rs.getReader();
+ reader1.releaseLock();
+
+ const reader2 = rs.getReader();
+ t.equal(reader2.isActive, true, 'reader2 state is active before releasing reader1');
+
+ reader1.releaseLock();
+ t.equal(reader2.isActive, true, 'reader2 state is still active after releasing reader1 again');
+});
+
+test('cancel() on a released reader is a no-op and does not pass through', t => {
+ const rs = new ReadableStream({
+ start(enqueue) {
+ enqueue('a');
+ },
+ cancel() {
+ t.fail('underlying source cancel should not be called');
+ }
+ });
+
+ const reader = rs.getReader();
+ reader.releaseLock();
+ reader.cancel().then(v => t.equal(v, undefined, 'cancel() on the reader should fulfill with undefined'));
+
+ const reader2 = rs.getReader();
+ reader2.read().then(r => t.deepEqual(r, { value: 'a', done: false }, 'a new reader should be able to read a chunk'));
+
+ setTimeout(() => t.end(), 50);
+});
diff --git a/reference-implementation/test/readable-stream-templated.js b/reference-implementation/test/readable-stream-templated.js
new file mode 100644
index 000000000..192f2d113
--- /dev/null
+++ b/reference-implementation/test/readable-stream-templated.js
@@ -0,0 +1,165 @@
+import templatedRSEmpty from './templated/readable-stream-empty';
+import templatedRSClosed from './templated/readable-stream-closed';
+import templatedRSErrored from './templated/readable-stream-errored';
+import templatedRSErroredAsyncOnly from './templated/readable-stream-errored-async-only';
+import templatedRSErroredSyncOnly from './templated/readable-stream-errored-sync-only';
+import templatedRSTwoChunksClosed from './templated/readable-stream-two-chunks-closed';
+import templatedRSEmptyReader from './templated/readable-stream-empty-reader';
+import templatedRSClosedReader from './templated/readable-stream-closed-reader';
+import templatedRSErroredReader from './templated/readable-stream-errored-reader';
+import templatedRSTwoChunksOpenReader from './templated/readable-stream-two-chunks-open-reader';
+import templatedRSTwoChunksClosedReader from './templated/readable-stream-two-chunks-closed-reader';
+
+templatedRSEmpty('ReadableStream (empty)',
+ () => new ReadableStream()
+);
+
+templatedRSEmptyReader('ReadableStream (empty) reader',
+ () => streamAndDefaultReader(new ReadableStream())
+);
+
+templatedRSClosed('ReadableStream (closed via call in start)',
+ () => new ReadableStream({
+ start(enqueue, close) { close(); }
+ })
+);
+
+templatedRSClosedReader('ReadableStream (closed via call in start) reader',
+ () => {
+ let doClose;
+ const stream = new ReadableStream({
+ start(enqueue, close) {
+ doClose = close;
+ }
+ });
+ const result = streamAndDefaultReader(stream);
+ doClose();
+ return result;
+ }
+);
+
+templatedRSClosed('ReadableStream (closed via cancel)',
+ () => {
+ const stream = new ReadableStream();
+ stream.cancel();
+ return stream;
+ }
+);
+
+templatedRSClosedReader('ReadableStream (closed via cancel) reader',
+ () => {
+ const stream = new ReadableStream();
+ const result = streamAndDefaultReader(stream);
+ stream.cancel();
+ return result;
+ }
+);
+
+const theError = new Error('boo!');
+
+templatedRSErrored('ReadableStream (errored via call in start)',
+ () => new ReadableStream({
+ start(enqueue, close, error) { error(theError); }
+ }),
+ theError
+);
+
+templatedRSErroredSyncOnly('ReadableStream (errored via call in start)',
+ () => new ReadableStream({
+ start(enqueue, close, error) { error(theError); }
+ }),
+ theError
+);
+
+templatedRSErrored('ReadableStream (errored via returning a rejected promise in start)',
+ () => new ReadableStream({
+ start(enqueue, close, error) { return Promise.reject(theError); }
+ }),
+ theError
+);
+
+templatedRSErroredAsyncOnly('ReadableStream (errored via returning a rejected promise in start) reader',
+ () => new ReadableStream({
+ start(enqueue, close, error) { return Promise.reject(theError); }
+ }),
+ theError
+);
+
+templatedRSErroredReader('ReadableStream (errored via returning a rejected promise in start) reader',
+ () => streamAndDefaultReader(new ReadableStream({
+ start(enqueue, close, error) { return Promise.reject(theError); }
+ })),
+ theError
+);
+
+const chunks = ['a', 'b'];
+
+templatedRSTwoChunksOpenReader('ReadableStream (two chunks enqueued, still open) reader',
+ () => streamAndDefaultReader(new ReadableStream({
+ start(enqueue) {
+ enqueue(chunks[0]);
+ enqueue(chunks[1]);
+ }
+ })),
+ chunks
+);
+
+templatedRSTwoChunksClosed('ReadableStream (two chunks enqueued, then closed)',
+ () => new ReadableStream({
+ start(enqueue, close) {
+ enqueue(chunks[0]);
+ enqueue(chunks[1]);
+ close();
+ }
+ }),
+ chunks
+);
+
+templatedRSTwoChunksClosed('ReadableStream (two chunks enqueued async, then closed)',
+ () => new ReadableStream({
+ start(enqueue, close) {
+ setTimeout(() => enqueue(chunks[0]), 10);
+ setTimeout(() => enqueue(chunks[1]), 20);
+ setTimeout(() => close(), 30);
+ }
+ }),
+ chunks
+);
+
+templatedRSTwoChunksClosed('ReadableStream (two chunks enqueued via pull, then closed)',
+ () => {
+ let pullCall = 0;
+
+ return new ReadableStream({
+ pull(enqueue, close) {
+ if (pullCall >= chunks.length) {
+ close();
+ } else {
+ enqueue(chunks[pullCall++]);
+ }
+ }
+ });
+ },
+ chunks
+);
+
+templatedRSTwoChunksClosedReader('ReadableStream (two chunks enqueued, then closed) reader',
+ () => {
+ let doClose;
+ const stream = new ReadableStream({
+ start(enqueue, close) {
+ enqueue(chunks[0]);
+ enqueue(chunks[1]);
+ doClose = close;
+ }
+ });
+ const result = streamAndDefaultReader(stream);
+ doClose();
+ return result;
+ },
+ chunks
+);
+
+function streamAndDefaultReader(stream) {
+ return { stream: stream, reader: stream.getReader() };
+}
diff --git a/reference-implementation/test/readable-stream.js b/reference-implementation/test/readable-stream.js
index 8de5d7476..a883ea609 100644
--- a/reference-implementation/test/readable-stream.js
+++ b/reference-implementation/test/readable-stream.js
@@ -5,310 +5,319 @@ import readableStreamToArray from './utils/readable-stream-to-array';
import sequentialReadableStream from './utils/sequential-rs';
test('ReadableStream can be constructed with no arguments', t => {
- t.plan(1);
t.doesNotThrow(() => new ReadableStream(), 'ReadableStream constructed with no errors');
+ t.end();
});
-test('ReadableStream instances have the correct methods and properties', t => {
- t.plan(9);
-
- const rs = new ReadableStream();
-
- t.equal(typeof rs.read, 'function', 'has a read method');
- t.equal(typeof rs.cancel, 'function', 'has a cancel method');
- t.equal(typeof rs.pipeTo, 'function', 'has a pipeTo method');
- t.equal(typeof rs.pipeThrough, 'function', 'has a pipeThrough method');
+test('ReadableStream: if start throws an error, it should be re-thrown', t => {
+ t.plan(1);
- t.equal(rs.state, 'waiting', 'state starts out waiting');
+ const error = new Error('aaaugh!!');
- t.ok(rs.ready, 'has a ready property');
- t.ok(rs.ready.then, 'ready property is a thenable');
- t.ok(rs.closed, 'has a closed property');
- t.ok(rs.closed.then, 'closed property is thenable');
+ t.throws(() => new ReadableStream({ start() { throw error; } }), /aaaugh/, 'error should be re-thrown');
});
-test('ReadableStream closing puts the stream in a closed state, fulfilling the ready and closed promises with ' +
- 'undefined', t => {
- t.plan(3);
+test('ReadableStream: if pull rejects, it should error the stream', t => {
+ t.plan(2);
+ const error = new Error('pull failure');
const rs = new ReadableStream({
- start(enqueue, close) {
- close();
+ pull() {
+ return Promise.reject(error);
}
});
- t.equal(rs.state, 'closed', 'The stream should be in closed state');
-
- rs.ready.then(
- v => t.equal(v, undefined, 'ready should return a promise fulfilled with undefined'),
- () => t.fail('ready should not return a rejected promise')
- );
+ rs.closed.catch(e => {
+ t.equal(e, error, 'closed should reject with the thrown error');
+ });
- rs.closed.then(
- v => t.equal(v, undefined, 'closed should return a promise fulfilled with undefined'),
- () => t.fail('closed should not return a rejected promise')
- );
+ rs.getReader().read().catch(e => {
+ t.equal(e, error, 'read() should reject with the thrown error');
+ });
});
-test('ReadableStream reading a waiting stream throws a TypeError', t => {
+test('ReadableStream: calling close twice should be a no-op', t => {
t.plan(2);
- const rs = new ReadableStream();
+ new ReadableStream({
+ start(enqueue, close) {
+ close();
+ t.doesNotThrow(close);
+ }
+ })
+ .closed.then(() => t.pass('closed should fulfill'));
+});
+
+test('ReadableStream: calling error twice should be a no-op', t => {
+ t.plan(2);
- t.equal(rs.state, 'waiting');
- t.throws(() => rs.read(), /TypeError/);
+ const theError = new Error('boo!');
+ const error2 = new Error('not me!');
+ new ReadableStream({
+ start(enqueue, close, error) {
+ error(theError);
+ t.doesNotThrow(() => error(error2));
+ }
+ })
+ .closed.catch(e => t.equal(e, theError, 'closed should reject with the first error'));
});
-test('ReadableStream reading a closed stream throws a TypeError', t => {
+test('ReadableStream: calling error after close should be a no-op', t => {
t.plan(2);
- const rs = new ReadableStream({
- start(enqueue, close) {
+ new ReadableStream({
+ start(enqueue, close, error) {
close();
+ t.doesNotThrow(error);
}
- });
-
- t.equal(rs.state, 'closed');
- t.throws(() => rs.read(), /TypeError/);
+ })
+ .closed.then(() => t.pass('closed should fulfill'));
});
-test('ReadableStream reading an errored stream throws the stored error', t => {
+test('ReadableStream: calling close after error should be a no-op', t => {
t.plan(2);
- const passedError = new Error('aaaugh!!');
+ const theError = new Error('boo!');
+ new ReadableStream({
+ start(enqueue, close, error) {
+ error(theError);
+ t.doesNotThrow(close);
+ }
+ })
+ .closed.catch(e => t.equal(e, theError, 'closed should reject with the first error'));
+});
+
+test('ReadableStream: should only call pull once upon starting the stream', t => {
+ t.plan(2);
+ let pullCount = 0;
+ const startPromise = Promise.resolve();
const rs = new ReadableStream({
- start(enqueue, close, error) {
- error(passedError);
+ start() {
+ return startPromise;
+ },
+ pull() {
+ pullCount++;
}
});
- t.equal(rs.state, 'errored');
- try {
- rs.read();
- t.fail('rs.read() didn\'t throw');
- } catch (e) {
- t.equal(e, passedError);
- }
+ startPromise.then(() => {
+ t.equal(pullCount, 1, 'pull should be called once start finishes');
+ });
+
+ setTimeout(() => t.equal(pullCount, 1, 'pull should be called exactly once'), 50);
});
-test('ReadableStream reading a stream makes ready and closed return a promise fulfilled with undefined when the ' +
- 'stream is fully drained', t => {
- t.plan(6);
+test('ReadableStream: should only call pull once for a forever-empty stream, even after reading', t => {
+ t.plan(2);
+ let pullCount = 0;
+ const startPromise = Promise.resolve();
const rs = new ReadableStream({
- start(enqueue, close) {
- enqueue('test');
- close();
+ start() {
+ return startPromise;
+ },
+ pull() {
+ pullCount++;
}
});
- t.equal(rs.state, 'readable', 'The stream should be in readable state');
- t.equal(rs.read(), 'test', 'A test string should be read');
- t.equal(rs.state, 'closed', 'The stream should be in closed state');
+ startPromise.then(() => {
+ t.equal(pullCount, 1, 'pull should be called once start finishes');
+ });
- t.throws(() => rs.read(), /TypeError/);
+ rs.getReader().read();
- rs.ready.then(
- v => t.equal(v, undefined, 'ready should return a promise fulfilled with undefined'),
- () => t.fail('ready should not return a rejected promise')
- );
-
- rs.closed.then(
- v => t.equal(v, undefined, 'closed should return a promise fulfilled with undefined'),
- () => t.fail('closed should not return a rejected promise')
- );
+ setTimeout(() => t.equal(pullCount, 1, 'pull should be called exactly once'), 50);
});
-test('ReadableStream avoid redundant pull call', t => {
+test('ReadableStream: should only call pull once on a non-empty stream read from before start fulfills', t => {
+ t.plan(5);
+
let pullCount = 0;
+ const startPromise = Promise.resolve();
const rs = new ReadableStream({
+ start(enqueue) {
+ enqueue('a');
+ return startPromise;
+ },
pull() {
pullCount++;
- },
-
- cancel() {
- t.fail('cancel should not be called');
}
});
- rs.ready;
- rs.ready;
- rs.ready;
-
- // Use setTimeout to ensure we run after any promises.
- setTimeout(() => {
- t.equal(pullCount, 1, 'pull should not be called more than once');
- t.end();
- }, 50);
-});
+ startPromise.then(() => {
+ t.equal(pullCount, 1, 'pull should be called once start finishes');
+ });
-test('ReadableStream start throws an error', t => {
- t.plan(1);
+ rs.getReader().read().then(r => {
+ t.deepEqual(r, { value: 'a', done: false }, 'first read() should return first chunk');
+ t.equal(pullCount, 1, 'pull should not have been called again');
+ });
- const error = new Error('aaaugh!!');
+ t.equal(pullCount, 0, 'calling read() should not cause pull to be called yet');
- try {
- new ReadableStream({ start() { throw error; } });
- t.fail('Constructor didn\'t throw');
- } catch (caughtError) {
- t.equal(caughtError, error, 'error was allowed to propagate');
- }
+ setTimeout(() => t.equal(pullCount, 1, 'pull should be called exactly once'), 50);
});
-test('ReadableStream pull throws an error', t => {
- t.plan(4);
-
- const error = new Error('aaaugh!!');
- const rs = new ReadableStream({ pull() { throw error; } });
+test('ReadableStream: should only call pull twice on a non-empty stream read from after start fulfills', t => {
+ t.plan(5);
- rs.closed.then(() => {
- t.fail('the stream should not close successfully');
- t.end();
+ let pullCount = 0;
+ const startPromise = Promise.resolve();
+ const rs = new ReadableStream({
+ start(enqueue) {
+ enqueue('a');
+ return startPromise;
+ },
+ pull() {
+ pullCount++;
+ }
});
- rs.ready.then(v => {
- t.equal(rs.state, 'errored', 'state is "errored" after waiting'),
- t.equal(v, undefined, 'ready fulfills with undefined')
- });
+ startPromise.then(() => {
+ t.equal(pullCount, 1, 'pull should be called once start finishes');
- rs.closed.catch(caught => {
- t.equal(rs.state, 'errored', 'state is "errored" in closed catch');
- t.equal(caught, error, 'error was passed through as rejection reason of closed property');
+ rs.getReader().read().then(r => {
+ t.deepEqual(r, { value: 'a', done: false }, 'first read() should return first chunk');
+ t.equal(pullCount, 2, 'pull should be called again once read fulfills');
+ });
});
+
+ t.equal(pullCount, 0, 'calling read() should not cause pull to be called yet');
+
+ setTimeout(() => t.equal(pullCount, 2, 'pull should be called exactly twice'), 50);
});
-test('ReadableStream adapting a push source', t => {
- let pullChecked = false;
- const randomSource = new RandomPushSource(8);
+test('ReadableStream: should call pull in reaction to read()ing the last chunk, if not draining', t => {
+ t.plan(4);
+ let pullCount = 0;
+ let doEnqueue;
+ const startPromise = Promise.resolve();
+ const pullPromise = Promise.resolve();
const rs = new ReadableStream({
- start(enqueue, close, error) {
- t.equal(typeof enqueue, 'function', 'enqueue is a function in start');
- t.equal(typeof close, 'function', 'close is a function in start');
- t.equal(typeof error, 'function', 'error is a function in start');
+ start(enqueue) {
+ doEnqueue = enqueue;
+ return startPromise;
+ },
+ pull() {
+ ++pullCount;
+ return pullPromise;
+ }
+ });
- randomSource.ondata = chunk => {
- if (!enqueue(chunk)) {
- randomSource.readStop();
- }
- };
+ const reader = rs.getReader();
- randomSource.onend = close;
- randomSource.onerror = error;
- },
+ startPromise.then(() => {
+ t.equal(pullCount, 1, 'pull should have been called once after start finishes');
- pull(enqueue, close) {
- if (!pullChecked) {
- pullChecked = true;
- t.equal(typeof enqueue, 'function', 'enqueue is a function in pull');
- t.equal(typeof close, 'function', 'close is a function in pull');
- }
+ doEnqueue('a');
- randomSource.readStart();
- }
- });
+ return pullPromise.then(() => {
+ t.equal(pullCount, 2, 'pull should have been called a second time after enqueue');
- readableStreamToArray(rs).then(chunks => {
- t.equal(rs.state, 'closed', 'should be closed');
- t.equal(chunks.length, 8, 'got the expected 8 chunks');
- for (let i = 0; i < chunks.length; i++) {
- t.equal(chunks[i].length, 128, 'each chunk has 128 bytes');
- }
+ return reader.read().then(() => {
+ t.equal(pullCount, 3, 'pull should have been called a third time after read');
+ });
+ });
+ })
+ .catch(e => t.error(e));
- t.end();
- });
+ setTimeout(() => t.equal(pullCount, 3, 'pull should be called exactly thrice'), 50);
});
-test('ReadableStream adapting a sync pull source', t => {
- const rs = sequentialReadableStream(10);
-
- readableStreamToArray(rs).then(chunks => {
- t.equal(rs.state, 'closed', 'stream should be closed');
- t.equal(rs.source.closed, true, 'source should be closed');
- t.deepEqual(chunks, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 'got the expected 10 chunks');
+test('ReadableStream: should not call pull() in reaction to read()ing the last chunk, if draining', t => {
+ t.plan(4);
- t.end();
+ let pullCount = 0;
+ let doEnqueue;
+ let doClose;
+ const startPromise = Promise.resolve();
+ const pullPromise = Promise.resolve();
+ const rs = new ReadableStream({
+ start(enqueue, close) {
+ doEnqueue = enqueue;
+ doClose = close;
+ return startPromise;
+ },
+ pull() {
+ ++pullCount;
+ return pullPromise;
+ }
});
-});
-test('ReadableStream adapting an async pull source', t => {
- const rs = sequentialReadableStream(10, { async: true });
+ const reader = rs.getReader();
- readableStreamToArray(rs).then(chunks => {
- t.equal(rs.state, 'closed', 'stream should be closed');
- t.equal(rs.source.closed, true, 'source should be closed');
- t.deepEqual(chunks, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 'got the expected 10 chunks');
+ startPromise.then(() => {
+ t.equal(pullCount, 1, 'pull should have been called once after start finishes');
- t.end();
- });
-});
+ doEnqueue('a');
-test('ReadableStream is able to enqueue lots of data in a single pull, making it available synchronously', t => {
- let i = 0;
- const rs = new ReadableStream({
- pull(enqueue, close) {
- while (++i <= 10) {
- enqueue(i);
- }
+ return pullPromise.then(() => {
+ t.equal(pullCount, 2, 'pull should have been called a second time after enqueue');
- close();
- }
- });
+ doClose();
- rs.ready.then(() => {
- const data = [];
- while (rs.state === 'readable') {
- data.push(rs.read());
- }
+ return reader.read().then(() => {
+ t.equal(pullCount, 2, 'pull should not have been called a third time after read');
+ });
+ });
+ })
+ .catch(e => t.error(e));
- t.deepEqual(data, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
- t.end();
- });
+ setTimeout(() => t.equal(pullCount, 2, 'pull should be called exactly twice'), 50);
});
-test('ReadableStream does not call pull until previous pull\'s promise fulfills', t => {
+test('ReadableStream: should not call pull until the previous pull call\'s promise fulfills', t => {
let resolve;
let returnedPromise;
let timesCalled = 0;
+ const startPromise = Promise.resolve();
const rs = new ReadableStream({
+ start(enqueue) {
+ enqueue('a');
+ return startPromise;
+ },
pull(enqueue) {
++timesCalled;
- enqueue(timesCalled);
returnedPromise = new Promise(r => { resolve = r; });
return returnedPromise;
}
});
+ const reader = rs.getReader();
- t.equal(rs.state, 'waiting', 'stream starts out waiting');
+ startPromise.then(() =>
+ reader.read().then(result1 => {
+ t.equal(timesCalled, 1,
+ 'pull should have been called once after start, but not yet have been called a second time');
+ t.deepEqual(result1, { value: 'a', done: false }, 'read() should fulfill with the enqueued value');
- rs.ready.then(() => {
- t.equal(rs.state, 'readable', 'stream becomes readable (even before promise fulfills)');
- t.equal(timesCalled, 1, 'pull is not yet called a second time');
- t.equal(rs.read(), 1, 'read() returns enqueued value');
-
- setTimeout(() => {
- t.equal(timesCalled, 1, 'after 30 ms, pull has still only been called once');
+ setTimeout(() => {
+ t.equal(timesCalled, 1, 'after 30 ms, pull should still only have been called once');
- resolve();
+ resolve();
- returnedPromise.then(() => {
- t.equal(timesCalled, 2, 'after the promise is fulfilled, pull is called a second time');
- t.equal(rs.read(), 2, 'read() returns the second enqueued value');
- t.end();
- });
- }, 30);
- });
+ returnedPromise.then(() => {
+ t.equal(timesCalled, 2,
+ 'after the promise returned by pull is fulfilled, pull should be called a second time');
+ t.end();
+ });
+ }, 30);
+ })
+ )
+ .catch(e => t.error(e));
});
-test('ReadableStream does not call pull multiple times after previous pull finishes', t => {
+test('ReadableStream: should pull after start, and after every read', t => {
let timesCalled = 0;
-
+ const startPromise = Promise.resolve();
const rs = new ReadableStream({
start(enqueue) {
enqueue('a');
enqueue('b');
enqueue('c');
+ return startPromise;
},
pull() {
++timesCalled;
@@ -322,166 +331,112 @@ test('ReadableStream does not call pull multiple times after previous pull finis
}
}
});
+ const reader = rs.getReader();
- t.equal(rs.state, 'readable', 'since start() synchronously enqueued chunks, the stream is readable');
+ startPromise.then(() => {
+ return reader.read().then(result1 => {
+ t.deepEqual(result1, { value: 'a', done: false }, 'first chunk should be as expected');
- // Wait for start to finish
- rs.ready.then(() => {
- t.equal(rs.read(), 'a', 'first chunk should be as expected');
- t.equal(rs.read(), 'b', 'second chunk should be as expected');
- t.equal(rs.read(), 'c', 'third chunk should be as expected');
+ return reader.read().then(result2 => {
+ t.deepEqual(result2, { value: 'b', done: false }, 'second chunk should be as expected');
- setTimeout(() => {
- // Once for after start, and once for after rs.read() === 'a'.
- t.equal(timesCalled, 2, 'pull() should only be called twice');
- t.end();
- }, 50);
- });
-});
+ return reader.read().then(result3 => {
+ t.deepEqual(result3, { value: 'c', done: false }, 'third chunk should be as expected');
-test('ReadableStream pull rejection makes stream errored', t => {
- t.plan(2);
+ setTimeout(() => {
+ // Once for after start, and once for every read.
+ t.equal(timesCalled, 4, 'pull() should be called exactly four times');
+ t.end();
+ }, 50);
+ });
+ });
+ });
+ })
+ .catch(e => t.error(e));
+});
- const theError = new Error('pull failure');
+test('ReadableStream: should not call pull after start if the stream is now closed', t => {
+ let timesCalled = 0;
+ const startPromise = Promise.resolve();
const rs = new ReadableStream({
+ start(enqueue, close) {
+ enqueue('a');
+ close();
+ return startPromise;
+ },
pull() {
- return Promise.reject(theError);
+ ++timesCalled;
}
});
- t.equal(rs.state, 'waiting', 'stream starts out waiting');
-
- rs.closed.then(
- () => t.fail('.closed should not fulfill'),
- e => t.equal(e, theError, '.closed should reject with the error')
- );
-});
-
-test('ReadableStream ready does not error when no more data is available', t => {
- // https://github.com/whatwg/streams/issues/80
-
- t.plan(1);
-
- const rs = sequentialReadableStream(5, { async: true });
- const result = [];
-
- pump();
-
- function pump() {
- while (rs.state === 'readable') {
- result.push(rs.read());
- }
-
- if (rs.state === 'closed') {
- t.deepEqual(result, [1, 2, 3, 4, 5], 'got the expected 5 chunks');
- } else {
- rs.ready.then(pump, r => t.ifError(r));
- }
- }
-});
-
-test('ReadableStream should be able to get data sequentially from an asynchronous stream', t => {
- // https://github.com/whatwg/streams/issues/80
+ startPromise.then(() => {
+ t.equal(timesCalled, 0, 'after start finishes, pull should not have been called');
- t.plan(4);
-
- const rs = sequentialReadableStream(3, { async: true });
+ const reader = rs.getReader();
+ return reader.read().then(() => {
+ t.equal(timesCalled, 0, 'reading should not have triggered a pull call');
- const result = [];
- const EOF = Object.create(null);
-
- getNext().then(v => {
- t.equal(v, 1, 'first chunk should be 1');
- return getNext().then(v => {
- t.equal(v, 2, 'second chunk should be 2');
- return getNext().then(v => {
- t.equal(v, 3, 'third chunk should be 3');
- return getNext().then(v => {
- t.equal(v, EOF, 'fourth result should be EOF');
- });
+ return rs.closed.then(() => {
+ t.equal(timesCalled, 0, 'stream should have closed with still no calls to pull');
+ t.end();
});
});
})
- .catch(r => t.ifError(r));
-
- function getNext() {
- if (rs.state === 'closed') {
- return Promise.resolve(EOF);
- }
-
- return rs.ready.then(() => {
- if (rs.state === 'readable') {
- return rs.read();
- } else if (rs.state === 'closed') {
- return EOF;
- }
- });
- }
+ .catch(e => t.error(e));
});
-test('Default ReadableStream returns `false` for all but the first `enqueue` call', t => {
- t.plan(5);
-
- new ReadableStream({
- start(enqueue) {
- t.equal(enqueue('hi'), true);
- t.equal(enqueue('hey'), false);
- t.equal(enqueue('whee'), false);
- t.equal(enqueue('yo'), false);
- t.equal(enqueue('sup'), false);
+test('ReadableStream: should call pull after enqueueing from inside pull (with no read requests), if strategy allows',
+ t => {
+ let timesCalled = 0;
+ const startPromise = Promise.resolve();
+ const rs = new ReadableStream({
+ start() {
+ return startPromise;
+ },
+ pull(enqueue) {
+ enqueue(++timesCalled);
+ },
+ strategy: {
+ size() {
+ return 1;
+ },
+ shouldApplyBackpressure(size) {
+ return size > 3;
+ }
}
});
-});
-test('ReadableStream continues returning `true` from `enqueue` if the data is read out of it in time', t => {
- t.plan(12);
-
- const rs = new ReadableStream({
- start(enqueue) {
- // Delay a bit so that the stream is successfully constructed and thus the `rs` variable references something.
- setTimeout(() => {
- t.equal(enqueue('foo'), true);
- t.equal(rs.state, 'readable');
- t.equal(rs.read(), 'foo');
- t.equal(rs.state, 'waiting');
-
- t.equal(enqueue('bar'), true);
- t.equal(rs.state, 'readable');
- t.equal(rs.read(), 'bar');
- t.equal(rs.state, 'waiting');
-
- t.equal(enqueue('baz'), true);
- t.equal(rs.state, 'readable');
- t.equal(rs.read(), 'baz');
- t.equal(rs.state, 'waiting');
- }, 0);
- },
- strategy: new CountQueuingStrategy({ highWaterMark: 4 })
+ startPromise.then(() => {
+ // after start: size = 0, pull()
+ // after enqueue(1): size = 1, pull()
+ // after enqueue(2): size = 2, pull()
+ // after enqueue(3): size = 3, pull()
+ // after enqueue(4): size = 4, do not pull
+ t.equal(timesCalled, 4, 'pull() should have been called four times');
+ t.end();
});
});
-test('ReadableStream enqueue fails when the stream is draining', t => {
+test('ReadableStream: enqueue should throw when the stream is readable but draining', t => {
+ t.plan(2);
+
const rs = new ReadableStream({
start(enqueue, close) {
- t.equal(enqueue('a'), true);
+ t.equal(enqueue('a'), true, 'the first enqueue should return true');
close();
t.throws(
() => enqueue('b'),
/TypeError/,
- 'enqueue after close must throw a TypeError'
+ 'enqueue after close should throw a TypeError'
);
- },
- strategy: new CountQueuingStrategy({ highWaterMark: 10 })
+ }
});
-
- t.equal(rs.state, 'readable');
- t.equal(rs.read(), 'a');
- t.equal(rs.state, 'closed');
- t.end();
});
-test('ReadableStream enqueue fails when the stream is closed', t => {
+test('ReadableStream: enqueue should throw when the stream is closed', t => {
+ t.plan(1);
+
const rs = new ReadableStream({
start(enqueue, close) {
close();
@@ -489,16 +444,15 @@ test('ReadableStream enqueue fails when the stream is closed', t => {
t.throws(
() => enqueue('a'),
/TypeError/,
- 'enqueue after close must throw a TypeError'
+ 'enqueue after close should throw a TypeError'
);
}
});
-
- t.equal(rs.state, 'closed');
- t.end();
});
-test('ReadableStream enqueue fails with the correct error when the stream is errored', t => {
+test('ReadableStream: enqueue should throw the stored error when the stream is errored', t => {
+ t.plan(1);
+
const expectedError = new Error('i am sad');
const rs = new ReadableStream({
start(enqueue, close, error) {
@@ -507,260 +461,153 @@ test('ReadableStream enqueue fails with the correct error when the stream is err
t.throws(
() => enqueue('a'),
/i am sad/,
- 'enqueue after error must throw that error'
+ 'enqueue after error should throw that error'
);
}
});
-
- t.equal(rs.state, 'errored');
- t.end();
});
-test('ReadableStream if shouldApplyBackpressure throws, the stream is errored', t => {
- const error = new Error('aaaugh!!');
- const rs = new ReadableStream({
- start(enqueue) {
- try {
- enqueue('hi');
- t.fail('enqueue didn\'t throw');
- t.end();
- } catch (e) {
- t.equal(e, error);
- }
- },
- strategy: {
- size() {
- return 1;
- },
+test('ReadableStream: should call underlying source methods as methods', t => {
+ t.plan(6);
- shouldApplyBackpressure() {
- throw error;
- }
+ class Source {
+ start(enqueue) {
+ t.equal(this, theSource, 'start() should be called with the correct this');
+ enqueue('a');
}
- });
-
- rs.closed.catch(r => {
- t.equal(r, error);
- t.end();
- });
-});
-test('ReadableStream if size throws, the stream is errored', t => {
- const error = new Error('aaaugh!!');
+ pull() {
+ t.equal(this, theSource, 'pull() should be called with the correct this');
+ }
- const rs = new ReadableStream({
- start(enqueue) {
- try {
- enqueue('hi');
- t.fail('enqueue didn\'t throw');
- t.end();
- } catch (e) {
- t.equal(e, error);
- }
- },
- strategy: {
- size() {
- throw error;
- },
+ cancel() {
+ t.equal(this, theSource, 'cancel() should be called with the correct this');
+ }
- shouldApplyBackpressure() {
- return true;
- }
+ get strategy() {
+ // Called three times
+ t.equal(this, theSource, 'strategy getter should be called with the correct this');
+ return undefined;
}
- });
+ }
- rs.closed.catch(r => {
- t.equal(r, error);
- t.end();
- });
+ const theSource = new Source();
+ theSource.debugName = 'the source object passed to the constructor'; // makes test failures easier to diagnose
+ const rs = new ReadableStream(theSource);
+
+ rs.getReader().read().then(() => rs.cancel());
});
-test('ReadableStream if size is NaN, the stream is errored', t => {
- t.plan(2);
+test('ReadableStream strategies: the default strategy should return false for all but the first enqueue call', t => {
+ t.plan(5);
- const rs = new ReadableStream({
+ new ReadableStream({
start(enqueue) {
- try {
- enqueue('hi');
- t.fail('enqueue didn\'t throw');
- } catch (error) {
- t.equal(error.constructor, RangeError);
- }
- },
- strategy: {
- size() {
- return NaN;
- },
-
- shouldApplyBackpressure() {
- return true;
- }
+ t.equal(enqueue('a'), true, 'first enqueue should return true');
+ t.equal(enqueue('b'), false, 'second enqueue should return false');
+ t.equal(enqueue('c'), false, 'third enqueue should return false');
+ t.equal(enqueue('d'), false, 'fourth enqueue should return false');
+ t.equal(enqueue('e'), false, 'fifth enqueue should return false');
}
});
-
- t.equal(rs.state, 'errored', 'state should be errored');
});
-test('ReadableStream if size is -Infinity, the stream is errored', t => {
- t.plan(2);
-
+test('ReadableStream strategies: the default strategy should continue returning true from enqueue if the chunks are ' +
+ 'read immediately', t => {
+ let doEnqueue;
const rs = new ReadableStream({
start(enqueue) {
- try {
- enqueue('hi');
- t.fail('enqueue didn\'t throw');
- } catch (error) {
- t.equal(error.constructor, RangeError);
- }
- },
- strategy: {
- size() {
- return -Infinity;
- },
-
- shouldApplyBackpressure() {
- return true;
- }
+ doEnqueue = enqueue;
}
});
+ const reader = rs.getReader();
- t.equal(rs.state, 'errored', 'state should be errored');
-});
+ t.equal(doEnqueue('a'), true, 'first enqueue should return true');
-test('ReadableStream if size is +Infinity, the stream is errored', t => {
- t.plan(2);
+ reader.read().then(result1 => {
+ t.deepEqual(result1, { value: 'a', done: false }, 'first chunk read should be correct');
+ t.equal(doEnqueue('b'), true, 'second enqueue should return true');
- const rs = new ReadableStream({
- start(enqueue) {
- try {
- enqueue('hi');
- t.fail('enqueue didn\'t throw');
- } catch (error) {
- t.equal(error.constructor, RangeError);
- }
- },
- strategy: {
- size() {
- return +Infinity;
- },
+ return reader.read();
+ })
+ .then(result2 => {
+ t.deepEqual(result2, { value: 'b', done: false }, 'second chunk read should be correct');
+ t.equal(doEnqueue('c'), true, 'third enqueue should return true');
- shouldApplyBackpressure() {
- return true;
- }
- }
- });
+ return reader.read();
+ })
+ .then(result3 => {
+ t.deepEqual(result3, { value: 'c', done: false }, 'third chunk read should be correct');
+ t.equal(doEnqueue('d'), true, 'fourth enqueue should return true');
- t.equal(rs.state, 'errored', 'state should be errored');
+ t.end();
+ })
+ .catch(e => t.error(e));
});
-test('ReadableStream errors in shouldApplyBackpressure cause ready to fulfill and closed to rejected', t => {
- t.plan(3);
+test('ReadableStream integration test: adapting a random push source', t => {
+ let pullChecked = false;
+ const randomSource = new RandomPushSource(8);
- const thrownError = new Error('size failure');
- let callsToShouldApplyBackpressure = 0;
const rs = new ReadableStream({
- start(enqueue) {
- setTimeout(() => {
- try {
- enqueue('hi');
- t.fail('enqueue didn\'t throw');
- } catch (error) {
- t.equal(error, thrownError, 'error thrown by enqueue should be the thrown error');
+ start(enqueue, close, error) {
+ t.equal(typeof enqueue, 'function', 'enqueue should be a function in start');
+ t.equal(typeof close, 'function', 'close should be a function in start');
+ t.equal(typeof error, 'function', 'error should be a function in start');
+
+ randomSource.ondata = chunk => {
+ if (!enqueue(chunk)) {
+ randomSource.readStop();
}
- }, 0);
+ };
+
+ randomSource.onend = close;
+ randomSource.onerror = error;
},
- strategy: {
- size() {
- return 1;
- },
- shouldApplyBackpressure() {
- if (++callsToShouldApplyBackpressure === 2) {
- throw thrownError;
- }
- return false;
+ pull(enqueue, close) {
+ if (!pullChecked) {
+ pullChecked = true;
+ t.equal(typeof enqueue, 'function', 'enqueue should be a function in pull');
+ t.equal(typeof close, 'function', 'close should be a function in pull');
}
- }
- });
-
- rs.ready.then(
- v => t.equal(v, undefined, 'ready should be fulfilled with undefined'),
- e => t.fail('ready should not be rejected')
- );
- rs.closed.then(
- v => t.fail('closed should not be fulfilled'),
- e => t.equal(e, thrownError, 'closed should be rejected with the thrown error')
- );
-});
-
-test('ReadableStream cancel() and closed on a closed stream should return the same promise', t => {
- const rs = new ReadableStream({
- start(enqueue, close) {
- close();
+ randomSource.readStart();
}
});
- t.equal(rs.cancel(), rs.closed, 'the promises returned should be the same');
- t.end();
-});
+ readableStreamToArray(rs).then(
+ chunks => {
+ t.equal(chunks.length, 8, '8 chunks should be read');
+ for (let i = 0; i < chunks.length; i++) {
+ t.equal(chunks[i].length, 128, `chunk ${i + 1} should have 128 bytes`);
+ }
-test('ReadableStream ready returns the same value when called on a new, empty stream', t => {
- const rs = new ReadableStream();
- t.equal(rs.ready, rs.ready, 'rs.ready should not change between gets');
- t.end();
+ t.end();
+ },
+ e => t.error(e)
+ );
});
-test('ReadableStream ready returns the same value when called on a readable stream', t => {
- const rs = new ReadableStream({
- start(enqueue) {
- enqueue('a');
- }
- });
+test('ReadableStream integration test: adapting a sync pull source', t => {
+ const rs = sequentialReadableStream(10);
- t.equal(rs.ready, rs.ready, 'rs.ready should not change between gets');
- t.end();
-});
+ readableStreamToArray(rs).then(chunks => {
+ t.equal(rs.source.closed, true, 'source should be closed after all chunks are read');
+ t.deepEqual(chunks, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 'the expected 10 chunks should be read');
-test('ReadableStream cancel() and closed on an errored stream should return the same promise', t => {
- const rs = new ReadableStream({
- start(enqueue, close, error) {
- error(new Error('boo!'));
- }
+ t.end();
});
-
- t.equal(rs.cancel(), rs.closed, 'the promises returned should be the same');
- t.end();
});
-test('ReadableStream should call underlying source methods as methods', t => {
- t.plan(6);
-
- class Source {
- start(enqueue) {
- t.equal(this, theSource, 'start() should be called with the correct this');
- enqueue('a');
- }
-
- pull() {
- t.equal(this, theSource, 'pull() should be called with the correct this');
- }
-
- cancel() {
- t.equal(this, theSource, 'cancel() should be called with the correct this');
- }
-
- get strategy() {
- // Called three times
- t.equal(this, theSource, 'strategy getter should be called with the correct this');
- return undefined;
- }
- }
+test('ReadableStream integration test: adapting an async pull source', t => {
+ const rs = sequentialReadableStream(10, { async: true });
- const theSource = new Source();
- theSource.debugName = "the source object passed to the constructor";
- const rs = new ReadableStream(theSource);
+ readableStreamToArray(rs).then(chunks => {
+ t.equal(rs.source.closed, true, 'source should be closed after all chunks are read');
+ t.deepEqual(chunks, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 'the expected 10 chunks should be read');
- rs.ready.then(() => rs.cancel());
+ t.end();
+ });
});
diff --git a/reference-implementation/test/templated/readable-stream-closed-reader.js b/reference-implementation/test/templated/readable-stream-closed-reader.js
new file mode 100644
index 000000000..f7b32dbd1
--- /dev/null
+++ b/reference-implementation/test/templated/readable-stream-closed-reader.js
@@ -0,0 +1,50 @@
+const tapeTest = require('tape-catch');
+
+export default (label, factory) => {
+ function test(description, testFn) {
+ tapeTest(`${label}: ${description}`, testFn);
+ }
+
+ test('read() should fulfill with { value: undefined, done: true }', t => {
+ t.plan(1);
+ const { reader } = factory();
+
+ reader.read().then(
+ v => t.deepEqual(v, { value: undefined, done: true }, 'read() should fulfill correctly'),
+ () => t.fail('read() should not return a rejected promise')
+ );
+ });
+
+ test('closed should fulfill with undefined', t => {
+ t.plan(2);
+ const { stream, reader } = factory();
+
+ stream.closed.then(
+ v => t.equal(v, undefined, 'stream closed should fulfill with undefined'),
+ () => t.fail('stream closed should not reject')
+ );
+
+ reader.closed.then(
+ v => t.equal(v, undefined, 'reader closed should fulfill with undefined'),
+ () => t.fail('reader closed should not reject')
+ );
+ });
+
+ test('cancel() should return a distinct fulfilled promise each time', t => {
+ t.plan(7);
+ const { stream, reader } = factory();
+
+ const cancelPromise1 = reader.cancel();
+ const cancelPromise2 = reader.cancel();
+ const closedStreamPromise = stream.closed;
+ const closedReaderPromise = reader.closed;
+
+ cancelPromise1.then(v => t.equal(v, undefined, 'first cancel() call should fulfill with undefined'));
+ cancelPromise2.then(v => t.equal(v, undefined, 'second cancel() call should fulfill with undefined'));
+ t.notEqual(cancelPromise1, cancelPromise2, 'cancel() calls should return distinct promises');
+ t.notEqual(cancelPromise1, closedStreamPromise, 'cancel() promise 1 should be distinct from stream.closed');
+ t.notEqual(cancelPromise1, closedReaderPromise, 'cancel() promise 1 should be distinct from reader.closed');
+ t.notEqual(cancelPromise2, closedStreamPromise, 'cancel() promise 2 should be distinct from stream.closed');
+ t.notEqual(cancelPromise2, closedReaderPromise, 'cancel() promise 2 should be distinct from reader.closed');
+ });
+};
diff --git a/reference-implementation/test/templated/readable-stream-closed.js b/reference-implementation/test/templated/readable-stream-closed.js
new file mode 100644
index 000000000..8a250c836
--- /dev/null
+++ b/reference-implementation/test/templated/readable-stream-closed.js
@@ -0,0 +1,72 @@
+const tapeTest = require('tape-catch');
+
+export default (label, factory) => {
+ function test(description, testFn) {
+ tapeTest(`${label}: ${description}`, testFn);
+ }
+
+ test('closed should fulfill with undefined', t => {
+ t.plan(1);
+ const rs = factory();
+
+ rs.closed.then(
+ v => t.equal(v, undefined, 'closed should fulfill with undefined'),
+ () => t.fail('closed should not reject')
+ );
+ });
+
+ test('cancel() should return a distinct fulfilled promise each time', t => {
+ t.plan(5);
+ const rs = factory();
+
+ const cancelPromise1 = rs.cancel();
+ const cancelPromise2 = rs.cancel();
+ const closedPromise = rs.closed;
+
+ cancelPromise1.then(v => t.equal(v, undefined, 'first cancel() call should fulfill with undefined'));
+ cancelPromise2.then(v => t.equal(v, undefined, 'second cancel() call should fulfill with undefined'));
+ t.notEqual(cancelPromise1, cancelPromise2, 'cancel() calls should return distinct promises');
+ t.notEqual(cancelPromise1, closedPromise, 'cancel() promise 1 should be distinct from closed');
+ t.notEqual(cancelPromise2, closedPromise, 'cancel() promise 2 should be distinct from closed');
+ });
+
+ test('getReader() should throw a TypeError', t => {
+ t.plan(1);
+ const rs = factory();
+
+ t.throws(() => rs.getReader(), /TypeError/, 'getReader() should fail');
+ });
+
+ test('piping to a WritableStream in the writable state should fail', t => {
+ t.plan(3);
+ const rs = factory();
+
+ const startPromise = Promise.resolve();
+ const ws = new WritableStream({
+ start() {
+ return startPromise;
+ },
+ write() {
+ t.fail('Unexpected write call');
+ },
+ close() {
+ t.fail('Unexpected close call');
+ },
+ abort() {
+ t.fail('Unexpected abort call');
+ }
+ });
+
+ startPromise.then(() => {
+ t.equal(ws.state, 'writable', 'writable stream should start in writable state');
+
+ rs.pipeTo(ws).then(
+ () => t.fail('pipeTo promise should not fulfill'),
+ e => {
+ t.equal(e.constructor, TypeError, 'pipeTo promise should be rejected with a TypeError');
+ t.equal(ws.state, 'writable', 'writable stream should still be writable');
+ }
+ );
+ });
+ });
+};
diff --git a/reference-implementation/test/templated/readable-stream-empty-reader.js b/reference-implementation/test/templated/readable-stream-empty-reader.js
new file mode 100644
index 000000000..ead5c6eca
--- /dev/null
+++ b/reference-implementation/test/templated/readable-stream-empty-reader.js
@@ -0,0 +1,134 @@
+const tapeTest = require('tape-catch');
+
+export default (label, factory) => {
+ function test(description, testFn) {
+ tapeTest(`${label}: ${description}`, testFn);
+ }
+
+ test('instances have the correct methods and properties', t => {
+ const { reader } = factory();
+
+ t.ok(reader.closed, 'has a closed property');
+ t.equal(typeof reader.closed.then, 'function', 'closed property is thenable');
+
+ t.equal(typeof reader.cancel, 'function', 'has a cancel method');
+ t.equal(typeof reader.read, 'function', 'has a read method');
+ t.equal(typeof reader.releaseLock, 'function', 'has a releaseLock method');
+
+ t.end();
+ });
+
+ test('read() should never settle', t => {
+ const { reader } = factory();
+
+ reader.read().then(
+ () => t.fail('read() should not fulfill'),
+ () => t.fail('read() should not reject')
+ );
+
+ setTimeout(() => t.end(), 100);
+ });
+
+ test('two read()s should both never settle', t => {
+ const { reader } = factory();
+
+ reader.read().then(
+ () => t.fail('first read() should not fulfill'),
+ () => t.fail('first read() should not reject')
+ );
+
+ reader.read().then(
+ () => t.fail('second read() should not fulfill'),
+ () => t.fail('second read() should not reject')
+ );
+
+ setTimeout(() => t.end(), 100);
+ });
+
+ test('read() should return distinct promises each time', t => {
+ t.plan(1);
+ const { reader } = factory();
+
+ t.notEqual(reader.read(), reader.read(), 'the promises returned should be distinct');
+ });
+
+ test('getReader() again on the stream should fail', t => {
+ t.plan(1);
+ const { stream } = factory();
+
+ t.throws(() => stream.getReader(), /TypeError/, 'stream.getReader() should throw a TypeError');
+ });
+
+ test('releasing the lock with pending read requests should throw but the read requests should stay pending', t => {
+ const { reader } = factory();
+
+ reader.read().then(
+ () => t.fail('first read() should not fulfill'),
+ () => t.fail('first read() should not reject')
+ );
+
+ reader.read().then(
+ () => t.fail('second read() should not fulfill'),
+ () => t.fail('second read() should not reject')
+ );
+
+ reader.closed.then(
+ () => t.fail('closed should not fulfill'),
+ () => t.fail('closed should not reject')
+ );
+
+ t.throws(() => reader.releaseLock(), /TypeError/, 'releaseLock should throw a TypeError');
+ t.equal(reader.isActive, true, 'the reader should still be active');
+
+ setTimeout(() => t.end(), 50);
+ });
+
+ test('releasing the lock should cause further read() calls to resolve as if the stream is closed', t => {
+ t.plan(3);
+ const { reader } = factory();
+
+ reader.releaseLock();
+ t.equal(reader.isActive, false, 'the reader should no longer be active');
+
+ reader.read().then(r =>
+ t.deepEqual(r, { value: undefined, done: true }, 'first read() should return closed result'));
+ reader.read().then(r =>
+ t.deepEqual(r, { value: undefined, done: true }, 'second read() should return closed result'));
+ });
+
+ test('releasing the lock should cause closed to fulfill', t => {
+ t.plan(3);
+ const { stream, reader } = factory();
+
+ reader.closed.then(v => t.equal(v, undefined, 'reader.closed got before release should fulfill with undefined'));
+ stream.closed.then(() => t.fail('stream.closed got before release should not fulfill'));
+
+ reader.releaseLock();
+ t.equal(reader.isActive, false, 'the reader should no longer be active');
+
+ reader.closed.then(v => t.equal(v, undefined, 'reader.closed got after release should fulfill with undefined'));
+ stream.closed.then(() => t.fail('stream.closed got after release should not fulfill'));
+ });
+
+ test('canceling via the reader should cause the reader to become inactive', t => {
+ t.plan(3);
+ const { reader } = factory();
+
+ t.equal(reader.isActive, true, 'the reader should be active before releasing it');
+ reader.cancel();
+ t.equal(reader.isActive, false, 'the reader should no longer be active');
+ reader.read().then(r => t.deepEqual(r, { value: undefined, done: true },
+ 'read()ing from the reader should give a done result'));
+ });
+
+ test('canceling via the stream should cause the reader to become inactive', t => {
+ t.plan(3);
+ const { stream, reader } = factory();
+
+ t.equal(reader.isActive, true, 'the reader should be active before releasing it');
+ stream.cancel();
+ t.equal(reader.isActive, false, 'the reader should no longer be active');
+ reader.read().then(r => t.deepEqual(r, { value: undefined, done: true },
+ 'read()ing from the reader should give a done result'));
+ });
+};
diff --git a/reference-implementation/test/templated/readable-stream-empty.js b/reference-implementation/test/templated/readable-stream-empty.js
new file mode 100644
index 000000000..2e6a62833
--- /dev/null
+++ b/reference-implementation/test/templated/readable-stream-empty.js
@@ -0,0 +1,21 @@
+const tapeTest = require('tape-catch');
+
+export default (label, factory) => {
+ function test(description, testFn) {
+ tapeTest(`${label}: ${description}`, testFn);
+ }
+
+ test('instances have the correct methods and properties', t => {
+ const rs = factory();
+
+ t.ok(rs.closed, 'has a closed property');
+ t.equal(typeof rs.closed.then, 'function', 'closed property is thenable');
+
+ t.equal(typeof rs.cancel, 'function', 'has a cancel method');
+ t.equal(typeof rs.getReader, 'function', 'has a getReader method');
+ t.equal(typeof rs.pipeThrough, 'function', 'has a pipeThrough method');
+ t.equal(typeof rs.pipeTo, 'function', 'has a pipeTo method');
+
+ t.end();
+ });
+};
diff --git a/reference-implementation/test/templated/readable-stream-errored-async-only.js b/reference-implementation/test/templated/readable-stream-errored-async-only.js
new file mode 100644
index 000000000..8ec30721d
--- /dev/null
+++ b/reference-implementation/test/templated/readable-stream-errored-async-only.js
@@ -0,0 +1,59 @@
+const tapeTest = require('tape-catch');
+
+export default (label, factory, error) => {
+ function test(description, testFn) {
+ tapeTest(`${label}: ${description}`, testFn);
+ }
+
+ test('piping with no options', t => {
+ t.plan(4);
+ const rs = factory();
+
+ const ws = new WritableStream({
+ abort(r) {
+ t.equal(r, error, 'reason passed to abort should equal the source error');
+ }
+ });
+
+ rs.pipeTo(ws).catch(e => {
+ t.equal(ws.state, 'errored', 'destination should be errored');
+ t.equal(e, error, 'rejection reason of pipeToPromise should be the source error');
+ });
+
+ ws.closed.catch(e => t.equal(e, error, 'rejection reason of dest closed should be the source error'));
+ });
+
+ test('piping with { preventAbort: false }', t => {
+ t.plan(4);
+ const rs = factory();
+
+ const ws = new WritableStream({
+ abort(r) {
+ t.equal(r, error, 'reason passed to abort should equal the source error');
+ }
+ });
+
+ rs.pipeTo(ws, { preventAbort: false }).catch(e => {
+ t.equal(ws.state, 'errored', 'destination should be errored');
+ t.equal(e, error, 'rejection reason of pipeToPromise should be the source error');
+ });
+
+ ws.closed.catch(e => t.equal(e, error, 'rejection reason of dest closed should be the source error'));
+ });
+
+ test('piping with { preventAbort: true }', t => {
+ t.plan(2);
+ const rs = factory();
+
+ const ws = new WritableStream({
+ abort() {
+ t.fail('underlying sink abort should not be called');
+ }
+ });
+
+ rs.pipeTo(ws, { preventAbort: true }).catch(e => {
+ t.equal(ws.state, 'writable', 'destination should remain writable');
+ t.equal(e, error, 'rejection reason of pipeToPromise should be the source error');
+ });
+ });
+};
diff --git a/reference-implementation/test/templated/readable-stream-errored-reader.js b/reference-implementation/test/templated/readable-stream-errored-reader.js
new file mode 100644
index 000000000..0078835e4
--- /dev/null
+++ b/reference-implementation/test/templated/readable-stream-errored-reader.js
@@ -0,0 +1,32 @@
+const tapeTest = require('tape-catch');
+
+export default (label, factory, error) => {
+ function test(description, testFn) {
+ tapeTest(`${label}: ${description}`, testFn);
+ }
+
+ test('closed should reject with the error', t => {
+ t.plan(2);
+ const { stream, reader } = factory();
+
+ stream.closed.then(
+ () => t.fail('stream closed should not fulfill'),
+ r => t.equal(r, error, 'stream closed should reject with the error')
+ );
+
+ reader.closed.then(
+ () => t.fail('reader closed should not fulfill'),
+ r => t.equal(r, error, 'reader closed should reject with the error')
+ );
+ });
+
+ test('read() should reject with the error', t => {
+ t.plan(1);
+ const { reader } = factory();
+
+ reader.read().then(
+ () => t.fail('read() should not fulfill'),
+ r => t.equal(r, error, 'read() should reject with the error')
+ );
+ });
+};
diff --git a/reference-implementation/test/templated/readable-stream-errored-sync-only.js b/reference-implementation/test/templated/readable-stream-errored-sync-only.js
new file mode 100644
index 000000000..7e01d1efe
--- /dev/null
+++ b/reference-implementation/test/templated/readable-stream-errored-sync-only.js
@@ -0,0 +1,33 @@
+const tapeTest = require('tape-catch');
+
+export default (label, factory, error) => {
+ function test(description, testFn) {
+ tapeTest(`${label}: ${description}`, testFn);
+ }
+
+ test('cancel() should return a distinct rejected promise each time', t => {
+ t.plan(5);
+ const rs = factory();
+
+ const cancelPromise1 = rs.cancel();
+ const cancelPromise2 = rs.cancel();
+ const closedPromise = rs.closed;
+
+ cancelPromise1.catch(e => t.equal(e, error, 'first cancel() call should reject with the error'));
+ cancelPromise2.catch(e => t.equal(e, error, 'second cancel() call should reject with the error'));
+ t.notEqual(cancelPromise1, cancelPromise2, 'cancel() calls should return distinct promises');
+ t.notEqual(cancelPromise1, closedPromise, 'cancel() promise 1 should be distinct from closed');
+ t.notEqual(cancelPromise2, closedPromise, 'cancel() promise 2 should be distinct from closed');
+ });
+
+ test('getReader() should throw the error', t => {
+ t.plan(1);
+ const rs = factory();
+
+ try {
+ rs.getReader();
+ } catch (e) {
+ t.equal(e, error, 'getReader() should throw the error');
+ }
+ });
+};
diff --git a/reference-implementation/test/templated/readable-stream-errored.js b/reference-implementation/test/templated/readable-stream-errored.js
new file mode 100644
index 000000000..67c9e3d75
--- /dev/null
+++ b/reference-implementation/test/templated/readable-stream-errored.js
@@ -0,0 +1,51 @@
+const tapeTest = require('tape-catch');
+
+export default (label, factory, error) => {
+ function test(description, testFn) {
+ tapeTest(`${label}: ${description}`, testFn);
+ }
+
+ test('closed should reject with the error', t => {
+ t.plan(1);
+ const rs = factory();
+
+ rs.closed.then(
+ () => t.fail('closed should not fulfill'),
+ r => t.equal(r, error, 'closed should reject with the error')
+ );
+ });
+
+ test('piping to a WritableStream in the writable state should fail', t => {
+ t.plan(3);
+
+ const rs = factory();
+
+ const startPromise = Promise.resolve();
+ const ws = new WritableStream({
+ start() {
+ return startPromise;
+ },
+ write() {
+ t.fail('Unexpected write call');
+ },
+ close() {
+ t.fail('Unexpected close call');
+ },
+ abort() {
+ t.fail('Unexpected abort call');
+ }
+ });
+
+ startPromise.then(() => {
+ t.equal(ws.state, 'writable');
+
+ rs.pipeTo(ws).then(
+ () => t.fail('pipeTo promise should not be fulfilled'),
+ e => {
+ t.equal(e, error, 'pipeTo promise should be rejected with the passed error');
+ t.equal(ws.state, 'writable', 'writable stream should still be writable');
+ }
+ );
+ });
+ });
+};
diff --git a/reference-implementation/test/templated/readable-stream-two-chunks-closed-reader.js b/reference-implementation/test/templated/readable-stream-two-chunks-closed-reader.js
new file mode 100644
index 000000000..2f933a2a9
--- /dev/null
+++ b/reference-implementation/test/templated/readable-stream-two-chunks-closed-reader.js
@@ -0,0 +1,112 @@
+const tapeTest = require('tape-catch');
+
+export default (label, factory, chunks) => {
+ function test(description, testFn) {
+ tapeTest(`${label}: ${description}`, testFn);
+ }
+
+ test('third read(), without waiting, should give { value: undefined, done: true }', t => {
+ t.plan(3);
+
+ const { reader } = factory();
+
+ reader.read().then(r => t.deepEqual(r, { value: chunks[0], done: false }, 'first result should be correct'));
+ reader.read().then(r => t.deepEqual(r, { value: chunks[1], done: false }, 'second result should be correct'));
+ reader.read().then(r => t.deepEqual(r, { value: undefined, done: true }, 'third result should be correct'));
+ });
+
+  test('third read(), with waiting, should give { value: undefined, done: true }', t => {
+ t.plan(3);
+
+ const { reader } = factory();
+
+ reader.read().then(r => {
+ t.deepEqual(r, { value: chunks[0], done: false }, 'first result should be correct');
+
+ return reader.read().then(r => {
+ t.deepEqual(r, { value: chunks[1], done: false }, 'second result should be correct');
+
+ return reader.read().then(r => {
+ t.deepEqual(r, { value: undefined, done: true }, 'third result should be correct');
+ });
+ });
+ })
+ .catch(e => t.error(e));
+ });
+
+ test('draining the stream via read() should cause the stream and reader closed promises to fulfill', t => {
+ t.plan(2);
+
+ const { stream, reader } = factory();
+
+ stream.closed.then(
+ v => t.equal(v, undefined, 'stream closed should fulfill with undefined'),
+ () => t.fail('stream closed should not reject')
+ );
+
+ reader.closed.then(
+ v => t.equal(v, undefined, 'reader closed should fulfill with undefined'),
+ () => t.fail('reader closed should not reject')
+ );
+
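+  // Read both chunks; draining the stream is what should fulfill the closed promises checked above.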
+ reader.read();
+ reader.read();
+ });
+
+ test('releasing the lock after the stream is closed should do nothing', t => {
+ t.plan(2);
+ const { stream, reader } = factory();
+
+ stream.closed.then(
+ () => t.doesNotThrow(() => reader.releaseLock(), 'releasing the lock after stream closed should not throw')
+ );
+
+ reader.closed.then(
+ () => t.doesNotThrow(() => reader.releaseLock(), 'releasing the lock after reader closed should not throw')
+ );
+
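+  // Drain both chunks so the stream closes, exercising the releaseLock() calls in the handlers above.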
+ reader.read();
+ reader.read();
+ });
+
+ test('releasing the lock should cause read() to act as if the stream is closed', t => {
+ t.plan(3);
+ const { reader } = factory();
+
+ reader.releaseLock();
+
+ reader.read().then(r =>
+ t.deepEqual(r, { value: undefined, done: true }, 'first read() should return closed result'));
+ reader.read().then(r =>
+ t.deepEqual(r, { value: undefined, done: true }, 'second read() should return closed result'));
+ reader.read().then(r =>
+ t.deepEqual(r, { value: undefined, done: true }, 'third read() should return closed result'));
+ });
+
+ test('reader\'s closed property always returns the same promise', t => {
+ t.plan(6);
+ const { stream, reader } = factory();
+
+ const readerClosed = reader.closed;
+
+ t.notEqual(readerClosed, stream.closed, 'reader.closed is not equal to stream.closed');
+ t.equal(reader.closed, readerClosed, 'accessing reader.closed twice in succession gives the same value');
+
+ reader.read().then(() => {
+ t.equal(reader.closed, readerClosed, 'reader.closed is the same after read() fulfills');
+
+ reader.releaseLock();
+
+ t.equal(reader.closed, readerClosed, 'reader.closed is the same after releasing the lock');
+
+ stream.closed.then(() => {
+ t.equal(reader.closed, readerClosed, 'reader.closed is the same after the stream is closed');
+ });
+
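+    // A new reader consumes the remaining chunk so the stream can close and the check above can run.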
+ const newReader = stream.getReader();
+ newReader.read();
+ });
+
+ t.equal(reader.closed, readerClosed, 'reader.closed is the same after calling read()');
+ });
+};
diff --git a/reference-implementation/test/templated/readable-stream-two-chunks-closed.js b/reference-implementation/test/templated/readable-stream-two-chunks-closed.js
new file mode 100644
index 000000000..2793dfd13
--- /dev/null
+++ b/reference-implementation/test/templated/readable-stream-two-chunks-closed.js
@@ -0,0 +1,71 @@
+const tapeTest = require('tape-catch');
+
+export default (label, factory, chunks) => {
+ function test(description, testFn) {
+ tapeTest(`${label}: ${description}`, testFn);
+ }
+
+ test('piping with no options and no destination errors', t => {
+ const rs = factory();
+
+ const chunksWritten = [];
+ const ws = new WritableStream({
+ abort() {
+ t.fail('unexpected abort call');
+ },
+ write(chunk) {
+ chunksWritten.push(chunk);
+ }
+ });
+
+ rs.pipeTo(ws).then(() => {
+ t.equal(ws.state, 'closed', 'destination should be closed');
+    t.deepEqual(chunksWritten, chunks, 'all chunks should be written to the destination');
+ t.end();
+ });
+ });
+
+ test('piping with { preventClose: false } and no destination errors', t => {
+ const rs = factory();
+
+ const chunksWritten = [];
+ const ws = new WritableStream({
+ abort() {
+ t.fail('unexpected abort call');
+ },
+ write(chunk) {
+ chunksWritten.push(chunk);
+ }
+ });
+
+  rs.pipeTo(ws, { preventClose: false }).then(() => {
+ t.equal(ws.state, 'closed', 'destination should be closed');
+    t.deepEqual(chunksWritten, chunks, 'all chunks should be written to the destination');
+ t.end();
+ });
+ });
+
+ test('piping with { preventClose: true } and no destination errors', t => {
+ const rs = factory();
+
+ const chunksWritten = [];
+ const ws = new WritableStream({
+ close() {
+ t.fail('unexpected close call');
+ },
+ abort() {
+ t.fail('unexpected abort call');
+ },
+ write(chunk) {
+ chunksWritten.push(chunk);
+ }
+ });
+
+ rs.pipeTo(ws, { preventClose: true }).then(() => {
+ t.equal(ws.state, 'writable', 'destination should be writable');
+    t.deepEqual(chunksWritten, chunks, 'all chunks should be written to the destination');
+ t.end();
+ });
+ });
+};
+
diff --git a/reference-implementation/test/templated/readable-stream-two-chunks-open-reader.js b/reference-implementation/test/templated/readable-stream-two-chunks-open-reader.js
new file mode 100644
index 000000000..7f40055bc
--- /dev/null
+++ b/reference-implementation/test/templated/readable-stream-two-chunks-open-reader.js
@@ -0,0 +1,52 @@
+const tapeTest = require('tape-catch');
+
+export default (label, factory, chunks) => {
+ function test(description, testFn) {
+ tapeTest(`${label}: ${description}`, testFn);
+ }
+
+ test('calling read() twice without waiting will eventually give both chunks', t => {
+ t.plan(2);
+ const { reader } = factory();
+
+ reader.read().then(r => t.deepEqual(r, { value: chunks[0], done: false }, 'first result should be correct'));
+ reader.read().then(r => t.deepEqual(r, { value: chunks[1], done: false }, 'second result should be correct'));
+ });
+
+ test('calling read() twice with waiting will eventually give both chunks', t => {
+ t.plan(2);
+ const { reader } = factory();
+
+ reader.read().then(r => {
+ t.deepEqual(r, { value: chunks[0], done: false }, 'first result should be correct');
+
+ return reader.read().then(r => {
+ t.deepEqual(r, { value: chunks[1], done: false }, 'second result should be correct');
+ });
+ })
+ .catch(e => t.error(e));
+ });
+
+ test('read() should return distinct promises each time', t => {
+ t.plan(1);
+ const { reader } = factory();
+
+ t.notEqual(reader.read(), reader.read(), 'the promises returned should be distinct');
+ });
+
+ test('cancel() after a read() should still give that single read result', t => {
+ t.plan(4);
+ const { stream, reader } = factory();
+
+ stream.closed.then(v => t.equal(v, undefined, 'stream closed should fulfill with undefined'));
+ reader.closed.then(v => t.equal(v, undefined, 'reader closed should fulfill with undefined'));
+
+ reader.read().then(r => t.deepEqual(r, { value: chunks[0], done: false },
+ 'promise returned before cancellation should fulfill with a chunk'));
+
+ reader.cancel();
+
+ reader.read().then(r => t.deepEqual(r, { value: undefined, done: true },
+ 'promise returned after cancellation should fulfill with an end-of-stream signal'));
+ });
+};
diff --git a/reference-implementation/test/transform-stream-errors.js b/reference-implementation/test/transform-stream-errors.js
index 5a83712ab..fa0ec63eb 100644
--- a/reference-implementation/test/transform-stream-errors.js
+++ b/reference-implementation/test/transform-stream-errors.js
@@ -1,7 +1,7 @@
const test = require('tape-catch');
test('TransformStream errors thrown in transform put the writable and readable in an errored state', t => {
- t.plan(9);
+ t.plan(5);
const thrownError = new Error('bad things are happening!');
const ts = new TransformStream({
@@ -10,25 +10,12 @@ test('TransformStream errors thrown in transform put the writable and readable i
}
});
- t.equal(ts.readable.state, 'waiting', 'readable starts in waiting');
t.equal(ts.writable.state, 'writable', 'writable starts in writable');
- ts.writable.write('a');
-
- t.equal(ts.readable.state, 'waiting', 'readable stays in waiting immediately after throw');
- t.equal(ts.writable.state, 'waiting', 'writable stays in waiting immediately after throw');
-
- setTimeout(() => {
- t.equal(ts.readable.state, 'errored', 'readable becomes errored after writing to the throwing transform');
- t.equal(ts.writable.state, 'errored', 'writable becomes errored after writing to the throwing transform');
-
- try {
- ts.readable.read();
- t.fail('read() didn\'nt throw');
- } catch (error) {
- t.equal(error, thrownError, 'readable\'s read should throw the thrown error');
- }
- }, 0);
+ ts.readable.getReader().read().then(
+ () => t.fail('readable\'s read() should reject'),
+ r => t.equal(r, thrownError, 'readable\'s read should reject with the thrown error')
+ );
ts.readable.closed.then(
() => t.fail('readable\'s closed should not be fulfilled'),
@@ -39,10 +26,13 @@ test('TransformStream errors thrown in transform put the writable and readable i
() => t.fail('writable\'s closed should not be fulfilled'),
e => t.equal(e, thrownError, 'writable\'s closed should be rejected with the thrown error')
);
+
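+  // This write feeds the throwing transform; the resulting error is observed via the promises set up above.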
+ ts.writable.write('a');
+ t.equal(ts.writable.state, 'waiting', 'writable becomes waiting immediately after throw');
});
test('TransformStream errors thrown in flush put the writable and readable in an errored state', t => {
- t.plan(11);
+ t.plan(6);
const thrownError = new Error('bad things are happening!');
const ts = new TransformStream({
@@ -54,30 +44,10 @@ test('TransformStream errors thrown in flush put the writable and readable in an
}
});
- t.equal(ts.readable.state, 'waiting', 'readable starts in waiting');
- t.equal(ts.writable.state, 'writable', 'writable starts in writable');
-
- ts.writable.write('a');
-
- t.equal(ts.readable.state, 'waiting', 'readable stays in waiting after a write');
- t.equal(ts.writable.state, 'waiting', 'writable stays in waiting after a write');
-
- ts.writable.close();
-
- t.equal(ts.readable.state, 'waiting', 'readable stays in waiting immediately after a throw');
- t.equal(ts.writable.state, 'closing', 'writable becomes closing immediately after a throw');
-
- setTimeout(() => {
- t.equal(ts.readable.state, 'errored', 'readable becomes errored after closing with the throwing flush');
- t.equal(ts.writable.state, 'errored', 'writable becomes errored after closing with the throwing flush');
-
- try {
- ts.readable.read();
- t.fail('read() didn\'nt throw');
- } catch (error) {
- t.equal(error, thrownError, 'readable\'s read should throw the thrown error');
- }
- }, 0);
+ ts.readable.getReader().read().then(
+ () => t.fail('readable\'s read() should reject'),
+ r => t.equal(r, thrownError, 'readable\'s read should reject with the thrown error')
+ );
ts.readable.closed.then(
() => t.fail('readable\'s closed should not be fulfilled'),
@@ -88,4 +58,10 @@ test('TransformStream errors thrown in flush put the writable and readable in an
() => t.fail('writable\'s closed should not be fulfilled'),
e => t.equal(e, thrownError, 'writable\'s closed should be rejected with the thrown error')
);
+
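+  // Writing and then closing triggers flush, whose thrown error is observed via the promises set up above.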
+ t.equal(ts.writable.state, 'writable', 'writable starts in writable');
+ ts.writable.write('a');
+ t.equal(ts.writable.state, 'waiting', 'writable becomes waiting after a write');
+ ts.writable.close();
+ t.equal(ts.writable.state, 'closing', 'writable becomes closing after the close call');
});
diff --git a/reference-implementation/test/transform-stream.js b/reference-implementation/test/transform-stream.js
index 004dfdc94..837ff41a0 100644
--- a/reference-implementation/test/transform-stream.js
+++ b/reference-implementation/test/transform-stream.js
@@ -1,5 +1,7 @@
const test = require('tape-catch');
+import readableStreamToArray from './utils/readable-stream-to-array';
+
test('TransformStream can be constructed with a transform function', t => {
t.plan(1);
t.doesNotThrow(() => new TransformStream({ transform() { } }), 'TransformStream constructed with no errors');
@@ -22,16 +24,15 @@ test('TransformStream instances must have writable and readable properties of th
t.ok(ts.readable instanceof ReadableStream, 'readable is an instance of ReadableStream');
});
-test('TransformStream writables and readables start in the expected states', t => {
- t.plan(2);
+test('TransformStream writable starts in the writable state', t => {
+ t.plan(1);
const ts = new TransformStream({ transform() { } });
t.equal(ts.writable.state, 'writable', 'writable starts writable');
- t.equal(ts.readable.state, 'waiting', 'readable starts waiting');
});
test('Pass-through sync TransformStream: can read from readable what is put into writable', t => {
- t.plan(5);
+ t.plan(3);
const ts = new TransformStream({
transform(chunk, enqueue, done) {
@@ -40,22 +41,22 @@ test('Pass-through sync TransformStream: can read from readable what is put into
}
});
- setTimeout(() => {
- ts.writable.write('a');
+ ts.writable.write('a');
- t.equal(ts.writable.state, 'waiting', 'writable is waiting after one write');
- t.equal(ts.readable.state, 'readable', 'readable is readable since transformation is sync');
- t.equal(ts.readable.read(), 'a', 'result from reading the readable is the same as was written to writable');
- t.equal(ts.readable.state, 'waiting', 'readable is waiting again after having read all that was written');
- ts.writable.ready.then(() => {
+ t.equal(ts.writable.state, 'waiting', 'writable is waiting after one write');
+ ts.readable.getReader().read().then(result => {
+ t.deepEqual(result, { value: 'a', done: false },
+ 'result from reading the readable is the same as was written to writable');
+
+ return ts.writable.ready.then(() => {
t.equal(ts.writable.state, 'writable', 'writable becomes writable again');
- })
- .catch(t.error);
- }, 0);
+ });
+ })
+ .catch(e => t.error(e));
});
test('Uppercaser sync TransformStream: can read from readable transformed version of what is put into writable', t => {
- t.plan(5);
+ t.plan(3);
const ts = new TransformStream({
transform(chunk, enqueue, done) {
@@ -64,22 +65,23 @@ test('Uppercaser sync TransformStream: can read from readable transformed versio
}
});
- setTimeout(() => {
- ts.writable.write('a');
+ ts.writable.write('a');
- t.equal(ts.writable.state, 'waiting', 'writable is waiting after one write');
- t.equal(ts.readable.state, 'readable', 'readable is readable since transformation is sync');
- t.equal(ts.readable.read(), 'A', 'result from reading the readable is the same as was written to writable');
- t.equal(ts.readable.state, 'waiting', 'readable is waiting again after having read all that was written');
- ts.writable.ready.then(() => {
+ t.equal(ts.writable.state, 'waiting', 'writable is waiting after one write');
+
+ ts.readable.getReader().read().then(result => {
+ t.deepEqual(result, { value: 'A', done: false },
+ 'result from reading the readable is the transformation of what was written to writable');
+
+ return ts.writable.ready.then(() => {
t.equal(ts.writable.state, 'writable', 'writable becomes writable again');
- })
- .catch(t.error);
- }, 0);
+ });
+ })
+ .catch(e => t.error(e));
});
test('Uppercaser-doubler sync TransformStream: can read both chunks put into the readable', t => {
- t.plan(7);
+ t.plan(4);
const ts = new TransformStream({
transform(chunk, enqueue, done) {
@@ -89,24 +91,30 @@ test('Uppercaser-doubler sync TransformStream: can read both chunks put into the
}
});
- setTimeout(() => {
- ts.writable.write('a');
-
- t.equal(ts.writable.state, 'waiting', 'writable is waiting after one write');
- t.equal(ts.readable.state, 'readable', 'readable is readable after writing to writable');
- t.equal(ts.readable.read(), 'A', 'the first chunk read is the transformation of the single chunk written');
- t.equal(ts.readable.state, 'readable', 'readable is readable still after reading the first chunk');
- t.equal(ts.readable.read(), 'A', 'the second chunk read is also the transformation of the single chunk written');
- t.equal(ts.readable.state, 'waiting', 'readable is waiting again after having read both enqueued chunks');
- ts.writable.ready.then(() => {
- t.equal(ts.writable.state, 'writable', 'writable becomes writable again');
- })
- .catch(t.error);
- }, 0);
+ ts.writable.write('a');
+
+ t.equal(ts.writable.state, 'waiting', 'writable is waiting after one write');
+
+ const reader = ts.readable.getReader();
+
+ reader.read().then(result1 => {
+ t.deepEqual(result1, { value: 'A', done: false },
+ 'the first chunk read is the transformation of the single chunk written');
+
+ return reader.read().then(result2 => {
+ t.deepEqual(result2, { value: 'A', done: false },
+ 'the second chunk read is also the transformation of the single chunk written');
+
+ return ts.writable.ready.then(() => {
+ t.equal(ts.writable.state, 'writable', 'writable becomes writable again');
+ });
+ });
+ })
+ .catch(e => t.error(e));
});
-test('Uppercaser async TransformStream: readable chunk becomes available asynchronously', t => {
- t.plan(7);
+test('Uppercaser async TransformStream: can read from readable transformed version of what is put into writable', t => {
+ t.plan(3);
const ts = new TransformStream({
transform(chunk, enqueue, done) {
@@ -115,29 +123,23 @@ test('Uppercaser async TransformStream: readable chunk becomes available asynchr
}
});
- setTimeout(() => {
- ts.writable.write('a');
-
- t.equal(ts.writable.state, 'waiting', 'writable is now waiting since the transform has not signaled done');
- t.equal(ts.readable.state, 'waiting', 'readable is still not readable');
+ ts.writable.write('a');
- ts.readable.ready.then(() => {
- t.equal(ts.readable.state, 'readable', 'readable eventually becomes readable');
- t.equal(ts.readable.read(), 'A', 'chunk read from readable is the transformation result');
- t.equal(ts.readable.state, 'waiting', 'readable is waiting again after having read the chunk');
+ t.equal(ts.writable.state, 'waiting', 'writable is waiting after one write');
- t.equal(ts.writable.state, 'waiting', 'writable is still waiting since the transform still has not signaled done');
+ ts.readable.getReader().read().then(result => {
+ t.deepEqual(result, { value: 'A', done: false },
+ 'result from reading the readable is the transformation of what was written to writable');
- return ts.writable.ready.then(() => {
- t.equal(ts.writable.state, 'writable', 'writable eventually becomes writable (after the transform signals done)');
- });
- })
- .catch(t.error);
- }, 0);
+ return ts.writable.ready.then(() => {
+ t.equal(ts.writable.state, 'writable', 'writable becomes writable again');
+ });
+ })
+ .catch(e => t.error(e));
});
-test('Uppercaser-doubler async TransformStream: readable chunks becomes available asynchronously', t => {
- t.plan(11);
+test('Uppercaser-doubler async TransformStream: can read both chunks put into the readable', t => {
+ t.plan(4);
const ts = new TransformStream({
transform(chunk, enqueue, done) {
@@ -147,51 +149,40 @@ test('Uppercaser-doubler async TransformStream: readable chunks becomes availabl
}
});
- setTimeout(() => {
- ts.writable.write('a');
-
- t.equal(ts.writable.state, 'waiting', 'writable is now waiting since the transform has not signaled done');
- t.equal(ts.readable.state, 'waiting', 'readable is still not readable');
-
- ts.readable.ready.then(() => {
- t.equal(ts.readable.state, 'readable', 'readable eventually becomes readable');
- t.equal(ts.readable.read(), 'A', 'chunk read from readable is the transformation result');
- t.equal(ts.readable.state, 'waiting', 'readable is waiting again after having read the chunk');
+ const reader = ts.readable.getReader();
- t.equal(ts.writable.state, 'waiting', 'writable is still waiting since the transform still has not signaled done');
+ ts.writable.write('a');
- return ts.readable.ready.then(() => {
- t.equal(ts.readable.state, 'readable', 'readable becomes readable again');
- t.equal(ts.readable.read(), 'A', 'chunk read from readable is the transformation result');
- t.equal(ts.readable.state, 'waiting', 'readable is waiting again after having read the chunk');
+ t.equal(ts.writable.state, 'waiting', 'writable is waiting after one write');
+ reader.read().then(result1 => {
+ t.deepEqual(result1, { value: 'A', done: false },
+ 'the first chunk read is the transformation of the single chunk written');
- t.equal(ts.writable.state, 'waiting', 'writable is still waiting since the transform still has not signaled done');
+ return reader.read().then(result2 => {
+ t.deepEqual(result2, { value: 'A', done: false },
+ 'the second chunk read is also the transformation of the single chunk written');
- return ts.writable.ready.then(() => {
- t.equal(ts.writable.state, 'writable', 'writable eventually becomes writable (after the transform signals done)');
- });
+ return ts.writable.ready.then(() => {
+ t.equal(ts.writable.state, 'writable', 'writable becomes writable again');
});
- })
- .catch(t.error);
- }, 0);
+ });
+ })
+ .catch(e => t.error(e));
});
test('TransformStream: by default, closing the writable closes the readable (when there are no queued writes)', t => {
- t.plan(4);
+ t.plan(3);
const ts = new TransformStream({ transform() { } });
ts.writable.close();
t.equal(ts.writable.state, 'closing', 'writable is closing');
- setTimeout(() => {
- t.equal(ts.readable.state, 'closed', 'readable is closed within a tick');
- ts.writable.closed.then(() => {
- t.equal(ts.writable.state, 'closed', 'writable becomes closed eventually');
- t.equal(ts.readable.state, 'closed', 'readable is still closed at that time');
- })
- .catch(t.error);
- }, 0);
+ Promise.all([ts.writable.closed, ts.readable.closed]).then(() => {
+ t.pass('both writable and readable closed promises fulfill');
+ t.equal(ts.writable.state, 'closed', 'writable state becomes closed eventually');
+ })
+ .catch(e => t.error(e));
});
test('TransformStream: by default, closing the writable waits for transforms to finish before closing both', t => {
@@ -206,19 +197,25 @@ test('TransformStream: by default, closing the writable waits for transforms to
ts.writable.write('a');
ts.writable.close();
t.equal(ts.writable.state, 'closing', 'writable is closing');
+
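+  // Track readable closure with a flag, since these tests no longer poll ts.readable.state.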
+ let rsClosed = false;
+ ts.readable.closed.then(() => {
+ rsClosed = true;
+ });
+
setTimeout(() => {
- t.equal(ts.readable.state, 'waiting', 'readable is still waiting after a tick');
+ t.equal(rsClosed, false, 'readable is not closed after a tick');
ts.writable.closed.then(() => {
t.equal(ts.writable.state, 'closed', 'writable becomes closed eventually');
- t.equal(ts.readable.state, 'closed', 'readable is closed at that point');
+ t.equal(rsClosed, true, 'readable is closed at that point');
})
- .catch(t.error);
+ .catch(e => t.error(e));
}, 0);
});
test('TransformStream: by default, closing the writable closes the readable after sync enqueues and async done', t => {
- t.plan(7);
+ t.plan(3);
const ts = new TransformStream({
transform(chunk, enqueue, done) {
@@ -231,24 +228,19 @@ test('TransformStream: by default, closing the writable closes the readable afte
ts.writable.write('a');
ts.writable.close();
t.equal(ts.writable.state, 'closing', 'writable is closing');
- setTimeout(() => {
- t.equal(ts.readable.state, 'readable', 'readable is readable');
- ts.writable.closed.then(() => {
- t.equal(ts.writable.state, 'closed', 'writable becomes closed eventually');
- t.equal(ts.readable.state, 'readable', 'readable is still readable at that time');
-
- t.equal(ts.readable.read(), 'x', 'can read the first enqueued chunk from the readable');
- t.equal(ts.readable.read(), 'y', 'can read the second enqueued chunk from the readable');
+ ts.writable.closed.then(() => {
+ t.equal(ts.writable.state, 'closed', 'writable becomes closed eventually');
- t.equal(ts.readable.state, 'closed', 'after reading, the readable is now closed');
- })
- .catch(t.error);
- }, 0);
+ return readableStreamToArray(ts.readable).then(chunks => {
+ t.deepEqual(chunks, ['x', 'y'], 'both enqueued chunks can be read from the readable');
+ });
+ })
+ .catch(e => t.error(e));
});
test('TransformStream: by default, closing the writable closes the readable after async enqueues and async done', t => {
- t.plan(8);
+ t.plan(3);
const ts = new TransformStream({
transform(chunk, enqueue, done) {
@@ -261,19 +253,15 @@ test('TransformStream: by default, closing the writable closes the readable afte
ts.writable.write('a');
ts.writable.close();
t.equal(ts.writable.state, 'closing', 'writable is closing');
- setTimeout(() => {
- t.equal(ts.readable.state, 'waiting', 'readable starts waiting');
- ts.writable.closed.then(() => {
- t.equal(ts.writable.state, 'closed', 'writable becomes closed eventually');
- t.equal(ts.readable.state, 'readable', 'readable is now readable since all chunks have been enqueued');
- t.equal(ts.readable.read(), 'x', 'can read the first enqueued chunk from the readable');
- t.equal(ts.readable.state, 'readable', 'after reading one chunk, the readable is still readable');
- t.equal(ts.readable.read(), 'y', 'can read the second enqueued chunk from the readable');
- t.equal(ts.readable.state, 'closed', 'after reading two chunks, the readable is now closed');
- })
- .catch(t.error);
- }, 0);
+ ts.writable.closed.then(() => {
+ t.equal(ts.writable.state, 'closed', 'writable becomes closed eventually');
+
+ return readableStreamToArray(ts.readable).then(chunks => {
+ t.deepEqual(chunks, ['x', 'y'], 'both enqueued chunks can be read from the readable');
+ });
+ })
+ .catch(e => t.error(e));
});
test('TransformStream flush is called immediately when the writable is closed, if no writes are queued', t => {
@@ -287,10 +275,9 @@ test('TransformStream flush is called immediately when the writable is closed, i
}
});
- setTimeout(() => {
- ts.writable.close();
+ ts.writable.close().then(() => {
t.ok(flushCalled, 'closing the writable triggers the transform flush immediately');
- }, 0);
+ });
});
test('TransformStream flush is called after all queued writes finish, once the writable is closed', t => {
@@ -306,20 +293,23 @@ test('TransformStream flush is called after all queued writes finish, once the w
}
});
+ ts.writable.write('a');
+ ts.writable.close();
+ t.notOk(flushCalled, 'closing the writable does not immediately call flush if writes are not finished');
+
+ let rsClosed = false;
+ ts.readable.closed.then(() => {
+ rsClosed = true;
+ });
+
setTimeout(() => {
- ts.writable.write('a');
- ts.writable.close();
- t.notOk(flushCalled, 'closing the writable does not immediately call flush if writes are not finished');
-
- setTimeout(() => {
- t.ok(flushCalled, 'flush is eventually called');
- t.equal(ts.readable.state, 'waiting', 'if flush does not call close, the readable stays open');
- }, 50);
- }, 0);
+ t.ok(flushCalled, 'flush is eventually called');
+ t.equal(rsClosed, false, 'if flush does not call close, the readable does not become closed');
+ }, 50);
});
test('TransformStream flush gets a chance to enqueue more into the readable', t => {
- t.plan(6);
+ t.plan(2);
const ts = new TransformStream({
transform(chunk, enqueue, done) {
@@ -331,23 +321,22 @@ test('TransformStream flush gets a chance to enqueue more into the readable', t
}
});
- setTimeout(() => {
- t.equal(ts.readable.state, 'waiting', 'before doing anything, the readable is waiting');
- ts.writable.write('a');
- t.equal(ts.readable.state, 'waiting', 'after a write to the writable, the readable is still waiting');
- ts.writable.close();
- ts.readable.ready.then(() => {
- t.equal(ts.readable.state, 'readable', 'after closing the writable, the readable is now readable as a result of flush');
- t.equal(ts.readable.read(), 'x', 'reading the first chunk gives back what was enqueued');
- t.equal(ts.readable.read(), 'y', 'reading the second chunk gives back what was enqueued');
- t.equal(ts.readable.state, 'waiting', 'after reading both chunks, the readable is waiting, since close was not called');
- })
- .catch(t.error);
- }, 0);
+ const reader = ts.readable.getReader();
+
+ ts.writable.write('a');
+ ts.writable.close();
+ reader.read().then(result1 => {
+ t.deepEqual(result1, { value: 'x', done: false }, 'the first chunk read is the first one enqueued in flush');
+
+ return reader.read().then(result2 => {
+ t.deepEqual(result2, { value: 'y', done: false }, 'the second chunk read is the second one enqueued in flush');
+ });
+ })
+ .catch(e => t.error(e));
});
test('TransformStream flush gets a chance to enqueue more into the readable, and can then async close', t => {
- t.plan(7);
+ t.plan(3);
const ts = new TransformStream({
transform(chunk, enqueue, done) {
@@ -360,22 +349,21 @@ test('TransformStream flush gets a chance to enqueue more into the readable, and
}
});
- setTimeout(() => {
- t.equal(ts.readable.state, 'waiting', 'before doing anything, the readable is waiting');
- ts.writable.write('a');
- t.equal(ts.readable.state, 'waiting', 'after a write to the writable, the readable is still waiting');
- ts.writable.close();
- ts.readable.ready.then(() => {
- t.equal(ts.readable.state, 'readable', 'after closing the writable, the readable is now readable as a result of flush');
- t.equal(ts.readable.read(), 'x', 'reading the first chunk gives back what was enqueued');
- t.equal(ts.readable.read(), 'y', 'reading the second chunk gives back what was enqueued');
- t.equal(ts.readable.state, 'waiting', 'after reading both chunks, the readable is waiting, since close was not called');
- })
- .catch(t.error);
+ const reader = ts.readable.getReader();
- ts.readable.closed.then(() => {
- t.equal(ts.readable.state, 'closed', 'the readable eventually does close, after close is called from flush');
- })
- .catch(t.error);
- }, 0);
+ ts.writable.write('a');
+ ts.writable.close();
+ reader.read().then(result1 => {
+ t.deepEqual(result1, { value: 'x', done: false }, 'the first chunk read is the first one enqueued in flush');
+
+ return reader.read().then(result2 => {
+ t.deepEqual(result2, { value: 'y', done: false }, 'the second chunk read is the second one enqueued in flush');
+ });
+ })
+ .catch(e => t.error(e));
+
+ ts.readable.closed.then(() => {
+ t.pass('readable becomes closed');
+ })
+ .catch(e => t.error(e));
});
diff --git a/reference-implementation/test/utils/random-push-source.js b/reference-implementation/test/utils/random-push-source.js
index 4ff21d437..43039a594 100644
--- a/reference-implementation/test/utils/random-push-source.js
+++ b/reference-implementation/test/utils/random-push-source.js
@@ -24,24 +24,23 @@ export default class RandomPushSource {
this.paused = false;
}
- const stream = this;
+ const source = this;
function writeChunk() {
- if (stream.paused) {
+ if (source.paused) {
return;
}
- stream.pushed++;
+ source.pushed++;
- if (stream.toPush > 0 && stream.pushed > stream.toPush) {
- if (stream._intervalHandle) {
- clearInterval(stream._intervalHandle);
- stream._intervalHandle = undefined;
+ if (source.toPush > 0 && source.pushed > source.toPush) {
+ if (source._intervalHandle) {
+ clearInterval(source._intervalHandle);
+ source._intervalHandle = undefined;
}
- stream.closed = true;
- stream.onend();
- }
- else {
- stream.ondata(randomChunk(128));
+ source.closed = true;
+ source.onend();
+ } else {
+ source.ondata(randomChunk(128));
}
}
}
diff --git a/reference-implementation/test/utils/readable-stream-to-array.js b/reference-implementation/test/utils/readable-stream-to-array.js
index b77fb9397..7c8b3d3db 100644
--- a/reference-implementation/test/utils/readable-stream-to-array.js
+++ b/reference-implementation/test/utils/readable-stream-to-array.js
@@ -1,18 +1,18 @@
export default function readableStreamToArray(readable) {
const chunks = [];
+ const reader = readable.getReader();
- pump();
- return readable.closed.then(() => chunks);
+ return pump();
function pump() {
- while (readable.state === "readable") {
- chunks.push(readable.read());
- }
+ return reader.read().then(({ value, done }) => {
+ if (done) {
+ reader.releaseLock();
+ return chunks;
+ }
- if (readable.state === "waiting") {
- readable.ready.then(pump);
- }
-
- // Otherwise the stream is "closed" or "errored", which will be handled above.
+ chunks.push(value);
+ return pump();
+ });
}
}
|