Skip to content

Commit a748960

Browse files
sebmarkbage authored and zhengjitf committed
[Fizz] Expose maxBoundarySize as an option (facebook#21029)
* Expose maxBoundarySize as an option
* Adjust the heuristic
* Rename to progressiveChunkSize
1 parent 7788c4c commit a748960

File tree

4 files changed

+47
-7
lines changed

4 files changed

+47
-7
lines changed

packages/react-dom/src/server/ReactDOMFizzServerBrowser.js

Lines changed: 6 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -18,6 +18,7 @@ import {
1818

1919
type Options = {
2020
signal?: AbortSignal,
21+
progressiveChunkSize?: number,
2122
};
2223

2324
function renderToReadableStream(
@@ -35,7 +36,11 @@ function renderToReadableStream(
3536
}
3637
return new ReadableStream({
3738
start(controller) {
38-
request = createRequest(children, controller);
39+
request = createRequest(
40+
children,
41+
controller,
42+
options ? options.progressiveChunkSize : undefined,
43+
);
3944
startWork(request);
4045
},
4146
pull(controller) {

packages/react-dom/src/server/ReactDOMFizzServerNode.js

Lines changed: 10 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -21,6 +21,10 @@ function createDrainHandler(destination, request) {
2121
return () => startFlowing(request);
2222
}
2323

24+
type Options = {
25+
progressiveChunkSize?: number,
26+
};
27+
2428
type Controls = {
2529
// Cancel any pending I/O and put anything remaining into
2630
// client rendered mode.
@@ -30,8 +34,13 @@ type Controls = {
3034
function pipeToNodeWritable(
3135
children: ReactNodeList,
3236
destination: Writable,
37+
options?: Options,
3338
): Controls {
34-
const request = createRequest(children, destination);
39+
const request = createRequest(
40+
children,
41+
destination,
42+
options ? options.progressiveChunkSize : undefined,
43+
);
3544
let hasStartedFlowing = false;
3645
startWork(request);
3746
return {

packages/react-noop-renderer/src/ReactNoopServer.js

Lines changed: 10 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -210,7 +210,11 @@ const ReactNoopServer = ReactFizzServer({
210210
},
211211
});
212212

213-
function render(children: React$Element<any>): Destination {
213+
type Options = {
214+
progressiveChunkSize?: number,
215+
};
216+
217+
function render(children: React$Element<any>, options?: Options): Destination {
214218
const destination: Destination = {
215219
root: null,
216220
placeholders: new Map(),
@@ -220,7 +224,11 @@ function render(children: React$Element<any>): Destination {
220224
ReactNoopServer.abort(request);
221225
},
222226
};
223-
const request = ReactNoopServer.createRequest(children, destination);
227+
const request = ReactNoopServer.createRequest(
228+
children,
229+
destination,
230+
options ? options.progressiveChunkSize : undefined,
231+
);
224232
ReactNoopServer.startWork(request);
225233
ReactNoopServer.startFlowing(request);
226234
return destination;

packages/react-server/src/ReactFizzServer.js

Lines changed: 21 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -99,7 +99,7 @@ const CLOSED = 2;
9999
type Request = {
100100
+destination: Destination,
101101
+responseState: ResponseState,
102-
+maxBoundarySize: number,
102+
+progressiveChunkSize: number,
103103
status: 0 | 1 | 2,
104104
nextSegmentId: number,
105105
allPendingWork: number, // when it reaches zero, we can close the connection.
@@ -113,16 +113,34 @@ type Request = {
113113
partialBoundaries: Array<SuspenseBoundary>, // Partially completed boundaries that can flush its segments early.
114114
};
115115

116+
// This is a default heuristic for how to split up the HTML content into progressive
117+
// loading. Our goal is to be able to display additional new content about every 500ms.
118+
// Faster than that is unnecessary and should be throttled on the client. It also
119+
// adds unnecessary overhead to do more splits. We don't know if it's a higher or lower
120+
// end device but higher end suffer less from the overhead than lower end does from
121+
// not getting small enough pieces. We error on the side of low end.
122+
// We base this on low end 3G speeds which is about 500kbits per second. We assume
123+
// that there can be a reasonable drop off from max bandwidth which leaves you with
124+
// as little as 80%. We can receive half of that each 500ms - at best. In practice,
125+
// a little bandwidth is lost to processing and contention - e.g. CSS and images that
126+
// are downloaded along with the main content. So we estimate about half of that to be
127+
// the lower end throughput. In other words, we expect that you can at least show
128+
// about 12.5kb of content per 500ms. Not counting starting latency for the first
129+
// paint.
130+
// 500 * 1024 / 8 * .8 * 0.5 / 2
131+
const DEFAULT_PROGRESSIVE_CHUNK_SIZE = 12800;
132+
116133
export function createRequest(
117134
children: ReactNodeList,
118135
destination: Destination,
136+
progressiveChunkSize: number = DEFAULT_PROGRESSIVE_CHUNK_SIZE,
119137
): Request {
120138
const pingedWork = [];
121139
const abortSet: Set<SuspendedWork> = new Set();
122140
const request = {
123141
destination,
124142
responseState: createResponseState(),
125-
maxBoundarySize: 1024,
143+
progressiveChunkSize,
126144
status: BUFFERING,
127145
nextSegmentId: 0,
128146
allPendingWork: 0,
@@ -642,7 +660,7 @@ function flushSegment(
642660
flushSubtree(request, destination, segment);
643661

644662
return writeEndSuspenseBoundary(destination);
645-
} else if (boundary.byteSize > request.maxBoundarySize) {
663+
} else if (boundary.byteSize > request.progressiveChunkSize) {
646664
// This boundary is large and will be emitted separately so that we can progressively show
647665
// other content. We add it to the queue during the flush because we have to ensure that
648666
// the parent flushes first so that there's something to inject it into.

0 commit comments

Comments (0)