@@ -99,7 +99,7 @@ const CLOSED = 2;
99
99
type Request = {
100
100
+ destination : Destination ,
101
101
+ responseState : ResponseState ,
102
- + maxBoundarySize : number ,
102
+ + progressiveChunkSize : number ,
103
103
status : 0 | 1 | 2 ,
104
104
nextSegmentId : number ,
105
105
allPendingWork : number , // when it reaches zero, we can close the connection.
@@ -113,16 +113,34 @@ type Request = {
113
113
partialBoundaries : Array < SuspenseBoundary > , // Partially completed boundaries that can flush its segments early.
114
114
} ;
115
115
116
+ // This is a default heuristic for how to split up the HTML content into progressive
117
+ // loading. Our goal is to be able to display additional new content about every 500ms.
118
+ // Faster than that is unnecessary and should be throttled on the client. It also
119
+ // adds unnecessary overhead to do more splits. We don't know if it's a higher or lower
120
+ // end device, but the higher end suffers less from the overhead than the lower end does from
121
+ // not getting small enough pieces. We err on the side of the low end.
122
+ // We base this on low-end 3G speeds, which are about 500 kbits per second. We assume
123
+ // that there can be a reasonable drop off from max bandwidth which leaves you with
124
+ // as little as 80%. We can receive half of that each 500ms - at best. In practice,
125
+ // a little bandwidth is lost to processing and contention - e.g. CSS and images that
126
+ // are downloaded along with the main content. So we estimate about half of that to be
127
+ // the lower end throughput. In other words, we expect that you can at least show
128
+ // about 12.5kb of content per 500ms. Not counting starting latency for the first
129
+ // paint.
130
+ // 500 * 1024 / 8 * .8 * 0.5 / 2
131
+ const DEFAULT_PROGRESSIVE_CHUNK_SIZE = 12800 ;
132
+
116
133
export function createRequest (
117
134
children : ReactNodeList ,
118
135
destination : Destination ,
136
+ progressiveChunkSize : number = DEFAULT_PROGRESSIVE_CHUNK_SIZE ,
119
137
) : Request {
120
138
const pingedWork = [ ] ;
121
139
const abortSet : Set < SuspendedWork > = new Set();
122
140
const request = {
123
141
destination ,
124
142
responseState : createResponseState ( ) ,
125
- maxBoundarySize : 1024 ,
143
+ progressiveChunkSize ,
126
144
status : BUFFERING ,
127
145
nextSegmentId : 0 ,
128
146
allPendingWork : 0 ,
@@ -642,7 +660,7 @@ function flushSegment(
642
660
flushSubtree ( request , destination , segment ) ;
643
661
644
662
return writeEndSuspenseBoundary ( destination ) ;
645
- } else if ( boundary . byteSize > request.maxBoundarySize ) {
663
+ } else if ( boundary . byteSize > request.progressiveChunkSize ) {
646
664
// This boundary is large and will be emitted separately so that we can progressively show
647
665
// other content. We add it to the queue during the flush because we have to ensure that
648
666
// the parent flushes first so that there's something to inject it into.
0 commit comments