// Mirror of https://github.com/facebook/react.git
// Synced 2026-02-26 16:24:59 +00:00
//
// Upstream commit: Added an explicit type to all $FlowFixMe suppressions to
// reduce over-suppressions of new errors that might be caused on the same
// lines. Also removes suppressions that aren't used (e.g. in a `@noflow`
// file, as they're purely misleading). Test Plan: yarn flow-ci
/**
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *
 * @flow
 */

import type {Writable} from 'stream';

import {TextEncoder} from 'util';
import {AsyncLocalStorage} from 'async_hooks';

interface MightBeFlushable {
|
|
flush?: () => void;
|
|
}
|
|
|
|
export type Destination = Writable & MightBeFlushable;
|
|
|
|
export type PrecomputedChunk = Uint8Array;
|
|
export opaque type Chunk = string;
|
|
|
|
export function scheduleWork(callback: () => void) {
|
|
setImmediate(callback);
|
|
}
|
|
|
|
export function flushBuffered(destination: Destination) {
|
|
// If we don't have any more data to send right now.
|
|
// Flush whatever is in the buffer to the wire.
|
|
if (typeof destination.flush === 'function') {
|
|
// By convention the Zlib streams provide a flush function for this purpose.
|
|
// For Express, compression middleware adds this method.
|
|
destination.flush();
|
|
}
|
|
}
|
|
|
|
export const supportsRequestStorage = true;
|
|
export const requestStorage: AsyncLocalStorage<Map<Function, mixed>> =
|
|
new AsyncLocalStorage();
|
|
|
|
const VIEW_SIZE = 2048;
|
|
let currentView = null;
|
|
let writtenBytes = 0;
|
|
let destinationHasCapacity = true;
|
|
|
|
export function beginWriting(destination: Destination) {
|
|
currentView = new Uint8Array(VIEW_SIZE);
|
|
writtenBytes = 0;
|
|
destinationHasCapacity = true;
|
|
}
|
|
|
|
function writeStringChunk(destination: Destination, stringChunk: string) {
|
|
if (stringChunk.length === 0) {
|
|
return;
|
|
}
|
|
// maximum possible view needed to encode entire string
|
|
if (stringChunk.length * 3 > VIEW_SIZE) {
|
|
if (writtenBytes > 0) {
|
|
writeToDestination(
|
|
destination,
|
|
((currentView: any): Uint8Array).subarray(0, writtenBytes),
|
|
);
|
|
currentView = new Uint8Array(VIEW_SIZE);
|
|
writtenBytes = 0;
|
|
}
|
|
writeToDestination(destination, textEncoder.encode(stringChunk));
|
|
return;
|
|
}
|
|
|
|
let target: Uint8Array = (currentView: any);
|
|
if (writtenBytes > 0) {
|
|
target = ((currentView: any): Uint8Array).subarray(writtenBytes);
|
|
}
|
|
const {read, written} = textEncoder.encodeInto(stringChunk, target);
|
|
writtenBytes += written;
|
|
|
|
if (read < stringChunk.length) {
|
|
writeToDestination(
|
|
destination,
|
|
(currentView: any).subarray(0, writtenBytes),
|
|
);
|
|
currentView = new Uint8Array(VIEW_SIZE);
|
|
writtenBytes = textEncoder.encodeInto(
|
|
stringChunk.slice(read),
|
|
(currentView: any),
|
|
).written;
|
|
}
|
|
|
|
if (writtenBytes === VIEW_SIZE) {
|
|
writeToDestination(destination, (currentView: any));
|
|
currentView = new Uint8Array(VIEW_SIZE);
|
|
writtenBytes = 0;
|
|
}
|
|
}
|
|
|
|
function writeViewChunk(destination: Destination, chunk: PrecomputedChunk) {
|
|
if (chunk.byteLength === 0) {
|
|
return;
|
|
}
|
|
if (chunk.byteLength > VIEW_SIZE) {
|
|
if (__DEV__) {
|
|
if (precomputedChunkSet && precomputedChunkSet.has(chunk)) {
|
|
console.error(
|
|
'A large precomputed chunk was passed to writeChunk without being copied.' +
|
|
' Large chunks get enqueued directly and are not copied. This is incompatible with precomputed chunks because you cannot enqueue the same precomputed chunk twice.' +
|
|
' Use "cloneChunk" to make a copy of this large precomputed chunk before writing it. This is a bug in React.',
|
|
);
|
|
}
|
|
}
|
|
// this chunk may overflow a single view which implies it was not
|
|
// one that is cached by the streaming renderer. We will enqueu
|
|
// it directly and expect it is not re-used
|
|
if (writtenBytes > 0) {
|
|
writeToDestination(
|
|
destination,
|
|
((currentView: any): Uint8Array).subarray(0, writtenBytes),
|
|
);
|
|
currentView = new Uint8Array(VIEW_SIZE);
|
|
writtenBytes = 0;
|
|
}
|
|
writeToDestination(destination, chunk);
|
|
return;
|
|
}
|
|
|
|
let bytesToWrite = chunk;
|
|
const allowableBytes = ((currentView: any): Uint8Array).length - writtenBytes;
|
|
if (allowableBytes < bytesToWrite.byteLength) {
|
|
// this chunk would overflow the current view. We enqueue a full view
|
|
// and start a new view with the remaining chunk
|
|
if (allowableBytes === 0) {
|
|
// the current view is already full, send it
|
|
writeToDestination(destination, (currentView: any));
|
|
} else {
|
|
// fill up the current view and apply the remaining chunk bytes
|
|
// to a new view.
|
|
((currentView: any): Uint8Array).set(
|
|
bytesToWrite.subarray(0, allowableBytes),
|
|
writtenBytes,
|
|
);
|
|
writtenBytes += allowableBytes;
|
|
writeToDestination(destination, (currentView: any));
|
|
bytesToWrite = bytesToWrite.subarray(allowableBytes);
|
|
}
|
|
currentView = new Uint8Array(VIEW_SIZE);
|
|
writtenBytes = 0;
|
|
}
|
|
((currentView: any): Uint8Array).set(bytesToWrite, writtenBytes);
|
|
writtenBytes += bytesToWrite.byteLength;
|
|
|
|
if (writtenBytes === VIEW_SIZE) {
|
|
writeToDestination(destination, (currentView: any));
|
|
currentView = new Uint8Array(VIEW_SIZE);
|
|
writtenBytes = 0;
|
|
}
|
|
}
|
|
|
|
export function writeChunk(
|
|
destination: Destination,
|
|
chunk: PrecomputedChunk | Chunk,
|
|
): void {
|
|
if (typeof chunk === 'string') {
|
|
writeStringChunk(destination, chunk);
|
|
} else {
|
|
writeViewChunk(destination, ((chunk: any): PrecomputedChunk));
|
|
}
|
|
}
|
|
|
|
function writeToDestination(destination: Destination, view: Uint8Array) {
|
|
const currentHasCapacity = destination.write(view);
|
|
destinationHasCapacity = destinationHasCapacity && currentHasCapacity;
|
|
}
|
|
|
|
export function writeChunkAndReturn(
|
|
destination: Destination,
|
|
chunk: PrecomputedChunk | Chunk,
|
|
): boolean {
|
|
writeChunk(destination, chunk);
|
|
return destinationHasCapacity;
|
|
}
|
|
|
|
export function completeWriting(destination: Destination) {
|
|
if (currentView && writtenBytes > 0) {
|
|
destination.write(currentView.subarray(0, writtenBytes));
|
|
}
|
|
currentView = null;
|
|
writtenBytes = 0;
|
|
destinationHasCapacity = true;
|
|
}
|
|
|
|
export function close(destination: Destination) {
|
|
destination.end();
|
|
}
|
|
|
|
const textEncoder = new TextEncoder();
|
|
|
|
export function stringToChunk(content: string): Chunk {
|
|
return content;
|
|
}
|
|
|
|
const precomputedChunkSet = __DEV__ ? new Set<PrecomputedChunk>() : null;
|
|
|
|
export function stringToPrecomputedChunk(content: string): PrecomputedChunk {
|
|
const precomputedChunk = textEncoder.encode(content);
|
|
|
|
if (__DEV__) {
|
|
if (precomputedChunkSet) {
|
|
precomputedChunkSet.add(precomputedChunk);
|
|
}
|
|
}
|
|
|
|
return precomputedChunk;
|
|
}
|
|
|
|
export function clonePrecomputedChunk(
|
|
precomputedChunk: PrecomputedChunk,
|
|
): PrecomputedChunk {
|
|
return precomputedChunk.length > VIEW_SIZE
|
|
? precomputedChunk.slice()
|
|
: precomputedChunk;
|
|
}
|
|
|
|
export function closeWithError(destination: Destination, error: mixed): void {
|
|
// $FlowFixMe[incompatible-call]: This is an Error object or the destination accepts other types.
|
|
destination.destroy(error);
|
|
}
|