const { head_url: headUrl, get_url: getUrl, service } = await this._client.send('get file download url', { // eslint-disable-line no-underscore-dangle
  resource_id: resourceId,
});

if (!streamCloudStorage[service])
  throw new InternalError(`unsupported cloud storage service: ${service}`);
const { DownloadStream } = streamCloudStorage[service];
const downloadChunkSize = 1024 * 1024;
const downloader = new DownloadStream(resourceId, headUrl, getUrl, downloadChunkSize);
const { metadata: encryptedMetadata, encryptedContentLength } = await downloader.getMetadata();
const { encryptionFormat, clearContentLength, ...fileMetadata } = await this._decryptMetadata(encryptedMetadata);
const combinedOutputOptions = extractOutputOptions({ type: defaultDownloadType, ...outputOptions, ...fileMetadata });
const merger = new MergerStream(combinedOutputOptions);
const decryptor = await this._dataProtector.makeDecryptorStream();
// For compatibility with SDKs up to 2.2.1
const clearSize = encryptionFormat
  ? getClearSize(encryptionFormat, encryptedContentLength)
  : clearContentLength;
const progressHandler = new ProgressHandler(progressOptions).start(clearSize);
decryptor.on('data', (chunk: Uint8Array) => progressHandler.report(chunk.byteLength));
return pipeStreams({ streams: [downloader, decryptor, merger], resolveEvent: 'data' });
}
}
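// `pipeStreams` above is a small helper shared by the download and upload paths. A minimal
// sketch of what such a helper could look like, assuming it only chains .pipe() calls and
// settles on the given event (mirroring the hand-written pipelines in _streamDecryptData and
// _streamEncryptData below); the SDK's actual implementation may differ:
const pipeStreams = ({ streams, resolveEvent }) => new Promise((resolve, reject) => {
  streams.forEach(stream => stream.on('error', reject));
  streams.reduce((leftStream, rightStream) => leftStream.pipe(rightStream)).on(resolveEvent, resolve);
});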
const {
  urls,
  headers,
  service,
  recommended_chunk_size: recommendedChunkSize,
} = await this._client.send('get file upload url', {
  resource_id: resourceId,
  metadata: encryptedMetadata,
  upload_content_length: totalEncryptedSize,
});

if (!streamCloudStorage[service])
  throw new InternalError(`unsupported cloud storage service: ${service}`);

const streamService = streamCloudStorage[service];
const { UploadStream } = streamService;

const slicer = new SlicerStream({ source: clearData });
// `encryptor` is the encryption stream created earlier in the upload flow (not shown in this snippet).
const uploader = new UploadStream(urls, headers, totalEncryptedSize, recommendedChunkSize, encryptedMetadata);

const progressHandler = new ProgressHandler(progressOptions).start(totalEncryptedSize);
uploader.on('uploaded', (chunk: Uint8Array) => progressHandler.report(chunk.byteLength));

const streams = [slicer, encryptor];

// Some versions of Edge (e.g. version 18) fail to handle the 308 HTTP status used by
// GCS in a non-standard way (no redirection expected) when uploading in chunks. So we
// add a merger stream before the uploader to ensure there's a single upload request
// returning the 200 HTTP status.
if (service === 'GCS' && isEdge()) {
  const merger = new MergerStream({ type: Uint8Array });
  streams.push(merger);
} else if (service === 'S3') {
  const resizer = new ResizerStream(recommendedChunkSize);
  streams.push(resizer);
}

streams.push(uploader);

await pipeStreams({ streams, resolveEvent: 'finish' });

return resourceId;
}
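// Both directions drive progress the same way: start() receives the total number of bytes
// expected, then report() is called with the size of each chunk that went through. A minimal
// sketch of a compatible handler, assuming ProgressOptions carries an optional onProgress
// callback (a hypothetical shape, not necessarily the SDK's real ProgressHandler):
class ProgressHandler {
  constructor(progressOptions = {}) {
    this._onProgress = progressOptions.onProgress || (() => {});
    this._currentBytes = 0;
    this._totalBytes = 0;
  }

  start(totalBytes) {
    this._totalBytes = totalBytes;
    this._onProgress({ currentBytes: 0, totalBytes });
    return this; // allows the `new ProgressHandler(...).start(...)` chaining used above
  }

  report(byteLength) {
    this._currentBytes += byteLength;
    this._onProgress({ currentBytes: this._currentBytes, totalBytes: this._totalBytes });
  }
}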
_initializeStreams() {
  this._resizerStream = new ResizerStream(this._maxClearChunkSize);

  this._encryptorStream = new Transform({
    // buffering input bytes until clear chunk size is reached
    writableHighWaterMark: this._maxClearChunkSize,
    writableObjectMode: false,
    // buffering output bytes until encrypted chunk size is reached
    readableHighWaterMark: this._maxEncryptedChunkSize,
    readableObjectMode: false,

    transform: (clearData, encoding, done) => {
      try {
        const encryptedChunk = this._encryptChunk(clearData);
        this._encryptorStream.push(encryptedChunk);
      } catch (err) {
        return done(err);
      }
      done();
    },
    // any remaining options (e.g. a flush handler) are omitted from this snippet
  });
}
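// The clear and encrypted chunk sizes used above differ by a fixed per-chunk overhead
// (encryptionV4.overhead, visible in the decryption stream below). A rough sketch of how
// total sizes like the ones fed to the progress handlers can be estimated from that overhead,
// under the simplifying assumption that every chunk, including the last one, carries the same
// overhead (illustrative helpers, not the SDK's getClearSize/getEncryptedSize):
const estimateEncryptedSize = (clearSize, maxClearChunkSize, overhead) => {
  const chunkCount = Math.ceil(clearSize / maxClearChunkSize) || 1;
  return clearSize + chunkCount * overhead;
};
const estimateClearSize = (encryptedSize, maxEncryptedChunkSize, overhead) => {
  const chunkCount = Math.ceil(encryptedSize / maxEncryptedChunkSize) || 1;
  return encryptedSize - chunkCount * overhead;
};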
async _initializeStreams(headOfEncryptedData: Uint8Array) {
  let encryptedChunkSize;
  let resourceId;

  try {
    ({ encryptedChunkSize, resourceId } = encryptionV4.unserialize(headOfEncryptedData));
  } catch (e) {
    throw new InvalidArgument('encryptedData', e, headOfEncryptedData);
  }

  const key = await this._mapper.findKey(resourceId);

  this._state.maxEncryptedChunkSize = encryptedChunkSize;
  this._resizerStream = new ResizerStream(encryptedChunkSize);

  const b64ResourceId = utils.toBase64(resourceId);

  this._decryptionStream = new Transform({
    // buffering input bytes until encrypted chunk size is reached
    writableHighWaterMark: encryptedChunkSize,
    writableObjectMode: false,
    // buffering output bytes until clear chunk size is reached
    readableHighWaterMark: encryptedChunkSize - encryptionV4.overhead,
    readableObjectMode: false,

    transform: (encryptedChunk, encoding, done) => {
      try {
        const clearData = encryptionV4.decrypt(key, this._state.index, encryptionV4.unserialize(encryptedChunk));
        this._decryptionStream.push(clearData);
      } catch (error) {
        return done(new DecryptionFailed({ error, b64ResourceId }));
      }
      this._state.lastEncryptedChunkSize = encryptedChunk.length;
      this._state.index += 1; // safe as long as index < 2^53
      done();
    },
    // any remaining options (e.g. a flush handler) are omitted from this snippet
  });
}
async _streamDecryptData(encryptedData: Data, outputOptions: OutputOptions, progressOptions: ProgressOptions): Promise<Data> {
  const slicer = new SlicerStream({ source: encryptedData });
  const decryptor = await this.makeDecryptorStream();
  const merger = new MergerStream(outputOptions);

  const progressHandler = new ProgressHandler(progressOptions);

  decryptor.on('initialized', () => {
    const encryptedSize = getDataLength(encryptedData);
    const clearSize = decryptor.getClearSize(encryptedSize);
    progressHandler.start(clearSize);
  });

  decryptor.on('data', (chunk: Uint8Array) => progressHandler.report(chunk.byteLength));

  return new Promise((resolve, reject) => {
    [slicer, decryptor, merger].forEach(s => s.on('error', reject));
    slicer.pipe(decryptor).pipe(merger).on('data', resolve);
  });
}
async _streamEncryptData(clearData: Data, sharingOptions: SharingOptions, outputOptions: OutputOptions, progressOptions: ProgressOptions, b64ResourceId?: b64string): Promise<Data> {
  const slicer = new SlicerStream({ source: clearData });
  const encryptor = await this.makeEncryptorStream(sharingOptions, b64ResourceId);

  const clearSize = getDataLength(clearData);
  const encryptedSize = encryptor.getEncryptedSize(clearSize);
  const progressHandler = new ProgressHandler(progressOptions).start(encryptedSize);
  encryptor.on('data', (chunk: Uint8Array) => progressHandler.report(chunk.byteLength));

  const merger = new MergerStream(outputOptions);

  return new Promise((resolve, reject) => {
    [slicer, encryptor, merger].forEach(s => s.on('error', reject));
    slicer.pipe(encryptor).pipe(merger).on('data', resolve);
  });
}
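// Example round trip through the two helpers above (hypothetical caller code; the public SDK
// wraps these in higher-level encrypt/decrypt entry points, so names and options here are
// purely illustrative):
async function roundTrip(dataProtector, clearData) {
  const encrypted = await dataProtector._streamEncryptData(clearData, {}, { type: Uint8Array }, {});
  return dataProtector._streamDecryptData(encrypted, { type: Uint8Array }, {});
}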