
Commit e79dbb3

Authored Aug 15, 2022
Fix parts too small with S3 datastore (#284)
1 parent ba0a7bd · commit e79dbb3

2 files changed: +23, -15 lines


lib/models/StreamSplitter.js (+1, -1)

@@ -96,7 +96,7 @@ class FileStreamSplitter extends stream.Writable {
           return reject(err);
         }
 
-        this.emit('chunkFinished', this.currentChunkPath);
+        this.emit('chunkFinished', { path: this.currentChunkPath, size: this.currentChunkSize });
 
         this.currentChunkPath = null;
         this.fileDescriptor = null;
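
The splitter's 'chunkFinished' event now carries the size of the finished chunk alongside its temporary path, which is what lets the S3 store decide whether a chunk is large enough to ship as a multipart part. A minimal sketch of the payload change, using a plain EventEmitter as a stand-in for the real splitter (only the { path, size } shape is taken from the diff above):

    // Stand-in for FileStreamSplitter; only the event payload shape comes from the commit.
    const { EventEmitter } = require('events');

    const splitter = new EventEmitter();

    // Before this commit the listener received just the chunk's file path:
    //   splitter.on('chunkFinished', (filepath) => { ... });
    // After it, the payload also carries the chunk's byte count:
    splitter.on('chunkFinished', ({ path, size }) => {
      console.log(`chunk written to ${path} (${size} bytes)`);
    });

    splitter.emit('chunkFinished', { path: '/tmp/chunk-0', size: 5 * 1024 * 1024 });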

lib/stores/S3Store.js (+22, -14)

@@ -299,10 +299,11 @@ class S3Store extends DataStore {
    * @param {Object} metadata upload metadata
    * @param {fs<ReadStream>} readStream incoming request
    * @param {Number} currentPartNumber number of the current part/chunk
+   * @param {Number} current_size current size of uploaded data
    * @return {Promise<Number>} which resolves with the current offset
    * @memberof S3Store
    */
-  _processUpload(metadata, readStream, currentPartNumber) {
+  _processUpload(metadata, readStream, currentPartNumber, current_size) {
     return new Promise((resolve, reject) => {
       const splitterStream = new FileStreamSplitter({
         maxChunkSize: this.part_size,

@@ -330,20 +331,27 @@
           pendingChunkFilepath = filepath;
         });
 
-        splitterStream.on('chunkFinished', (filepath) => {
+        splitterStream.on('chunkFinished', ({ path, size }) => {
           pendingChunkFilepath = null;
 
+          current_size += size;
           const partNumber = currentPartNumber++;
 
           const p = Promise.resolve()
             .then(() => {
-              return this._uploadPart(metadata, fs.createReadStream(filepath), partNumber);
-            })
+              // skip chunk if it is not last and is smaller than 5MB
+              const is_last_chunk = parseInt(metadata.file.upload_length, 10) === current_size;
+              if (!is_last_chunk && size < 5 * 1024 * 1024) {
+                log(`[${metadata.file.id}] ignoring chuck smaller than 5MB`);
+                return undefined;
+              }
 
+              return this._uploadPart(metadata, fs.createReadStream(path), partNumber);
+            })
             .finally(() => {
-              fs.rm(filepath, (err) => {
+              fs.rm(path, (err) => {
                 if (err) {
-                  log(`[${metadata.file.id}] failed to remove file ${filepath}`, err);
+                  log(`[${metadata.file.id}] failed to remove file ${path}`, err);
                 }
               });
             });
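
This hunk is the core of the fix: S3 rejects multipart upload parts smaller than 5 MiB unless they are the last part, so the store now tracks the bytes accumulated so far and only ships a chunk once it is either large enough or completes the upload. A standalone sketch of that decision, with a hypothetical helper name (the 5 MiB threshold and the last-chunk comparison mirror the diff above):

    // Hypothetical helper; only the threshold and the last-chunk test come from the commit.
    const MIN_PART_SIZE = 5 * 1024 * 1024; // S3 minimum for every part except the last

    function shouldUploadPart(uploadLength, uploadedSoFar, chunkSize) {
      // In the diff, current_size already includes this chunk when the check runs.
      const isLastChunk = uploadLength === uploadedSoFar + chunkSize;
      return isLastChunk || chunkSize >= MIN_PART_SIZE;
    }

    // A 1 MiB chunk in the middle of a 21 MiB upload is skipped...
    console.log(shouldUploadPart(21 * 1024 * 1024, 10 * 1024 * 1024, 1 * 1024 * 1024)); // false
    // ...but the same 1 MiB chunk is uploaded when it completes the upload.
    console.log(shouldUploadPart(21 * 1024 * 1024, 20 * 1024 * 1024, 1 * 1024 * 1024)); // true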

@@ -486,17 +494,17 @@ class S3Store extends DataStore {
 
   write(req, file_id) {
     return this._getMetadata(file_id)
-      .then((metadata) =>
-        this._countParts(file_id).then((part_number) => [part_number, metadata])
-      )
+      .then((metadata) => {
+        return Promise.all([metadata, this._countParts(file_id), this.getOffset(file_id)]);
+      })
       .then(async(results) => {
-        const [part_number, metadata] = results;
+        const [metadata, part_number, initial_offset] = results;
         const next_part_number = part_number + 1;
 
         return Promise.allSettled(
-          await this._processUpload(metadata, req, next_part_number)
+          await this._processUpload(metadata, req, next_part_number, initial_offset.size)
         )
-          .then(() => this.getOffset(metadata.file.id, true))
+          .then(() => this.getOffset(file_id))
           .then((current_offset) => {
             if (parseInt(metadata.file.upload_length, 10) === current_offset.size) {
               return this._finishMultipartUpload(metadata, current_offset.parts)

@@ -509,12 +517,12 @@ class S3Store extends DataStore {
               }),
             });
 
-            this._clearCache(metadata.file.id);
+            this._clearCache(file_id);
 
             return current_offset.size;
           })
           .catch((err) => {
-            log(`[${metadata.file.id}] failed to finish upload`, err);
+            log(`[${file_id}] failed to finish upload`, err);
             throw err;
           });
   }
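
write() now resolves the existing offset together with the metadata and the part count, so the running total handed to _processUpload starts from the bytes that earlier requests already stored; the last-chunk comparison against upload_length therefore still works when the upload spans several requests. A small illustration with made-up numbers (the variable names follow the diff, the values are invented):

    // Illustrative values only; the arithmetic mirrors write() and _processUpload above.
    const upload_length = 12 * 1024 * 1024;  // declared total size of the upload
    let current_size = 10 * 1024 * 1024;     // initial_offset.size from earlier requests

    // A 2 MiB chunk arrives in a later request. It is under 5 MiB, but after
    // adding it the running total matches upload_length, so it counts as the
    // final part and is uploaded rather than skipped.
    const size = 2 * 1024 * 1024;
    current_size += size;
    const is_last_chunk = upload_length === current_size;
    console.log(is_last_chunk); // true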
