@@ -299,10 +299,11 @@ class S3Store extends DataStore {
     * @param {Object} metadata upload metadata
     * @param {fs<ReadStream>} readStream incoming request
     * @param {Number} currentPartNumber number of the current part/chunk
+     * @param {Number} current_size current size of uploaded data
     * @return {Promise<Number>} which resolves with the current offset
     * @memberof S3Store
     */
-    _processUpload(metadata, readStream, currentPartNumber) {
+    _processUpload(metadata, readStream, currentPartNumber, current_size) {
        return new Promise((resolve, reject) => {
            const splitterStream = new FileStreamSplitter({
                maxChunkSize: this.part_size,
@@ -330,20 +331,27 @@ class S3Store extends DataStore {
                pendingChunkFilepath = filepath;
            });

-            splitterStream.on('chunkFinished', (filepath) => {
+            splitterStream.on('chunkFinished', ({ path, size }) => {
                pendingChunkFilepath = null;

+                current_size += size;
                const partNumber = currentPartNumber++;

                const p = Promise.resolve()
                    .then(() => {
-                        return this._uploadPart(metadata, fs.createReadStream(filepath), partNumber);
-                    })
+                        // skip chunk if it is not last and is smaller than 5MB
+                        const is_last_chunk = parseInt(metadata.file.upload_length, 10) === current_size;
+                        if (!is_last_chunk && size < 5 * 1024 * 1024) {
+                            log(`[${metadata.file.id}] ignoring chunk smaller than 5MB`);
+                            return undefined;
+                        }

+                        return this._uploadPart(metadata, fs.createReadStream(path), partNumber);
+                    })
                    .finally(() => {
-                        fs.rm(filepath, (err) => {
+                        fs.rm(path, (err) => {
                            if (err) {
-                                log(`[${metadata.file.id}] failed to remove file ${filepath}`, err);
+                                log(`[${metadata.file.id}] failed to remove file ${path}`, err);
                            }
                        });
                    });
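For context: S3 multipart uploads reject any part smaller than 5 MiB unless it is the final part of the upload, which is why the `chunkFinished` handler above skips undersized intermediate chunks instead of sending them as parts. The sketch below isolates that rule; `MIN_PART_SIZE` and `shouldUploadChunk` are illustrative names, not part of this patch.

```js
// Minimal sketch of the part-size rule applied in the handler above.
// Illustrative only: these names do not exist in the store.
const MIN_PART_SIZE = 5 * 1024 * 1024; // S3's minimum size for non-final parts

function shouldUploadChunk(uploadLength, sizeBeforeChunk, chunkSize) {
    // Mirrors the handler: current_size already includes this chunk when it
    // is compared against upload_length.
    const isLastChunk = sizeBeforeChunk + chunkSize === uploadLength;
    return isLastChunk || chunkSize >= MIN_PART_SIZE;
}

const MiB = 1024 * 1024;
console.log(shouldUploadChunk(13 * MiB, 10 * MiB, 3 * MiB)); // true: final 3 MiB chunk is still uploaded
console.log(shouldUploadChunk(13 * MiB, 0, 3 * MiB));        // false: intermediate 3 MiB chunk is skipped
```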
@@ -486,17 +494,17 @@ class S3Store extends DataStore {

    write(req, file_id) {
        return this._getMetadata(file_id)
-            .then((metadata) =>
-                this._countParts(file_id).then((part_number) => [part_number, metadata])
-            )
+            .then((metadata) => {
+                return Promise.all([metadata, this._countParts(file_id), this.getOffset(file_id)]);
+            })
            .then(async (results) => {
-                const [part_number, metadata] = results;
+                const [metadata, part_number, initial_offset] = results;
                const next_part_number = part_number + 1;

                return Promise.allSettled(
-                    await this._processUpload(metadata, req, next_part_number)
+                    await this._processUpload(metadata, req, next_part_number, initial_offset.size)
                )
-                    .then(() => this.getOffset(metadata.file.id, true))
+                    .then(() => this.getOffset(file_id))
                    .then((current_offset) => {
                        if (parseInt(metadata.file.upload_length, 10) === current_offset.size) {
                            return this._finishMultipartUpload(metadata, current_offset.parts)
@@ -509,12 +517,12 @@ class S3Store extends DataStore {
                                    }),
                                });

-                                this._clearCache(metadata.file.id);
+                                this._clearCache(file_id);

                                return current_offset.size;
                            })
                            .catch((err) => {
-                                log(`[${metadata.file.id}] failed to finish upload`, err);
+                                log(`[${file_id}] failed to finish upload`, err);
                                throw err;
                            });
                        }
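Taken together, the updated `write()` now resolves the metadata, the existing part count, and the current offset in one `Promise.all`, threads the starting size into `_processUpload()` so the splitter can recognize the true final chunk, and keys its cache clearing and logging on `file_id`. A rough async/await equivalent of the new flow follows; this is a sketch only, `writeSketch` is an illustrative name, and the completion event and error handling are omitted.

```js
// Sketch of the reworked write() control flow, not the actual implementation.
async function writeSketch(store, req, file_id) {
    // Resolve metadata, part count, and current offset together.
    const [metadata, part_number, initial_offset] = await Promise.all([
        store._getMetadata(file_id),
        store._countParts(file_id),
        store.getOffset(file_id),
    ]);

    // initial_offset.size is the starting point for current_size, so
    // _processUpload can tell the final (possibly < 5 MiB) chunk apart.
    await Promise.allSettled(
        await store._processUpload(metadata, req, part_number + 1, initial_offset.size)
    );

    const current_offset = await store.getOffset(file_id);
    if (parseInt(metadata.file.upload_length, 10) === current_offset.size) {
        await store._finishMultipartUpload(metadata, current_offset.parts);
        store._clearCache(file_id);
    }
    return current_offset.size;
}
```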