Skip to content

Commit

Permalink
Fix parts size computation
Browse files Browse the repository at this point in the history
- The action for abort is multipartDelete
- Subtract the stored size from the total part size in the complete
  MPU API, so we can deduce the size we drop. The locations
  object does not hold the size.
- Same approach for the AbortMPU API.

Issue: CLDSRV-592
  • Loading branch information
williamlardier committed Dec 20, 2024
1 parent af6e7b3 commit d9f9038
Show file tree
Hide file tree
Showing 3 changed files with 28 additions and 24 deletions.
2 changes: 1 addition & 1 deletion lib/api/api.js
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,7 @@ const api = {
const requestContexts = prepareRequestContexts(apiMethod, request,
sourceBucket, sourceObject, sourceVersionId);

if (apiMethod === 'completeMultipartUpload' || apiMethod === 'abortMultipartUpload') {
if (apiMethod === 'completeMultipartUpload' || apiMethod === 'multipartDelete') {
// Request account quotas explicitly for MPU requests, to consider parts cleanup
// NOTE: we need quota for these, but it will be evaluated at the end of the API,
// once the parts have actually been deleted (not via standardMetadataValidateBucketAndObj)
Expand Down
2 changes: 1 addition & 1 deletion lib/api/apiUtils/object/abortMultipartUpload.js
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
cb();
});
}, () => {
const length = locations.reduce((length, loc) => length + loc.size, 0);
const length = storedParts.reduce((length, loc) => length + loc.value.Size, 0);
return validateQuotas(request, destBucket, request.accountQuotas,
['objectDelete'], 'objectDelete', -length, false, log, err => {
if (err) {
Expand Down
48 changes: 26 additions & 22 deletions lib/api/completeMultipartUpload.js
Original file line number Diff line number Diff line change
Expand Up @@ -214,12 +214,13 @@ function completeMultipartUpload(authInfo, request, log, callback) {
return next(err, destBucket);
}
const storedParts = result.Contents;
const totalMPUSize = storedParts.reduce((acc, part) => acc + part.value.Size, 0);
return next(null, destBucket, objMD, mpuBucket, storedParts,
jsonList, storedMetadata, location, mpuOverviewKey);
jsonList, storedMetadata, location, mpuOverviewKey, totalMPUSize);
});
},
function completeExternalMpu(destBucket, objMD, mpuBucket, storedParts,
jsonList, storedMetadata, location, mpuOverviewKey, next) {
jsonList, storedMetadata, location, mpuOverviewKey, totalMPUSize, next) {
const mdInfo = { storedParts, mpuOverviewKey, splitter };
const mpuInfo =
{ objectKey, uploadId, jsonList, bucketName, destBucket };
Expand All @@ -237,16 +238,17 @@ function completeMultipartUpload(authInfo, request, log, callback) {
}
// if mpu not handled externally, completeObjData will be null
return next(null, destBucket, objMD, mpuBucket, storedParts,
jsonList, storedMetadata, completeObjData, mpuOverviewKey);
jsonList, storedMetadata, completeObjData, mpuOverviewKey,
totalMPUSize);
});
},
function validateAndFilterParts(destBucket, objMD, mpuBucket,
storedParts, jsonList, storedMetadata, completeObjData, mpuOverviewKey,
next) {
totalMPUSize, next) {
if (completeObjData) {
return next(null, destBucket, objMD, mpuBucket, storedParts,
jsonList, storedMetadata, completeObjData, mpuOverviewKey,
completeObjData.filteredPartsObj);
completeObjData.filteredPartsObj, totalMPUSize);
}
const filteredPartsObj = validateAndFilterMpuParts(storedParts,
jsonList, mpuOverviewKey, splitter, log);
Expand All @@ -255,11 +257,11 @@ function completeMultipartUpload(authInfo, request, log, callback) {
}
return next(null, destBucket, objMD, mpuBucket, storedParts,
jsonList, storedMetadata, completeObjData, mpuOverviewKey,
filteredPartsObj);
filteredPartsObj, totalMPUSize);
},
function processParts(destBucket, objMD, mpuBucket, storedParts,
jsonList, storedMetadata, completeObjData, mpuOverviewKey,
filteredPartsObj, next) {
filteredPartsObj, totalMPUSize, next) {
// if mpu was completed on backend that stored mpu MD externally,
// skip MD processing steps
if (completeObjData && skipMpuPartProcessing(completeObjData)) {
Expand All @@ -277,7 +279,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
const calculatedSize = completeObjData.contentLength;
return next(null, destBucket, objMD, mpuBucket, storedMetadata,
completeObjData.eTag, calculatedSize, dataLocations,
[mpuOverviewKey], null, completeObjData);
[mpuOverviewKey], null, completeObjData, totalMPUSize);
}

const partsInfo =
Expand All @@ -301,15 +303,15 @@ function completeMultipartUpload(authInfo, request, log, callback) {
];
return next(null, destBucket, objMD, mpuBucket, storedMetadata,
aggregateETag, calculatedSize, dataLocations, keysToDelete,
extraPartLocations, completeObjData);
extraPartLocations, completeObjData, totalMPUSize);
}
return next(null, destBucket, objMD, mpuBucket, storedMetadata,
aggregateETag, calculatedSize, dataLocations, keysToDelete,
extraPartLocations, null);
extraPartLocations, null, totalMPUSize);
},
function prepForStoring(destBucket, objMD, mpuBucket, storedMetadata,
aggregateETag, calculatedSize, dataLocations, keysToDelete,
extraPartLocations, completeObjData, next) {
extraPartLocations, completeObjData, totalMPUSize, next) {
const metaHeaders = {};
const keysNotNeeded =
['initiator', 'partLocations', 'key',
Expand All @@ -322,6 +324,8 @@ function completeMultipartUpload(authInfo, request, log, callback) {
metaHeaders[item] = storedMetadata[item];
});

const droppedMPUSize = totalMPUSize - calculatedSize;

const metaStoreParams = {
authInfo,
objectKey,
Expand Down Expand Up @@ -381,7 +385,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
return process.nextTick(() => next(null, destBucket, dataLocations,
metaStoreParams, mpuBucket, keysToDelete, aggregateETag,
objMD, extraPartLocations, pseudoCipherBundle,
completeObjData, options));
completeObjData, options, droppedMPUSize));
}

if (!destBucket.isVersioningEnabled() && objMD?.archive?.archiveInfo) {
Expand All @@ -404,13 +408,13 @@ function completeMultipartUpload(authInfo, request, log, callback) {
return next(null, destBucket, dataLocations,
metaStoreParams, mpuBucket, keysToDelete, aggregateETag,
objMD, extraPartLocations, pseudoCipherBundle,
completeObjData, options);
completeObjData, options, droppedMPUSize);
});
},
function storeAsNewObj(destinationBucket, dataLocations,
metaStoreParams, mpuBucket, keysToDelete, aggregateETag, objMD,
extraPartLocations, pseudoCipherBundle,
completeObjData, options, next) {
completeObjData, options, droppedMPUSize, next) {
const dataToDelete = options.dataToDelete;
/* eslint-disable no-param-reassign */
metaStoreParams.versionId = options.versionId;
Expand Down Expand Up @@ -462,7 +466,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
return next(null, mpuBucket, keysToDelete, aggregateETag,
extraPartLocations, destinationBucket,
// pass the original version ID as generatedVersionId
objMD.versionId);
objMD.versionId, droppedMPUSize);
}
}
return services.metadataStoreObject(destinationBucket.getName(),
Expand Down Expand Up @@ -500,31 +504,31 @@ function completeMultipartUpload(authInfo, request, log, callback) {
}
return next(null, mpuBucket, keysToDelete,
aggregateETag, extraPartLocations,
destinationBucket, generatedVersionId);
destinationBucket, generatedVersionId,
droppedMPUSize);
});
}
return next(null, mpuBucket, keysToDelete, aggregateETag,
extraPartLocations, destinationBucket,
generatedVersionId);
generatedVersionId, droppedMPUSize);
});
},
function deletePartsMetadata(mpuBucket, keysToDelete, aggregateETag,
extraPartLocations, destinationBucket, generatedVersionId, next) {
extraPartLocations, destinationBucket, generatedVersionId, droppedMPUSize, next) {
services.batchDeleteObjectMetadata(mpuBucket.getName(),
keysToDelete, log, err => next(err, extraPartLocations,
destinationBucket, aggregateETag, generatedVersionId));
destinationBucket, aggregateETag, generatedVersionId, droppedMPUSize));
},
function batchDeleteExtraParts(extraPartLocations, destinationBucket,
aggregateETag, generatedVersionId, next) {
aggregateETag, generatedVersionId, droppedMPUSize, next) {
if (extraPartLocations && extraPartLocations.length > 0) {
return data.batchDelete(extraPartLocations, request.method, null, log, err => {
if (err) {
return next(err);
}

const length = extraPartLocations.reduce((length, loc) => length + loc.size, 0);
return validateQuotas(request, destinationBucket, request.accountQuotas,
['objectDelete'], 'objectDelete', -length, false, log, err => {
['objectDelete'], 'objectDelete', -droppedMPUSize, false, log, err => {
if (err) {
// Ignore error, as the data has been deleted already: only inflight count
// has not been updated, and will be eventually consistent anyway
Expand Down

0 comments on commit d9f9038

Please sign in to comment.