diff --git a/package.json b/package.json index f22e2d8b1c..1a140394ee 100644 --- a/package.json +++ b/package.json @@ -22,8 +22,10 @@ "@aws-sdk/client-s3": "^3.908.0", "@aws-sdk/credential-providers": "^3.864.0", "@aws-sdk/middleware-retry": "^3.374.0", + "@aws-sdk/s3-request-presigner": "^3.901.0", "@azure/storage-blob": "^12.28.0", "@hapi/joi": "^17.1.1", + "@smithy/node-http-handler": "^3.0.0", "arsenal": "git+https://github.com/scality/Arsenal#8.2.35", "async": "2.6.4", "bucketclient": "scality/bucketclient#8.2.7", diff --git a/tests/functional/aws-node-sdk/lib/utility/checkError.js b/tests/functional/aws-node-sdk/lib/utility/checkError.js index 767ea2157b..ff2d438ba1 100644 --- a/tests/functional/aws-node-sdk/lib/utility/checkError.js +++ b/tests/functional/aws-node-sdk/lib/utility/checkError.js @@ -2,8 +2,12 @@ const assert = require('assert'); function checkError(err, code, statusCode) { assert(err, 'Expected error but found none'); - assert.strictEqual(err.code, code); - assert.strictEqual(err.statusCode, statusCode); + if (code) { + assert.strictEqual(err.name, code); + } + if (statusCode) { + assert.strictEqual(err.$metadata.httpStatusCode, statusCode); + } } module.exports = checkError; diff --git a/tests/functional/aws-node-sdk/lib/utility/tagging.js b/tests/functional/aws-node-sdk/lib/utility/tagging.js index bad7cb17ca..9039a9b5eb 100644 --- a/tests/functional/aws-node-sdk/lib/utility/tagging.js +++ b/tests/functional/aws-node-sdk/lib/utility/tagging.js @@ -5,9 +5,11 @@ const taggingTests = [ it: 'should return tags if value is an empty string' }, { tag: { key: 'w'.repeat(129), value: 'foo' }, error: 'InvalidTag', + code: 400, it: 'should return InvalidTag if key length is greater than 128' }, { tag: { key: 'bar', value: 'f'.repeat(257) }, error: 'InvalidTag', + code: 400, it: 'should return InvalidTag if key length is greater than 256', }, ]; diff --git a/tests/functional/aws-node-sdk/lib/utility/website-util.js b/tests/functional/aws-node-sdk/lib/utility/website-util.js index 0b48f0e4c6..463403b525 100644 --- a/tests/functional/aws-node-sdk/lib/utility/website-util.js +++ b/tests/functional/aws-node-sdk/lib/utility/website-util.js @@ -3,6 +3,11 @@ const async = require('async'); const fs = require('fs'); const path = require('path'); const url = require('url'); +const { CreateBucketCommand, + DeleteBucketCommand, + PutBucketWebsiteCommand, + DeleteObjectCommand, + PutObjectCommand } = require('@aws-sdk/client-s3'); const { makeRequest } = require('../../../raw-node/utils/makeRequest'); @@ -352,41 +357,31 @@ class WebsiteConfigTester { } static createPutBucketWebsite(s3, bucket, bucketACL, objects, done) { - s3.createBucket({ Bucket: bucket, ACL: bucketACL }, - err => { - if (err) { - return done(err); - } + s3.send(new CreateBucketCommand({ Bucket: bucket, ACL: bucketACL })).then(() => { const webConfig = new WebsiteConfigTester('index.html', 'error.html'); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, err => { - if (err) { - return done(err); - } - return async.forEachOf(objects, + return s3.send(new PutBucketWebsiteCommand({ Bucket: bucket, + WebsiteConfiguration: webConfig })).then(() => async.forEachOf(objects, (acl, object, next) => { - s3.putObject({ Bucket: bucket, + s3.send(new PutObjectCommand({ Bucket: bucket, Key: `${object}.html`, ACL: acl, Body: fs.readFileSync(path.join(__dirname, `/../../test/object/websiteFiles/${object}.html`)), - }, - next); - }, done); - }); - }); + })).then(() => next()).catch(next); + }, done)); + 
}).catch(err => done(err));
     }

     static deleteObjectsThenBucket(s3, bucket, objects, done) {
         async.forEachOf(objects, (acl, object, next) => {
-            s3.deleteObject({ Bucket: bucket,
-                Key: `${object}.html` }, next);
+            s3.send(new DeleteObjectCommand({ Bucket: bucket,
+                Key: `${object}.html` })).then(() => next()).catch(next);
         }, err => {
             if (err) {
                 return done(err);
             }
-            return s3.deleteBucket({ Bucket: bucket }, done);
+            return s3.send(new DeleteBucketCommand({ Bucket: bucket })).then(() => done()).catch(done);
         });
     }
 }
diff --git a/tests/functional/aws-node-sdk/test/object/100-continue.js b/tests/functional/aws-node-sdk/test/object/100-continue.js
index 855d2744ec..9a41210f92 100644
--- a/tests/functional/aws-node-sdk/test/object/100-continue.js
+++ b/tests/functional/aws-node-sdk/test/object/100-continue.js
@@ -2,6 +2,8 @@ const assert = require('assert');
 const http = require('http');
 const https = require('https');
 const url = require('url');
+const { CreateBucketCommand, PutObjectCommand } = require('@aws-sdk/client-s3');
+const { getSignedUrl } = require('@aws-sdk/s3-request-presigner');
 
 const withV4 = require('../support/withV4');
 const BucketUtility = require('../../lib/utility/bucket-util');
@@ -121,17 +123,19 @@ describeSkipIfE2E('PUT public object with 100-continue header', () => {
         let continueRequest;
         const invalidSignedURL = `/${bucket}/${key}`;
 
-        beforeEach(() => {
+        beforeEach(async () => {
             bucketUtil = new BucketUtility('default', sigCfg);
             s3 = bucketUtil.s3;
             const params = {
                 Bucket: bucket,
                 Key: key,
+                ContentLength: 0,
             };
-            const signedUrl = s3.getSignedUrl('putObject', params);
+            const command = new PutObjectCommand(params);
+            const signedUrl = await getSignedUrl(s3, command);
             const { path } = url.parse(signedUrl);
             continueRequest = new ContinueRequestHandler(path);
-            return s3.createBucket({ Bucket: bucket }).promise();
+            await s3.send(new CreateBucketCommand({ Bucket: bucket }));
         });
 
         afterEach(() =>
diff --git a/tests/functional/aws-node-sdk/test/object/abortMPU.js b/tests/functional/aws-node-sdk/test/object/abortMPU.js
index 1cceb3146e..7c3a836548 100644
--- a/tests/functional/aws-node-sdk/test/object/abortMPU.js
+++ b/tests/functional/aws-node-sdk/test/object/abortMPU.js
@@ -8,6 +8,20 @@ const async = require('async');
 const { initMetadata, getMetadata } = require('../utils/init');
 const metadata = require('../../../../../lib/metadata/wrapper');
 const { DummyRequestLogger } = require('../../../../unit/helpers');
+const {
+    CreateBucketCommand,
+    CreateMultipartUploadCommand,
+    UploadPartCommand,
+    CompleteMultipartUploadCommand,
+    AbortMultipartUploadCommand,
+    GetObjectCommand,
+    ListMultipartUploadsCommand,
+    ListObjectVersionsCommand,
+    DeleteObjectCommand,
+    PutBucketVersioningCommand,
+    HeadObjectCommand,
+    PutObjectCommand
+} = require('@aws-sdk/client-s3');
 
 const date = Date.now();
 const bucket = `abortmpu${date}`;
@@ -16,25 +30,26 @@ const bodyFirstPart = Buffer.allocUnsafe(10).fill(0);
 
 function checkError(err, code, message) {
     assert.notEqual(err, null, 'Expected failure but got success');
-    assert.strictEqual(err.code, code);
+    assert.strictEqual(err.name, code);
     assert.strictEqual(err.message, message);
 }
 
 async function cleanupVersionedBucket(bucketUtil, bucketName) {
     // Clean up all multipart uploads
-    const listMPUResponse = await bucketUtil.s3.listMultipartUploads({ Bucket: bucketName }).promise();
-    await Promise.all(listMPUResponse.Uploads.map(upload =>
-        bucketUtil.s3.abortMultipartUpload({
-            Bucket: bucketName,
-            Key: upload.Key,
-            UploadId: upload.UploadId,
-        }).promise().catch(err => {
-            if (err.code !== 'NoSuchUpload') {
-                throw err;
-            }
-            // If NoSuchUpload, swallow error
-        }),
-    ));
+    const listMPUResponse = await bucketUtil.s3.send(new ListMultipartUploadsCommand({ Bucket: bucketName }));
+    if (listMPUResponse.Uploads && listMPUResponse.Uploads.length > 0) {
+        await Promise.all(listMPUResponse.Uploads.map(async upload => {
+            await bucketUtil.s3.send(new AbortMultipartUploadCommand({
+                Bucket: bucketName,
+                Key: upload.Key,
+                UploadId: upload.UploadId,
+            })).catch(err => {
+                if (err.name !== 'NoSuchUpload') {
+                    throw err;
+                }
+            });
+        }));
+    }
 
     // Clean up all object versions
     await bucketUtil.empty(bucketName);
@@ -47,47 +62,49 @@ describe('Abort MPU', () => {
         let s3;
         let uploadId;
 
-        beforeEach(() => {
+        beforeEach(async () => {
             bucketUtil = new BucketUtility('default', sigCfg);
             s3 = bucketUtil.s3;
-            return s3.createBucket({ Bucket: bucket }).promise()
-                .then(() => s3.createMultipartUpload({
+            try {
+                await s3.send(new CreateBucketCommand({ Bucket: bucket }));
+                const mpu = await s3.send(new CreateMultipartUploadCommand({
                     Bucket: bucket,
                     Key: key,
-                }).promise())
-                .then(res => {
-                    uploadId = res.UploadId;
-                    return s3.uploadPart({
-                        Bucket: bucket, Key: key,
-                        PartNumber: 1, UploadId: uploadId, Body: bodyFirstPart,
-                    }).promise();
-                })
-                .catch(err => {
-                    process.stdout.write(`Error in beforeEach: ${err}\n`);
-                    throw err;
-                });
+                }));
+                uploadId = mpu.UploadId;
+                await s3.send(new UploadPartCommand({
+                    Bucket: bucket, Key: key,
+                    PartNumber: 1, UploadId: uploadId, Body: bodyFirstPart,
+                }));
+            } catch (err) {
+                process.stdout.write(`Error in beforeEach: ${err}\n`);
+                throw err;
+            }
         });
 
-        afterEach(() =>
-            s3.abortMultipartUpload({
+        afterEach(async () => {
+            await s3.send(new AbortMultipartUploadCommand({
                 Bucket: bucket,
                 Key: key,
                 UploadId: uploadId,
-            }).promise()
-            .then(() => bucketUtil.empty(bucket))
-            .then(() => bucketUtil.deleteOne(bucket))
-        );
+            }));
+            await bucketUtil.empty(bucket);
+            await bucketUtil.deleteOne(bucket);
+        });
 
         // aws-sdk now (v2.363.0) returns 'UriParameterError' error
         // this test was not replaced in any other suite
         it.skip('should return InvalidRequest error if aborting without key', done => {
-            s3.abortMultipartUpload({
+            s3.send(new AbortMultipartUploadCommand({
                 Bucket: bucket,
                 Key: '',
                 UploadId: uploadId
-            },
-            err => {
+            }))
+            .then(() => {
+                done(new Error('Expected failure but got success'));
+            })
+            .catch(err => {
                 checkError(err, 'InvalidRequest', 'A key must be specified');
                 done();
             });
@@ -104,13 +121,10 @@ describe('Abort MPU with existing object', function AbortMPUExistingObject() {
     const bucketName = `abortmpu-test-bucket-${Date.now()}`;
     const objectKey = 'my-object';
 
-    beforeEach(done => {
+    beforeEach(async () => {
         bucketUtil = new BucketUtility('default', sigCfg);
         s3 = bucketUtil.s3;
-        s3.createBucket({ Bucket: bucketName }, err => {
-            assert.ifError(err, `Error creating bucket: ${err}`);
-            done();
-        });
+        await s3.send(new CreateBucketCommand({ Bucket: bucketName }));
     });
 
     afterEach(async () => {
@@ -125,65 +139,82 @@ describe('Abort MPU with existing object', function AbortMPUExistingObject() {
         let etag1;
         async.waterfall([
             next => {
-                s3.createMultipartUpload({ Bucket: bucketName, Key: objectKey }, (err, data) => {
-                    assert.ifError(err, `error creating MPU 1: ${err}`);
-                    uploadId1 = data.UploadId;
-                    s3.uploadPart({
-                        Bucket: bucketName,
-                        Key: objectKey,
-                        PartNumber: 1,
-                        UploadId: uploadId1,
-                        Body: part1,
-                    }, (err, data) => {
-                        assert.ifError(err, `error uploading part for MPU 1: 
${err}`); + s3.send(new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: objectKey, + })) + .then(data => { + uploadId1 = data.UploadId; + return s3.send(new UploadPartCommand({ + Bucket: bucketName, + Key: objectKey, + PartNumber: 1, + UploadId: uploadId1, + Body: part1, + })); + }) + .then(data => { etag1 = data.ETag; - s3.completeMultipartUpload({ + return s3.send(new CompleteMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, UploadId: uploadId1, MultipartUpload: { Parts: [{ ETag: etag1, PartNumber: 1 }] }, - }, err => { - assert.ifError(err, `error completing MPU 1: ${err}`); - next(); - }); - }); - }); + })); + }) + .then(() => next()) + .catch(err => next(err)); }, next => { - s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => { - assert.ifError(err, `error getting object after MPU 1: ${err}`); - assert.strictEqual(data.Body.toString(), part1.toString()); - next(); - }); + s3.send(new GetObjectCommand({ + Bucket: bucketName, + Key: objectKey, + })) + .then(async data => { + const bodyText = await data.Body.transformToString(); + assert.strictEqual(bodyText, part1.toString()); + next(); + }) + .catch(err => next(err)); }, next => { - s3.createMultipartUpload({ Bucket: bucketName, Key: objectKey }, (err, data) => { - assert.ifError(err, `error creating MPU 2: ${err}`); - uploadId2 = data.UploadId; - s3.uploadPart({ - Bucket: bucketName, - Key: objectKey, - PartNumber: 1, - UploadId: uploadId2, - Body: part2, - }, err => { - assert.ifError(err, `error uploading part for MPU 2: ${err}`); - next(); - }); - }); + s3.send(new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: objectKey, + })) + .then(data => { + uploadId2 = data.UploadId; + return s3.send(new UploadPartCommand({ + Bucket: bucketName, + Key: objectKey, + PartNumber: 1, + UploadId: uploadId2, + Body: part2, + })); + }) + .then(() => next()) + .catch(err => next(err)); }, next => { - s3.abortMultipartUpload({ Bucket: bucketName, Key: objectKey, UploadId: uploadId2 }, err => { - assert.ifError(err, `error aborting MPU 2: ${err}`); - next(); - }); + s3.send(new AbortMultipartUploadCommand({ + Bucket: bucketName, + Key: objectKey, + UploadId: uploadId2, + })) + .then(() => next()) + .catch(err => next(err)); }, next => { - s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => { - assert.ifError(err, `error getting object after aborting MPU 2: ${err}`); - assert.strictEqual(data.Body.toString(), part1.toString()); - next(); - }); + s3.send(new GetObjectCommand({ + Bucket: bucketName, + Key: objectKey, + })) + .then(async data => { + const bodyText = await data.Body.transformToString(); + assert.strictEqual(bodyText, part1.toString()); + next(); + }) + .catch(err => next(err)); }, ], done); }); @@ -196,76 +227,82 @@ describe('Abort MPU with existing object', function AbortMPUExistingObject() { let etag2; async.waterfall([ next => { - s3.createMultipartUpload({ - Bucket: bucketName, Key: objectKey, - }, (err, data) => { - assert.ifError(err, `error creating MPU 1: ${err}`); - uploadId1 = data.UploadId; - s3.uploadPart({ - Bucket: bucketName, - Key: objectKey, - PartNumber: 1, - UploadId: uploadId1, - Body: part1, - }, err => { - assert.ifError(err, `error uploading part for MPU 1: ${err}`); - next(); - }); - }); + s3.send(new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: objectKey, + })) + .then(data => { + uploadId1 = data.UploadId; + return s3.send(new UploadPartCommand({ + Bucket: bucketName, + Key: objectKey, + PartNumber: 1, + UploadId: uploadId1, + 
Body: part1, + })); + }) + .then(() => next()) + .catch(err => next(err)); }, next => { - s3.createMultipartUpload({ - Bucket: bucketName, Key: objectKey, - }, (err, data) => { - assert.ifError(err, `error creating MPU 2: ${err}`); - uploadId2 = data.UploadId; - s3.uploadPart({ - Bucket: bucketName, - Key: objectKey, - PartNumber: 1, - UploadId: uploadId2, - Body: part2, - }, (err, data) => { - assert.ifError(err, `error uploading part for MPU 2: ${err}`); + s3.send(new CreateMultipartUploadCommand({ + Bucket: bucketName, + Key: objectKey, + })) + .then(data => { + uploadId2 = data.UploadId; + return s3.send(new UploadPartCommand({ + Bucket: bucketName, + Key: objectKey, + PartNumber: 1, + UploadId: uploadId2, + Body: part2, + })); + }) + .then(data => { etag2 = data.ETag; - s3.completeMultipartUpload({ + return s3.send(new CompleteMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, UploadId: uploadId2, MultipartUpload: { Parts: [{ ETag: etag2, PartNumber: 1 }] }, - }, err => { - assert.ifError(err, `error completing MPU 2: ${err}`); - next(); - }); - }); - }); + })); + }) + .then(() => next()) + .catch(err => next(err)); }, next => { - s3.getObject({ + s3.send(new GetObjectCommand({ Bucket: bucketName, Key: objectKey, - }, (err, data) => { - assert.ifError(err, `error getting object after MPU 2: ${err}`); - assert.strictEqual(data.Body.toString(), part2.toString()); - next(); - }); + })) + .then(async data => { + const bodyText = await data.Body.transformToString(); + assert.strictEqual(bodyText, part2.toString()); + next(); + }) + .catch(err => next(err)); }, next => { - s3.abortMultipartUpload({ + s3.send(new AbortMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, UploadId: uploadId1, - }, err => { - assert.ifError(err, `error aborting MPU 1: ${err}`); - next(); - }); + })) + .then(() => next()) + .catch(err => next(err)); }, next => { - s3.getObject({ Bucket: bucketName, Key: objectKey }, (err, data) => { - assert.ifError(err, `error getting object after aborting MPU 1: ${err}`); - assert.strictEqual(data.Body.toString(), part2.toString()); - next(); - }); + s3.send(new GetObjectCommand({ + Bucket: bucketName, + Key: objectKey, + })) + .then(async data => { + const bodyText = await data.Body.transformToString(); + assert.strictEqual(bodyText, part2.toString()); + next(); + }) + .catch(err => next(err)); }, ], done); }); @@ -277,24 +314,27 @@ describe('Abort MPU - No Such Upload', () => { let bucketUtil; let s3; - beforeEach(() => { + beforeEach(async () => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise(); + await s3.send(new CreateBucketCommand({ Bucket: bucket })); }); afterEach(() => bucketUtil.deleteOne(bucket)); it('should return NoSuchUpload error when aborting non-existent mpu', done => { - s3.abortMultipartUpload({ + s3.send(new AbortMultipartUploadCommand({ Bucket: bucket, Key: key, UploadId: uuidv4().replace(/-/g, '') - }, - err => { + })) + .then(() => { + done(new Error('Expected failure but got success')); + }) + .catch(err => { assert.notEqual(err, null, 'Expected failure but got success'); - assert.strictEqual(err.code, 'NoSuchUpload'); + assert.strictEqual(err.name, 'NoSuchUpload'); done(); }); }); @@ -310,17 +350,15 @@ describe('Abort MPU - Versioned Bucket Cleanup', function testSuite() { const bucketName = `abort-mpu-versioned-${Date.now()}`; const objectKey = 'test-object-with-versions'; - beforeEach(done => { + beforeEach(async () => { bucketUtil = new 
BucketUtility('default', sigCfg);
         s3 = bucketUtil.s3;
 
-        async.series([
-            next => s3.createBucket({ Bucket: bucketName }, next),
-            next => s3.putBucketVersioning({
-                Bucket: bucketName,
-                VersioningConfiguration: { Status: 'Enabled' },
-            }, next),
-        ], done);
+        await s3.send(new CreateBucketCommand({ Bucket: bucketName }));
+        await s3.send(new PutBucketVersioningCommand({
+            Bucket: bucketName,
+            VersioningConfiguration: { Status: 'Enabled' },
+        }));
     });
 
     afterEach(async () => {
@@ -341,28 +379,28 @@ describe('Abort MPU - Versioned Bucket Cleanup', function testSuite() {
 
                 async.waterfall([
                     next => {
-                        s3.createMultipartUpload({
+                        s3.send(new CreateMultipartUploadCommand({
                             Bucket: bucketName,
                             Key: objectKey,
-                        }, (err, result) => {
-                            assert.ifError(err);
-                            if (currentVersion === numberOfVersions) {
-                                finalUploadId = result.UploadId; // Save the last one for aborting
-                            }
-                            next(null, result.UploadId);
-                        });
+                        }))
+                        .then(result => {
+                            if (currentVersion === numberOfVersions) {
+                                finalUploadId = result.UploadId; // Save the last one for aborting
+                            }
+                            next(null, result.UploadId);
+                        })
+                        .catch(err => next(err));
                     },
                     (uploadId, next) => {
-                        s3.uploadPart({
+                        s3.send(new UploadPartCommand({
                             Bucket: bucketName,
                             Key: objectKey,
                             PartNumber: 1,
                             UploadId: uploadId,
                             Body: data,
-                        }, (err, result) => {
-                            assert.ifError(err);
-                            next(null, uploadId, result.ETag);
-                        });
+                        }))
+                        .then(result => next(null, uploadId, result.ETag))
+                        .catch(err => next(err));
                     },
                     (uploadId, etag, next) => {
                         if (currentVersion === numberOfVersions) {
@@ -370,14 +408,16 @@ describe('Abort MPU - Versioned Bucket Cleanup', function testSuite() {
                             return next();
                         }
 
-                        return s3.completeMultipartUpload({
+                        return s3.send(new CompleteMultipartUploadCommand({
                             Bucket: bucketName,
                             Key: objectKey,
                             UploadId: uploadId,
                             MultipartUpload: {
                                 Parts: [{ ETag: etag, PartNumber: 1 }],
                             },
-                        }, next);
+                        }))
+                        .then(() => next())
+                        .catch(err => next(err));
                     },
                 ], callback);
             },
@@ -385,24 +425,20 @@ describe('Abort MPU - Versioned Bucket Cleanup', function testSuite() {
                 assert.ifError(err);
 
                 // Now abort the final MPU
-                s3.abortMultipartUpload({
+                s3.send(new AbortMultipartUploadCommand({
                     Bucket: bucketName,
                     Key: objectKey,
                     UploadId: finalUploadId,
-                }, err => {
-                    assert.ifError(err);
-
-                    // Verify we still have the correct number of completed versions
-                    s3.listObjectVersions({ Bucket: bucketName }, (err, data) => {
-                        assert.ifError(err);
-
+                }))
+                .then(() => s3.send(new ListObjectVersionsCommand({ Bucket: bucketName })))
+                .then(data => {
                     const objectVersions = data.Versions.filter(v => v.Key === objectKey);
                     assert.strictEqual(objectVersions.length, numberOfVersions - 1,
                         `Expected ${numberOfVersions - 1} versions after abort, got ${objectVersions.length}`);
-                        done();
-                    });
-                });
+                    done();
+                })
+                .catch(err => done(err));
             }
         );
    });
@@ -414,60 +449,64 @@ describe('Abort MPU - Versioned Bucket Cleanup', function testSuite() {
         async.waterfall([
             // Create and upload part for MPU
             next => {
-                s3.createMultipartUpload({
+                s3.send(new CreateMultipartUploadCommand({
                     Bucket: bucketName,
                     Key: objectKey,
-                }, (err, result) => {
-                    assert.ifError(err);
-                    uploadId = result.UploadId;
-                    next();
-                });
+                }))
+                .then(result => {
+                    uploadId = result.UploadId;
+                    next();
+                })
+                .catch(err => next(err));
             },
             next => {
-                s3.uploadPart({
+                s3.send(new UploadPartCommand({
                     Bucket: bucketName,
                     Key: objectKey,
                     PartNumber: 1,
                     UploadId: uploadId,
                     Body: data,
-                }, err => {
-                    assert.ifError(err);
-                    next();
-                });
+                }))
+                .then(() => next())
+                .catch(err => next(err));
             },
             // Abort the MPU
             next => {
-                
s3.abortMultipartUpload({ + s3.send(new AbortMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, UploadId: uploadId, - }, err => { - assert.ifError(err); - next(); - }); + })) + .then(() => next()) + .catch(err => next(err)); }, // Verify no object exists next => { - s3.getObject({ Bucket: bucketName, Key: objectKey }, err => { - assert.notEqual(err, null, 'Expected NoSuchKey error'); - assert.strictEqual(err.code, 'NoSuchKey'); - next(); - }); + s3.send(new GetObjectCommand({ + Bucket: bucketName, + Key: objectKey, + })) + .then(() => { + next(new Error('Expected NoSuchKey error')); + }) + .catch(err => { + assert.strictEqual(err.name, 'NoSuchKey'); + next(); + }); }, // Verify no versions exist next => { - s3.listObjectVersions({ Bucket: bucketName }, (err, data) => { - assert.ifError(err); - - const objectVersions = data.Versions.filter(v => v.Key === objectKey); - assert.strictEqual(objectVersions.length, 0, - `Expected 0 versions after abort, got ${objectVersions.length}`); - - next(); - }); + s3.send(new ListObjectVersionsCommand({ Bucket: bucketName })) + .then(data => { + const objectVersions = (data.Versions || []).filter(v => v.Key === objectKey); + assert.strictEqual(objectVersions.length, 0, + `Expected 0 versions after abort, got ${objectVersions.length}`); + next(); + }) + .catch(err => next(err)); }, ], done); }); @@ -497,27 +536,27 @@ describe('Abort MPU - Orphan Cleanup', function testSuite() { const tempObjectKey = `temp-object-for-metadata-${Date.now()}`; // Create temporary MPU and complete it to get real object metadata - const createResult = await s3Client.createMultipartUpload({ + const createResult = await s3Client.send(new CreateMultipartUploadCommand({ Bucket: bucketName, Key: tempObjectKey, - }).promise(); + })); const tempUploadId = createResult.UploadId; - const uploadResult = await s3Client.uploadPart({ + const uploadResult = await s3Client.send(new UploadPartCommand({ Bucket: bucketName, Key: tempObjectKey, PartNumber: 1, UploadId: tempUploadId, Body: data, - }).promise(); + })); const tempEtag = uploadResult.ETag; - const completeResult = await s3Client.completeMultipartUpload({ + const completeResult = await s3Client.send(new CompleteMultipartUploadCommand({ Bucket: bucketName, Key: tempObjectKey, UploadId: tempUploadId, MultipartUpload: { Parts: [{ ETag: tempEtag, PartNumber: 1 }] }, - }).promise(); + })); let tempVersionId; if (isVersioned && completeResult.VersionId) { @@ -546,7 +585,7 @@ describe('Abort MPU - Orphan Cleanup', function testSuite() { deleteParams.VersionId = tempVersionId; } - await s3Client.deleteObject(deleteParams).promise(); + await s3Client.send(new DeleteObjectCommand(deleteParams)); return orphanedObjectMD; } @@ -562,7 +601,8 @@ describe('Abort MPU - Orphan Cleanup', function testSuite() { s3 = bucketUtil.s3; async.series([ - next => s3.createBucket({ Bucket: bucketName }, next), + next => s3.send(new CreateBucketCommand({ Bucket: bucketName })).then(() => + next()).catch(err => next(err)), next => initMetadata(next), ], done); }); @@ -575,40 +615,41 @@ describe('Abort MPU - Orphan Cleanup', function testSuite() { const data = Buffer.from('test data for orphan cleanup'); // Create MPU and upload a part - const createResult = await s3.createMultipartUpload({ + const createResult = await s3.send(new CreateMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, - }).promise(); + })); const uploadId = createResult.UploadId; - await s3.uploadPart({ + await s3.send(new UploadPartCommand({ Bucket: bucketName, Key: objectKey, 
PartNumber: 1, UploadId: uploadId, Body: data, - }).promise(); + })); // Create realistic orphaned object metadata like a CompleteMPU would when failing before cleanup await createOrphanedObjectMetadata(s3, bucketName, objectKey, uploadId, data, false); // Verify the orphaned object exists - await s3.headObject({ Bucket: bucketName, Key: objectKey }).promise(); + await s3.send(new HeadObjectCommand({ Bucket: bucketName, Key: objectKey })); // Abort MPU - should clean up the orphaned object - await s3.abortMultipartUpload({ + await s3.send(new AbortMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, UploadId: uploadId, - }).promise(); + })); // Verify the orphaned object was cleaned up try { - await s3.headObject({ Bucket: bucketName, Key: objectKey }).promise(); + await s3.send(new HeadObjectCommand({ Bucket: bucketName, Key: objectKey })); assert.fail('Orphaned object should be deleted after abort'); } catch (err) { assert(err); - assert.strictEqual(err.code, 'NotFound'); + assert.strictEqual(err.name, 'NotFound'); + assert.strictEqual(err.$metadata.httpStatusCode, 404); } }); @@ -616,25 +657,25 @@ describe('Abort MPU - Orphan Cleanup', function testSuite() { const data = Buffer.from('test versioned orphan cleanup'); // Enable versioning - await s3.putBucketVersioning({ + await s3.send(new PutBucketVersioningCommand({ Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled' }, - }).promise(); + })); // Create MPU - const createResult = await s3.createMultipartUpload({ + const createResult = await s3.send(new CreateMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, - }).promise(); + })); const uploadId = createResult.UploadId; - await s3.uploadPart({ + await s3.send(new UploadPartCommand({ Bucket: bucketName, Key: objectKey, PartNumber: 1, UploadId: uploadId, Body: data, - }).promise(); + })); // Create realistic orphaned object metadata like a CompleteMPU would when failing before cleanup const orphanedObjectMD = await createOrphanedObjectMetadata( @@ -643,36 +684,36 @@ describe('Abort MPU - Orphan Cleanup', function testSuite() { // Put a new master version on top of the orphaned version // The abort will fetch this during standardMetadataValidateBucketAndObj // It will force abort to findObjectVersionByUploadId - await s3.putObject({ + await s3.send(new PutObjectCommand({ Bucket: bucketName, Key: objectKey, Body: 'version 2 data', - }).promise(); + })); // Verify we have 2 versions (1 regular + 1 orphaned) - let listResult = await s3.listObjectVersions({ Bucket: bucketName }).promise(); + let listResult = await s3.send(new ListObjectVersionsCommand({ Bucket: bucketName })); let objectVersions = listResult.Versions.filter(v => v.Key === objectKey); assert.strictEqual(objectVersions.length, 2, 'Expected 2 versions before abort, 1 regular + 1 orphaned' ); // Abort MPU - should find and clean up only the orphaned version - await s3.abortMultipartUpload({ + await s3.send(new AbortMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, UploadId: uploadId, - }).promise(); + })); // Verify only the orphaned version was deleted - listResult = await s3.listObjectVersions({ Bucket: bucketName }).promise(); + listResult = await s3.send(new ListObjectVersionsCommand({ Bucket: bucketName })); objectVersions = listResult.Versions.filter(v => v.Key === objectKey); assert.strictEqual(objectVersions.length, 1, 'Should have 1 version after abort (orphaned version cleaned up)'); // ensure orphanedObj doesn't exist try { - await s3.headObject({ Bucket: bucketName, Key: 
objectKey, - VersionId: orphanedObjectMD.versionId }).promise(); + await s3.send(new HeadObjectCommand({ Bucket: bucketName, Key: objectKey, + VersionId: orphanedObjectMD.versionId })); assert.fail('Orphaned object should be deleted after abort'); } catch (err) { assert(err); @@ -694,7 +735,7 @@ describe('Abort MPU - Race Conditions', function testSuite() { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - await s3.createBucket({ Bucket: bucketName }).promise(); + await s3.send(new CreateBucketCommand({ Bucket: bucketName })); }); afterEach(async () => { @@ -705,38 +746,38 @@ describe('Abort MPU - Race Conditions', function testSuite() { const data = Buffer.from('test concurrent complete and abort'); // Create MPU and upload part - const createResult = await s3.createMultipartUpload({ + const createResult = await s3.send(new CreateMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, - }).promise(); + })); const uploadId = createResult.UploadId; - const uploadResult = await s3.uploadPart({ + const uploadResult = await s3.send(new UploadPartCommand({ Bucket: bucketName, Key: objectKey, PartNumber: 1, UploadId: uploadId, Body: data, - }).promise(); + })); const etag = uploadResult.ETag; // Start concurrent operations: CompleteMPU and AbortMPU const [completeResult, abortResult] = await Promise.allSettled([ - s3.completeMultipartUpload({ + s3.send(new CompleteMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, UploadId: uploadId, MultipartUpload: { Parts: [{ ETag: etag, PartNumber: 1 }], }, - }).promise(), + })), // Add small delay to create race condition - scheduler.wait(10).then(() => s3.abortMultipartUpload({ + scheduler.wait(10).then(() => s3.send(new AbortMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, UploadId: uploadId, - }).promise()) + }))) ]); // Verify final state is consistent @@ -746,11 +787,11 @@ describe('Abort MPU - Race Conditions', function testSuite() { if (!completeError) { // Complete succeeded - object should exist or be cleaned up try { - const headResult = await s3.headObject({ Bucket: bucketName, Key: objectKey }).promise(); + const headResult = await s3.send(new HeadObjectCommand({ Bucket: bucketName, Key: objectKey })); // Complete won the race - verify object exists and is accessible assert.ok(headResult.ETag, 'Object should have valid ETag'); } catch (err) { - if (err.code === 'NotFound') { + if (err.name === 'NotFound') { // Abort may have cleaned up the object after complete created it // This is acceptable } else { @@ -760,7 +801,7 @@ describe('Abort MPU - Race Conditions', function testSuite() { } else if (!abortError) { // Abort succeeded - check if object exists or was cleaned up try { - await s3.headObject({ Bucket: bucketName, Key: objectKey }).promise(); + await s3.send(new HeadObjectCommand({ Bucket: bucketName, Key: objectKey })); // Either object exists (complete won) or doesn't (abort won) // Both states are acceptable } catch { @@ -771,8 +812,8 @@ describe('Abort MPU - Race Conditions', function testSuite() { // as long as the system remains consistent // Verify no MPU metadata remains - const listResult = await s3.listMultipartUploads({ Bucket: bucketName }).promise(); - const remainingUploads = listResult.Uploads.filter(upload => upload.UploadId === uploadId); + const listResult = await s3.send(new ListMultipartUploadsCommand({ Bucket: bucketName })); + const remainingUploads = (listResult.Uploads || []).filter(upload => upload.UploadId === uploadId); assert.strictEqual(remainingUploads.length, 
0, 'No MPU metadata should remain'); }); @@ -780,37 +821,37 @@ describe('Abort MPU - Race Conditions', function testSuite() { const data = Buffer.from('test multiple concurrent aborts'); // Create MPU and upload part - const createResult = await s3.createMultipartUpload({ + const createResult = await s3.send(new CreateMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, - }).promise(); + })); const uploadId = createResult.UploadId; - await s3.uploadPart({ + await s3.send(new UploadPartCommand({ Bucket: bucketName, Key: objectKey, PartNumber: 1, UploadId: uploadId, Body: data, - }).promise(); + })); // Launch 3 concurrent abort operations const abortResults = await Promise.allSettled([ - s3.abortMultipartUpload({ + s3.send(new AbortMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, UploadId: uploadId, - }).promise(), - s3.abortMultipartUpload({ + })), + s3.send(new AbortMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, UploadId: uploadId, - }).promise(), - s3.abortMultipartUpload({ + })), + s3.send(new AbortMultipartUploadCommand({ Bucket: bucketName, Key: objectKey, UploadId: uploadId, - }).promise() + })) ]); // Verify results @@ -823,16 +864,16 @@ describe('Abort MPU - Race Conditions', function testSuite() { assert(successfulAborts.length >= 1, 'At least one abort should succeed'); // Other aborts may fail with NoSuchUpload - this is acceptable - const otherErrors = abortErrors.filter(err => err && err.code !== 'NoSuchUpload'); + const otherErrors = abortErrors.filter(err => err && err.name !== 'NoSuchUpload'); assert.strictEqual(otherErrors.length, 0, 'Should not have unexpected errors'); // Verify final cleanup state // No object should exist since no CompleteMPU was performed try { - await s3.headObject({ Bucket: bucketName, Key: objectKey }).promise(); + await s3.send(new HeadObjectCommand({ Bucket: bucketName, Key: objectKey })); assert.fail('No object should exist after aborting MPU'); } catch (err) { - if (err.code === 'NotFound') { + if (err.name === 'NotFound') { // Expected - no object should exist } else { throw err; @@ -840,8 +881,8 @@ describe('Abort MPU - Race Conditions', function testSuite() { } // Verify no MPU metadata remains - const listResult = await s3.listMultipartUploads({ Bucket: bucketName }).promise(); - const remainingUploads = listResult.Uploads.filter(upload => upload.UploadId === uploadId); + const listResult = await s3.send(new ListMultipartUploadsCommand({ Bucket: bucketName })); + const remainingUploads = (listResult.Uploads || []).filter(upload => upload.UploadId === uploadId); assert.strictEqual(remainingUploads.length, 0, 'No MPU metadata should remain after concurrent aborts'); }); diff --git a/tests/functional/aws-node-sdk/test/object/bigMpu.js b/tests/functional/aws-node-sdk/test/object/bigMpu.js index 1ce32529af..0088672939 100644 --- a/tests/functional/aws-node-sdk/test/object/bigMpu.js +++ b/tests/functional/aws-node-sdk/test/object/bigMpu.js @@ -1,7 +1,17 @@ const assert = require('assert'); - -const { S3 } = require('aws-sdk'); const { timesLimit, waterfall } = require('async'); +const { NodeHttpHandler } = require('@smithy/node-http-handler'); + +const { + S3Client, + CreateBucketCommand, + CreateMultipartUploadCommand, + UploadPartCommand, + CompleteMultipartUploadCommand, + GetObjectCommand, + DeleteObjectCommand, + DeleteBucketCommand +} = require('@aws-sdk/client-s3'); const getConfig = require('../support/config'); @@ -14,6 +24,7 @@ const finalETag = require('crypto').createHash('md5') 
.update(Buffer.from(eTag.repeat(partCount), 'hex').toString('binary'), 'binary').digest('hex'); +const partETags = new Array(partCount); function uploadPart(n, uploadId, s3, next) { const params = { Bucket: bucket, @@ -25,13 +36,16 @@ function uploadPart(n, uploadId, s3, next) { if (params.PartNumber % 20 === 0) { process.stdout.write(`uploading PartNumber: ${params.PartNumber}\n`); } - s3.uploadPart(params, err => { - if (err) { - process.stdout.write('error putting part: ', err); + + s3.send(new UploadPartCommand(params)) + .then(data => { + partETags[n] = data.ETag; + next(); + }) + .catch(err => { + process.stdout.write(`error putting part ${params.PartNumber}: ${err}\n`); return next(err); - } - return next(); - }); + }); } // NOTE: This test has a history of failing in end-to-end Integration tests. @@ -42,25 +56,36 @@ describe('large mpu', function tester() { let s3; before(done => { const config = getConfig('default', { signatureVersion: 'v4' }); - s3 = new S3(config); // disable node sdk retries and timeout to prevent InvalidPart // and SocketHangUp errors. If retries are allowed, sdk will send // another request after first request has already deleted parts, // causing InvalidPart. Meanwhile, if request takes too long to finish, // sdk will create SocketHangUp error before response. - s3.config.update({ maxRetries: 0 }); - s3.config.update({ httpOptions: { timeout: 0 } }); - s3.createBucket({ Bucket: bucket }, done); + // Custom request handler with no timeouts + const requestHandler = new NodeHttpHandler({ + requestTimeout: 0, + connectionTimeout: 0, + }); + + s3 = new S3Client({ + ...config, + maxAttempts: 1, + requestHandler, + }); + + s3.send(new CreateBucketCommand({ Bucket: bucket })) + .then(() => done()) + .catch(err => done(err)); }); after(done => { - s3.deleteObject({ Bucket: bucket, Key: key }, err => { - if (err) { - process.stdout.write('err deleting object in after: ', err); + s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: key })) + .then(() => s3.send(new DeleteBucketCommand({ Bucket: bucket }))) + .then(() => done()) + .catch(err => { + process.stdout.write(`err deleting object in after: ${err}\n`); return done(err); - } - return s3.deleteBucket({ Bucket: bucket }, done); - }); + }); }); const itSkipIfAWS = process.env.AWS_ON_AIR ? 
it.skip : it; @@ -69,30 +94,36 @@ describe('large mpu', function tester() { itSkipIfAWS('should intiate, put parts and complete mpu ' + `with ${partCount} parts`, done => { process.stdout.write('***Running large MPU test***\n'); - let uploadId; + let uploadId; return waterfall([ - next => s3.createMultipartUpload({ Bucket: bucket, Key: key }, - (err, data) => { - if (err) { - return done(err); - } - process.stdout.write('initated mpu\n'); - uploadId = data.UploadId; - return next(); - }), next => { - process.stdout.write('putting parts'); + s3.send(new CreateMultipartUploadCommand({ Bucket: bucket, Key: key })) + .then(data => { + process.stdout.write('initiated mpu\n'); + uploadId = data.UploadId; + return next(); + }) + .catch(err => next(err)); + }, + next => { + process.stdout.write('putting parts\n'); return timesLimit(partCount, 20, (n, cb) => - uploadPart(n, uploadId, s3, cb), err => - next(err) - ); + uploadPart(n, uploadId, s3, cb), err => { + if (err) { + process.stdout.write(`Error in timesLimit: ${err}\n`); + } + return next(err); + }); }, next => { const parts = []; - for (let i = 1; i <= partCount; i++) { + for (let i = 0; i < partCount; i++) { + if (!partETags[i]) { + return next(new Error(`Missing ETag for part ${i + 1}`)); + } parts.push({ - ETag: eTag, - PartNumber: i, + ETag: partETags[i], + PartNumber: i + 1, }); } const params = { @@ -103,25 +134,23 @@ describe('large mpu', function tester() { Parts: parts, }, }; - return s3.completeMultipartUpload(params, err => { - if (err) { - process.stdout.write('err complting mpu: ', err); - return next(err); - } - return next(); - }); + return s3.send(new CompleteMultipartUploadCommand(params)) + .then(() => { + process.stdout.write('mpu completed successfully\n'); + next(); + }) + .catch(err => next(err)); }, next => { - process.stdout.write('about to get object'); - return s3.getObject({ Bucket: bucket, Key: key }, - (err, data) => { - if (err) { - return next(err); - } + process.stdout.write('about to get object\n'); + s3.send(new GetObjectCommand({ Bucket: bucket, Key: key })) + .then(data => { assert.strictEqual(data.ETag, `"${finalETag}-${partCount}"`); + process.stdout.write('get object successful\n'); return next(); - }); + }) + .catch(err => next(err)); }, ], done); }); diff --git a/tests/functional/aws-node-sdk/test/object/completeMPU.js b/tests/functional/aws-node-sdk/test/object/completeMPU.js index ee01269148..f51a1d64c3 100644 --- a/tests/functional/aws-node-sdk/test/object/completeMPU.js +++ b/tests/functional/aws-node-sdk/test/object/completeMPU.js @@ -8,16 +8,22 @@ const { versioningSuspended, } = require('../../lib/utility/versioning-util.js'); const { taggingTests } = require('../../lib/utility/tagging'); +const { + CreateBucketCommand, + DeleteBucketCommand, + CreateMultipartUploadCommand, + UploadPartCommand, + CompleteMultipartUploadCommand, + GetObjectCommand, + PutBucketVersioningCommand, + GetObjectTaggingCommand, + NoSuchKey +} = require('@aws-sdk/client-s3'); const date = Date.now(); const bucket = `completempu${date}`; const key = 'key'; -function checkNoError(err) { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); -} - describe('Complete MPU', () => { withV4(sigCfg => { @@ -25,48 +31,51 @@ describe('Complete MPU', () => { const s3 = bucketUtil.s3; function _completeMpuAndCheckVid(uploadId, eTag, expectedVid, cb) { - s3.completeMultipartUpload({ + let versionId; + s3.send(new CompleteMultipartUploadCommand({ Bucket: bucket, Key: key, MultipartUpload: { Parts: [{ ETag: 
eTag, PartNumber: 1 }], }, - UploadId: uploadId }, - (err, data) => { - checkNoError(err); - const versionId = data.VersionId; + UploadId: uploadId + })) + .then(data => { + versionId = data.VersionId; if (expectedVid) { assert.notEqual(versionId, undefined); } else { assert.strictEqual(versionId, expectedVid); } - return s3.getObject({ + return s3.send(new GetObjectCommand({ Bucket: bucket, Key: key, - }, - (err, data) => { - checkNoError(err); - if (versionId) { - assert.strictEqual(data.VersionId, versionId); - } - cb(); - }); - }); + })); + }) + .then(data => { + if (versionId) { + assert.strictEqual(data.VersionId, versionId); + } + cb(); + }) + .catch(cb); } function _initiateMpuAndPutOnePart() { const result = {}; - return s3.createMultipartUpload({ - Bucket: bucket, Key: key }).promise() + return s3.send(new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: key + })) .then(data => { result.uploadId = data.UploadId; - return s3.uploadPart({ + return s3.send(new UploadPartCommand({ Bucket: bucket, Key: key, PartNumber: 1, UploadId: data.UploadId, Body: 'foo', - }).promise(); + })); }) .then(data => { result.eTag = data.ETag; @@ -78,16 +87,21 @@ describe('Complete MPU', () => { }); } - beforeEach(done => { - s3.createBucket({ Bucket: bucket }, done); + beforeEach(async () => { + await s3.send(new CreateBucketCommand({ Bucket: bucket })); }); afterEach(done => { removeAllVersions({ Bucket: bucket }, err => { if (err) { - return done(err); + process.stdout.write(`Error removing all versions: ${err}\n`); } - return s3.deleteBucket({ Bucket: bucket }, done); + s3.send(new DeleteBucketCommand({ Bucket: bucket })) + .then(() => done()) + .catch(err => { + process.stdout.write(`Error deleting bucket: ${err}\n`); + done(err); + }); }); }); @@ -112,8 +126,10 @@ describe('Complete MPU', () => { let uploadId; let eTag; - beforeEach(() => s3.putBucketVersioning({ Bucket: bucket, - VersioningConfiguration: versioningEnabled }).promise() + beforeEach(() => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: versioningEnabled + })) .then(() => _initiateMpuAndPutOnePart()) .then(result => { uploadId = result.uploadId; @@ -131,8 +147,10 @@ describe('Complete MPU', () => { let uploadId; let eTag; - beforeEach(() => s3.putBucketVersioning({ Bucket: bucket, - VersioningConfiguration: versioningSuspended }).promise() + beforeEach(() => s3.send(new PutBucketVersioningCommand({ + Bucket: bucket, + VersioningConfiguration: versioningSuspended + })) .then(() => _initiateMpuAndPutOnePart()) .then(result => { uploadId = result.uploadId; @@ -156,55 +174,71 @@ describe('Complete MPU', () => { const tagging = `${key}=${value}`; async.waterfall([ - next => s3.createMultipartUpload({ - Bucket: bucket, - Key: tagKey, - Tagging: tagging, - }, (err, data) => { - if (test.error) { - assert.strictEqual(err.code, test.error); - assert.strictEqual(err.statusCode, 400); - return next('expected'); - } - return next(err, data.UploadId); - }), - (uploadId, next) => s3.uploadPart({ - Bucket: bucket, - Key: tagKey, - PartNumber: 1, - UploadId: uploadId, - Body: 'foo', - }, (err, data) => { - next(err, data.ETag, uploadId); - }), - (eTag, uploadId, next) => s3.completeMultipartUpload({ - Bucket: bucket, - Key: tagKey, - UploadId: uploadId, - MultipartUpload: { - Parts: [{ - ETag: eTag, - PartNumber: 1, - }], - }, - }, next), + next => { + s3.send(new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: tagKey, + Tagging: tagging, + })) + .then(data => { + if (test.error) { + return 
next(new Error('Expected error but got success')); + } + return next(null, data.UploadId); + }) + .catch(err => { + if (test.error) { + assert.strictEqual(err.name, test.error); + assert.strictEqual(err.$metadata.httpStatusCode, 400); + return next('expected'); + } + return next(err); + }); + }, + (uploadId, next) => { + s3.send(new UploadPartCommand({ + Bucket: bucket, + Key: tagKey, + PartNumber: 1, + UploadId: uploadId, + Body: 'foo', + })) + .then(data => next(null, data.ETag, uploadId)) + .catch(err => next(err)); + }, + (eTag, uploadId, next) => { + s3.send(new CompleteMultipartUploadCommand({ + Bucket: bucket, + Key: tagKey, + UploadId: uploadId, + MultipartUpload: { + Parts: [{ + ETag: eTag, + PartNumber: 1, + }], + }, + })) + .then(() => next()) + .catch(err => next(err)); + }, ], err => { if (err === 'expected') { done(); } else { assert.ifError(err); - s3.getObjectTagging({ + s3.send(new GetObjectTaggingCommand({ Bucket: bucket, Key: tagKey, - }, (err, tagData) => { - assert.ifError(err); + })) + .then(tagData => { assert.deepStrictEqual(tagData.TagSet, [{ Key: test.tag.key, Value: test.tag.value, }]); done(); - }); + }) + .catch(err => done(err)); } }); }); @@ -224,21 +258,25 @@ describe('Complete MPU', () => { it('should complete the MPU successfully and leave a readable object', done => { async.parallel([ - doneReUpload => s3.uploadPart({ - Bucket: bucket, - Key: key, - PartNumber: 1, - UploadId: uploadId, - Body: 'foo', - }, err => { - // in case the CompleteMPU finished earlier, - // we may get a NoSuchKey error, so just - // ignore it - if (err && err.code === 'NoSuchKey') { - return doneReUpload(); - } - return doneReUpload(err); - }), + doneReUpload => { + s3.send(new UploadPartCommand({ + Bucket: bucket, + Key: key, + PartNumber: 1, + UploadId: uploadId, + Body: 'foo', + })) + .then(() => doneReUpload()) + .catch(err => { + // in case the CompleteMPU finished earlier, + // we may get a NoSuchKey error, so just + // ignore it + if (err instanceof NoSuchKey) { + return doneReUpload(); + } + return doneReUpload(err); + }); + }, doneComplete => _completeMpuAndCheckVid( uploadId, eTag, undefined, doneComplete), ], done); diff --git a/tests/functional/aws-node-sdk/test/object/compluteMpu.js b/tests/functional/aws-node-sdk/test/object/compluteMpu.js index 5bd74287ca..d8a20a6c7a 100644 --- a/tests/functional/aws-node-sdk/test/object/compluteMpu.js +++ b/tests/functional/aws-node-sdk/test/object/compluteMpu.js @@ -1,5 +1,10 @@ const assert = require('assert'); -const { S3 } = require('aws-sdk'); +const { + S3Client, + CreateBucketCommand, + DeleteBucketCommand, + CompleteMultipartUploadCommand, +} = require('@aws-sdk/client-s3'); const getConfig = require('../support/config'); @@ -21,14 +26,16 @@ describe('aws-node-sdk test bucket complete mpu', () => { let s3; // setup test - before(done => { + before(async () => { const config = getConfig('default', { signatureVersion: 'v4' }); - s3 = new S3(config); - s3.createBucket({ Bucket: bucket }, done); + s3 = new S3Client(config); + await s3.send(new CreateBucketCommand({ Bucket: bucket })); }); // delete bucket after testing - after(done => s3.deleteBucket({ Bucket: bucket }, done)); + after(async () => { + await s3.send(new DeleteBucketCommand({ Bucket: bucket })); + }); const itSkipIfAWS = process.env.AWS_ON_AIR ? 
it.skip : it; itSkipIfAWS('should not accept xml body larger than 1 MB', done => { @@ -40,15 +47,13 @@ describe('aws-node-sdk test bucket complete mpu', () => { Parts: parts, }, }; - s3.completeMultipartUpload(params, error => { - if (error) { - assert.strictEqual(error.statusCode, 400); - assert.strictEqual( - error.code, 'InvalidRequest'); - done(); - } else { - done('accepted xml body larger than 1 MB'); - } + s3.send(new CompleteMultipartUploadCommand(params)).then(() => { + done('accepted xml body larger than 1 MB'); + }).catch(error => { + assert.strictEqual(error.$metadata.httpStatusCode, 400); + assert.strictEqual( + error.name, 'InvalidRequest'); + done(); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/copyPart.js b/tests/functional/aws-node-sdk/test/object/copyPart.js index 1d48e1ba79..80783b0b75 100644 --- a/tests/functional/aws-node-sdk/test/object/copyPart.js +++ b/tests/functional/aws-node-sdk/test/object/copyPart.js @@ -1,7 +1,17 @@ -const { promisify } = require('util'); const assert = require('assert'); const crypto = require('crypto'); +const { CreateBucketCommand, + PutObjectCommand, + GetObjectCommand, + HeadObjectCommand, + CreateMultipartUploadCommand, + UploadPartCommand, + UploadPartCopyCommand, + CompleteMultipartUploadCommand, + AbortMultipartUploadCommand, + PutObjectAclCommand +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); @@ -42,7 +52,7 @@ describe('Object Part Copy', () => { beforeEach(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - s3.createBucketPromise = promisify(s3.createBucket); + s3.createBucketPromise = params => s3.send(new CreateBucketCommand(params)); if (process.env.ENABLE_KMS_ENCRYPTION === 'true') { s3.createBucketPromise = createEncryptedBucketPromise; } @@ -51,30 +61,28 @@ describe('Object Part Copy', () => { process.stdout.write(`Error creating source bucket: ${err}\n`); throw err; }).then(() => - s3.createBucketPromise({ Bucket: destBucketName }) + s3.createBucketPromise({ Bucket: destBucketName }) ).catch(err => { process.stdout.write(`Error creating dest bucket: ${err}\n`); throw err; }) - .then(() => - s3.putObject({ + .then(() => s3.send(new PutObjectCommand({ Bucket: sourceBucketName, Key: sourceObjName, Body: content, - }).promise()) + }))) .then(res => { etag = res.ETag; - return s3.headObject({ + return s3.send(new HeadObjectCommand({ Bucket: sourceBucketName, Key: sourceObjName, - }).promise(); - }).then(() => - s3.createMultipartUpload({ + })); + }).then(() => s3.send(new CreateMultipartUploadCommand({ Bucket: destBucketName, Key: destObjName, - }).promise()).then(iniateRes => { - uploadId = iniateRes.UploadId; - }).catch(err => { + })).then(initiateRes => { + uploadId = initiateRes.UploadId; + })).catch(err => { process.stdout.write(`Error in outer beforeEach: ${err}\n`); throw err; }); @@ -82,13 +90,12 @@ describe('Object Part Copy', () => { afterEach(() => bucketUtil.empty(sourceBucketName) .then(() => bucketUtil.empty(destBucketName)) - .then(() => s3.abortMultipartUpload({ + .then(() => s3.send(new AbortMultipartUploadCommand({ Bucket: destBucketName, Key: destObjName, UploadId: uploadId, - }).promise()) - .catch(err => { - if (err.code !== 'NoSuchUpload') { + }))).catch(err => { + if (err.name !== 'NoSuchUpload') { process.stdout.write(`Error in afterEach: ${err}\n`); throw err; } @@ -99,225 +106,181 @@ describe('Object Part Copy', () => { it('should copy a part from a source 
bucket to a different ' +
-        'destination bucket', done => {
-            s3.uploadPartCopy({ Bucket: destBucketName,
+        'destination bucket', () => s3.send(new UploadPartCopyCommand({ Bucket: destBucketName,
             Key: destObjName,
             CopySource: `${sourceBucketName}/${sourceObjName}`,
             PartNumber: 1,
             UploadId: uploadId,
-            },
-            (err, res) => {
-                checkNoError(err);
-                assert.strictEqual(res.ETag, etag);
-                assert(res.LastModified);
-                done();
-            });
-        });
+        })).then(res => {
+            assert.strictEqual(res.CopyPartResult.ETag, etag);
+            assert(res.CopyPartResult.LastModified);
+        }));
 
         it('should copy a part from a source bucket to a different ' +
-        'destination bucket and complete the MPU', done => {
-            s3.uploadPartCopy({ Bucket: destBucketName,
+        'destination bucket and complete the MPU', () => s3.send(new UploadPartCopyCommand({ Bucket: destBucketName,
             Key: destObjName,
             CopySource: `${sourceBucketName}/${sourceObjName}`,
             PartNumber: 1,
             UploadId: uploadId,
-            },
-            (err, res) => {
-                checkNoError(err);
-                assert.strictEqual(res.ETag, etag);
-                assert(res.LastModified);
-                s3.completeMultipartUpload({
-                    Bucket: destBucketName,
-                    Key: destObjName,
+        })).then(res => {
+            assert.strictEqual(res.CopyPartResult.ETag, etag);
+            assert(res.CopyPartResult.LastModified);
+            return s3.send(new CompleteMultipartUploadCommand({
+                Bucket: destBucketName,
+                Key: destObjName,
                 UploadId: uploadId,
                 MultipartUpload: {
                     Parts: [
                         { ETag: etag, PartNumber: 1 },
                     ],
                 },
-                }, (err, res) => {
-                    checkNoError(err);
+            })).then(res => {
                 assert.strictEqual(res.Bucket, destBucketName);
                 assert.strictEqual(res.Key, destObjName);
                 // AWS confirmed final ETag for MPU
                 assert.strictEqual(res.ETag,
                     '"db77ebbae9e9f5a244a26b86193ad818-1"');
-                    done();
-                });
-            });
-        });
+            });
+        }));
 
-        it('should return InvalidArgument error given invalid range', done => {
-            s3.putObject({
+        it('should return InvalidArgument error given invalid range', () => s3.send(new PutObjectCommand({
             Bucket: sourceBucketName,
             Key: sourceObjName,
             Body: Buffer.alloc(oneHundredMBPlus11, 'packing'),
-            }, err => {
-                checkNoError(err);
-                s3.uploadPartCopy({ Bucket: destBucketName,
+        })).then(() => s3.send(new UploadPartCopyCommand({
+            Bucket: destBucketName,
             Key: destObjName,
             CopySource: `${sourceBucketName}/${sourceObjName}`,
             PartNumber: 1,
             UploadId: uploadId,
             CopySourceRange: 'bad-range-parameter',
-                },
-                err => {
-                    checkError(err, 'InvalidArgument');
-                    done();
-                });
-            });
-        });
+        })).then(() => {
+            throw new Error('Expected InvalidArgument error but copy succeeded');
+        }, err => {
+            checkError(err, 'InvalidArgument');
+        })));
 
         it('should return EntityTooLarge error if attempt to copy ' +
             'object larger than max and do not specify smaller ' +
-            'range in request', done => {
-            s3.putObject({
+            'range in request', () => s3.send(new PutObjectCommand({
             Bucket: sourceBucketName,
             Key: sourceObjName,
             Body: Buffer.alloc(oneHundredMBPlus11, 'packing'),
-            }, err => {
-                checkNoError(err);
-                s3.uploadPartCopy({ Bucket: destBucketName,
+        })).then(() => s3.send(new UploadPartCopyCommand({
+            Bucket: destBucketName,
             Key: destObjName,
             CopySource: `${sourceBucketName}/${sourceObjName}`,
             PartNumber: 1,
             UploadId: uploadId,
-                },
-                err => {
-                    checkError(err, 'EntityTooLarge');
-                    done();
-                });
-            });
-        });
+        }))).then(() => {
+            throw new Error('Expected EntityTooLarge error but copy succeeded');
+        }, err => {
+            checkError(err, 'EntityTooLarge');
+        }));
 
         it('should return EntityTooLarge error if attempt to copy ' +
             'object larger than max and specify too large ' +
-            'range in request', done => {
-            s3.putObject({
+            'range in request', () => s3.send(new PutObjectCommand({
             Bucket: sourceBucketName,
             Key: sourceObjName,
             Body: Buffer.alloc(oneHundredMBPlus11, 'packing'),
-            }, err => {
-                checkNoError(err);
-                s3.uploadPartCopy({ Bucket: destBucketName,
+        })).then(() => s3.send(new UploadPartCopyCommand({
+            Bucket: destBucketName,
             Key: destObjName,
             CopySource: `${sourceBucketName}/${sourceObjName}`,
             PartNumber: 1,
             UploadId: uploadId,
             CopySourceRange: `bytes=0-${oneHundredMBPlus11}`,
-                },
-                err => {
-                    checkError(err, 'EntityTooLarge');
-                    done();
-                });
-            });
-        });
+        }))).then(() => {
+            throw new Error('Expected EntityTooLarge error but copy succeeded');
+        }, err => {
+            checkError(err, 'EntityTooLarge');
+        }));
 
         it('should succeed if attempt to copy ' +
             'object larger than max but specify acceptable ' +
-            'range in request', done => {
-            s3.putObject({
+            'range in request', () => s3.send(new PutObjectCommand({
             Bucket: sourceBucketName,
             Key: sourceObjName,
             Body: Buffer.alloc(oneHundredMBPlus11, 'packing'),
-            }, err => {
-                checkNoError(err);
-                s3.uploadPartCopy({ Bucket: destBucketName,
+        })).then(() => s3.send(new UploadPartCopyCommand({
+            Bucket: destBucketName,
             Key: destObjName,
             CopySource: `${sourceBucketName}/${sourceObjName}`,
             PartNumber: 1,
             UploadId: uploadId,
             CopySourceRange: 'bytes=0-100',
-                },
-                err => {
-                    checkNoError(err);
-                    done();
-                });
-            });
-        });
+        }))).catch(err => {
+            checkNoError(err);
+        }));
 
         it('should copy a 0 byte object part from a source bucket to a ' +
-        'different destination bucket and complete the MPU', done => {
+        'different destination bucket and complete the MPU', () => {
             const emptyFileETag = '"d41d8cd98f00b204e9800998ecf8427e"';
-            s3.putObject({
+            return s3.send(new PutObjectCommand({
                 Bucket: sourceBucketName,
                 Key: sourceObjName,
                 Body: '',
-            }, () => {
-                s3.uploadPartCopy({ Bucket: destBucketName,
+            })).then(() => s3.send(new UploadPartCopyCommand({
+                Bucket: destBucketName,
                 Key: destObjName,
                 CopySource: `${sourceBucketName}/${sourceObjName}`,
                 PartNumber: 1,
                 UploadId: uploadId,
-            },
-            (err, res) => {
-                checkNoError(err);
-                assert.strictEqual(res.ETag, emptyFileETag);
-                assert(res.LastModified);
-                s3.completeMultipartUpload({
-                    Bucket: destBucketName,
-                    Key: destObjName,
-                    UploadId: uploadId,
-                    MultipartUpload: {
-                        Parts: [
-                            { ETag: emptyFileETag, PartNumber: 1 },
-                        ],
-                    },
-                }, (err, res) => {
-                    checkNoError(err);
-                    assert.strictEqual(res.Bucket, destBucketName);
-                    assert.strictEqual(res.Key, destObjName);
+            })).then(res => {
+                assert.strictEqual(res.CopyPartResult.ETag, emptyFileETag);
+                assert(res.CopyPartResult.LastModified);
+                return s3.send(new CompleteMultipartUploadCommand({
+                    Bucket: destBucketName,
+                    Key: destObjName,
+                    UploadId: uploadId,
+                    MultipartUpload: {
+                        Parts: [
+                            { ETag: emptyFileETag, PartNumber: 1 },
+                        ],
+                    },
+                })).then(res => {
+                    assert.strictEqual(res.Bucket, destBucketName);
+                    assert.strictEqual(res.Key, destObjName);
                     // AWS confirmed final ETag for MPU
-                    assert.strictEqual(res.ETag,
-                        '"59adb24ef3cdbe0297f05b395827453f-1"');
-                    done();
-                });
-            });
+                    assert.strictEqual(res.ETag,
+                        '"59adb24ef3cdbe0297f05b395827453f-1"');
+                });
+            }));
         });
 
         it('should copy a part using a range header from a source bucket ' +
-        'to a different destination bucket and complete the MPU', done => {
+        'to a different destination bucket and complete the MPU', () => {
             const rangeETag = '"ac1be00f1f162e20d58099eec2ea1c70"';
             // AWS confirmed final ETag for MPU
             const finalMpuETag = '"bff2a6af3adfd8e107a06de01d487176-1"';
-            s3.uploadPartCopy({ Bucket: destBucketName,
+            return s3.send(new UploadPartCopyCommand({
+                Bucket: destBucketName,
                 Key: destObjName,
                 CopySource: `${sourceBucketName}/${sourceObjName}`,
                 PartNumber: 1,
                 CopySourceRange: 'bytes=0-3',
                 UploadId: uploadId,
-            },
-            (err, res) => {
-                checkNoError(err);
-                assert.strictEqual(res.ETag, rangeETag);
-                assert(res.LastModified);
-                
s3.completeMultipartUpload({ + })).then(res => { + assert.strictEqual(res.CopyPartResult.ETag, rangeETag); + assert(res.CopyPartResult.LastModified); + return s3.send(new CompleteMultipartUploadCommand({ + Bucket: destBucketName, + Key: destObjName, + UploadId: uploadId, + MultipartUpload: { + Parts: [ + { ETag: rangeETag, PartNumber: 1 }, + ], + }, + })).then(res => { + assert.strictEqual(res.Bucket, destBucketName); + assert.strictEqual(res.Key, destObjName); + assert.strictEqual(res.ETag, finalMpuETag); + return s3.send(new GetObjectCommand({ Bucket: destBucketName, Key: destObjName, - UploadId: uploadId, - MultipartUpload: { - Parts: [ - { ETag: rangeETag, PartNumber: 1 }, - ], - }, - }, (err, res) => { - checkNoError(err); - assert.strictEqual(res.Bucket, destBucketName); - assert.strictEqual(res.Key, destObjName); + })).then(async res => { assert.strictEqual(res.ETag, finalMpuETag); - s3.getObject({ - Bucket: destBucketName, - Key: destObjName, - }, (err, res) => { - checkNoError(err); - assert.strictEqual(res.ETag, finalMpuETag); - assert.strictEqual(res.ContentLength, 4); - assert.strictEqual(res.Body.toString(), 'I am'); - done(); - }); + assert.strictEqual(res.ContentLength, 4); + const body = await res.Body.transformToString(); + assert.strictEqual(body, 'I am'); }); }); + }); }); describe('When copy source was put by MPU', () => { @@ -338,11 +301,11 @@ describe('Object Part Copy', () => { const otherPartBuff = Buffer.alloc(5242880, 1); otherMd5HashPart.update(otherPartBuff); const otherPartHash = otherMd5HashPart.digest('hex'); - return s3.createMultipartUpload({ + return s3.send(new CreateMultipartUploadCommand({ Bucket: sourceBucketName, Key: sourceMpuKey, - }).promise().then(iniateRes => { - sourceMpuId = iniateRes.UploadId; + })).then(initiateRes => { + sourceMpuId = initiateRes.UploadId; }).catch(err => { process.stdout.write(`Error initiating MPU ' + 'in MPU beforeEach: ${err}\n`); @@ -352,13 +315,13 @@ describe('Object Part Copy', () => { for (let i = 1; i < 10; i++) { const partBuffHere = i % 2 ? partBuff : otherPartBuff; const partHashHere = i % 2 ? 
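// ---- reviewer note (aside, not part of the diff) ---------------------------
// In the GetObject check above, `res.Body` is no longer a Buffer: the v3
// client returns a stream, so the test awaits `transformToString()`. A sketch,
// inside an async test:
const getRes = await s3.send(new GetObjectCommand({ Bucket: destBucketName, Key: destObjName }));
const bodyText = await getRes.Body.transformToString(); // utf-8 by default
assert.strictEqual(bodyText, 'I am');
// For binary comparisons, `await getRes.Body.transformToByteArray()` yields a Uint8Array.
// -----------------------------------------------------------------------------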
partHash : otherPartHash; - partUploads.push(s3.uploadPart({ + partUploads.push(s3.send(new UploadPartCommand({ Bucket: sourceBucketName, Key: sourceMpuKey, PartNumber: i, UploadId: sourceMpuId, Body: partBuffHere, - }).promise()); + }))); parts.push({ ETag: partHashHere, PartNumber: i, @@ -372,14 +335,14 @@ describe('Object Part Copy', () => { throw err; }).then(() => { process.stdout.write('completing mpu'); - return s3.completeMultipartUpload({ + return s3.send(new CompleteMultipartUploadCommand({ Bucket: sourceBucketName, Key: sourceMpuKey, UploadId: sourceMpuId, MultipartUpload: { Parts: parts, }, - }).promise(); + })); }).then(() => { process.stdout.write('finished completing mpu'); }).catch(err => { @@ -388,60 +351,60 @@ describe('Object Part Copy', () => { }); }); - afterEach(() => s3.abortMultipartUpload({ + afterEach(() => s3.send(new AbortMultipartUploadCommand({ Bucket: sourceBucketName, Key: sourceMpuKey, UploadId: sourceMpuId, - }).promise().catch(err => { - if (err.code !== 'NoSuchUpload' - && err.code !== 'NoSuchBucket') { + })).catch(err => { + if (err.name !== 'NoSuchUpload' + && err.name !== 'NoSuchBucket') { process.stdout.write(`Error in afterEach: ${err}\n`); throw err; } })); it('should copy a part from a source bucket to a different ' + - 'destination bucket', done => { + 'destination bucket', () => { process.stdout.write('Entered first mpu test'); - return s3.uploadPartCopy({ Bucket: destBucketName, + return s3.send(new UploadPartCopyCommand({ + Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceMpuKey}`, PartNumber: 1, UploadId: uploadId, - }, - (err, res) => { - checkNoError(err); - assert.strictEqual(res.ETag, - totalMpuObjectHash); - assert(res.LastModified); - done(); - }); + })).then(res => { + assert.strictEqual(res.CopyPartResult.ETag, + totalMpuObjectHash); + assert(res.CopyPartResult.LastModified); + }); }); it('should copy two parts from a source bucket to a different ' + 'destination bucket and complete the MPU', () => { process.stdout.write('Putting first part in MPU test'); - return s3.uploadPartCopy({ Bucket: destBucketName, + return s3.send(new UploadPartCopyCommand({ + Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceMpuKey}`, PartNumber: 1, UploadId: uploadId, - }).promise().then(res => { - assert.strictEqual(res.ETag, totalMpuObjectHash); - assert(res.LastModified); + })).then(res => { + assert.strictEqual(res.CopyPartResult.ETag, totalMpuObjectHash); + assert(res.CopyPartResult.LastModified); }).then(() => { process.stdout.write('Putting second part in MPU test'); - return s3.uploadPartCopy({ Bucket: destBucketName, + return s3.send(new UploadPartCopyCommand({ + Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceMpuKey}`, PartNumber: 2, UploadId: uploadId, - }).promise().then(res => { - assert.strictEqual(res.ETag, totalMpuObjectHash); - assert(res.LastModified); + })).then(res => { + assert.strictEqual(res.CopyPartResult.ETag, totalMpuObjectHash); + assert(res.CopyPartResult.LastModified); }).then(() => { process.stdout.write('Completing MPU'); - return s3.completeMultipartUpload({ + return s3.send(new CompleteMultipartUploadCommand({ Bucket: destBucketName, Key: destObjName, UploadId: uploadId, @@ -451,16 +414,16 @@ describe('Object Part Copy', () => { { ETag: totalMpuObjectHash, PartNumber: 2 }, ], }, - }).promise(); - }).then(res => { - assert.strictEqual(res.Bucket, destBucketName); - assert.strictEqual(res.Key, destObjName); - // combined ETag 
returned by AWS (combination of part ETags - // with number of parts at the end) + })).then(res => { + assert.strictEqual(res.Bucket, destBucketName); + assert.strictEqual(res.Key, destObjName); + // combined ETag returned by AWS (combination of part ETags + // with number of parts at the end) assert.strictEqual(res.ETag, '"5bba96810ff449d94aa8f5c5a859b0cb-2"'); - }).catch(err => { - checkNoError(err); + }).catch(err => { + checkNoError(err); + }); }); }); }); @@ -475,29 +438,31 @@ describe('Object Part Copy', () => { // with number of parts at the end) const finalCombinedETag = '"e08ede4e8b942e18537cb2289f613ae3-2"'; - return s3.uploadPartCopy({ Bucket: destBucketName, + return s3.send(new UploadPartCopyCommand({ + Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceMpuKey}`, PartNumber: 1, UploadId: uploadId, CopySourceRange: 'bytes=5242890-15242880', - }).promise().then(res => { - assert.strictEqual(res.ETag, part1ETag); - assert(res.LastModified); + })).then(res => { + assert.strictEqual(res.CopyPartResult.ETag, part1ETag); + assert(res.CopyPartResult.LastModified); }).then(() => { process.stdout.write('Putting second part in MPU test'); - return s3.uploadPartCopy({ Bucket: destBucketName, + return s3.send(new UploadPartCopyCommand({ + Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceMpuKey}`, PartNumber: 2, UploadId: uploadId, CopySourceRange: 'bytes=15242891-30242991', - }).promise().then(res => { - assert.strictEqual(res.ETag, part2ETag); - assert(res.LastModified); + })).then(res => { + assert.strictEqual(res.CopyPartResult.ETag, part2ETag); + assert(res.CopyPartResult.LastModified); }).then(() => { process.stdout.write('Completing MPU'); - return s3.completeMultipartUpload({ + return s3.send(new CompleteMultipartUploadCommand({ Bucket: destBucketName, Key: destObjName, UploadId: uploadId, @@ -507,24 +472,24 @@ describe('Object Part Copy', () => { { ETag: part2ETag, PartNumber: 2 }, ], }, - }).promise(); - }).then(res => { - assert.strictEqual(res.Bucket, destBucketName); - assert.strictEqual(res.Key, destObjName); - assert.strictEqual(res.ETag, finalCombinedETag); - }).then(() => { - process.stdout.write('Getting new object'); - return s3.getObject({ - Bucket: destBucketName, - Key: destObjName, - }).promise(); - }).then(res => { - assert.strictEqual(res.ContentLength, 25000092); - assert.strictEqual(res.ETag, finalCombinedETag); - }) - .catch(err => { - checkNoError(err); - }); + })).then(res => { + assert.strictEqual(res.Bucket, destBucketName); + assert.strictEqual(res.Key, destObjName); + assert.strictEqual(res.ETag, finalCombinedETag); + }).then(() => { + process.stdout.write('Getting new object'); + return s3.send(new GetObjectCommand({ + Bucket: destBucketName, + Key: destObjName, + })).then(res => { + assert.strictEqual(res.ContentLength, 25000092); + assert.strictEqual(res.ETag, finalCombinedETag); + }) + .catch(err => { + checkNoError(err); + }); + }); + }); }); }); @@ -532,27 +497,29 @@ describe('Object Part Copy', () => { // AWS response etag for this completed MPU const finalObjETag = '"db77ebbae9e9f5a244a26b86193ad818-1"'; process.stdout.write('Putting first part in MPU test'); - return s3.uploadPartCopy({ Bucket: destBucketName, + return s3.send(new UploadPartCopyCommand({ + Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceMpuKey}`, PartNumber: 1, UploadId: uploadId, - }).promise().then(res => { - assert.strictEqual(res.ETag, totalMpuObjectHash); - 
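// ---- reviewer note (aside, not part of the diff) ---------------------------
// The "combined ETag" mentioned in the comment above is the standard S3
// multipart ETag: the MD5 of the concatenated binary MD5 digests of each part,
// with the part count appended. A sketch of how fixture values such as
// '"5bba96810ff449d94aa8f5c5a859b0cb-2"' relate to the part hashes:
const crypto = require('crypto');

function combinedMpuETag(partETags) {
    // strip any quotes, decode each part MD5 from hex, concatenate, re-hash
    const digests = partETags.map(tag => Buffer.from(tag.replace(/"/g, ''), 'hex'));
    const md5 = crypto.createHash('md5').update(Buffer.concat(digests)).digest('hex');
    return `"${md5}-${partETags.length}"`;
}
// e.g. combinedMpuETag([totalMpuObjectHash, totalMpuObjectHash]) for the two-part case
// -----------------------------------------------------------------------------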
assert(res.LastModified); + })).then(res => { + assert.strictEqual(res.CopyPartResult.ETag, totalMpuObjectHash); + assert(res.CopyPartResult.LastModified); }).then(() => { process.stdout.write('Overwriting first part in MPU test'); - return s3.uploadPartCopy({ Bucket: destBucketName, + return s3.send(new UploadPartCopyCommand({ + Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, PartNumber: 1, - UploadId: uploadId }).promise(); - }).then(res => { - assert.strictEqual(res.ETag, etag); - assert(res.LastModified); - }).then(() => { + UploadId: uploadId + }) + ).then(res => { + assert.strictEqual(res.CopyPartResult.ETag, etag); + assert(res.CopyPartResult.LastModified); process.stdout.write('Completing MPU'); - return s3.completeMultipartUpload({ + return s3.send(new CompleteMultipartUploadCommand({ Bucket: destBucketName, Key: destObjName, UploadId: uploadId, @@ -561,24 +528,26 @@ describe('Object Part Copy', () => { { ETag: etag, PartNumber: 1 }, ], }, - }).promise(); - }).then(res => { - assert.strictEqual(res.Bucket, destBucketName); - assert.strictEqual(res.Key, destObjName); - assert.strictEqual(res.ETag, finalObjETag); - }).then(() => { - process.stdout.write('Getting object put by MPU with ' + - 'overwrite part'); - return s3.getObject({ - Bucket: destBucketName, - Key: destObjName, - }).promise(); - }).then(res => { - assert.strictEqual(res.ETag, finalObjETag); - }).catch(err => { - checkNoError(err); + }) + ).then(res => { + assert.strictEqual(res.Bucket, destBucketName); + assert.strictEqual(res.Key, destObjName); + assert.strictEqual(res.ETag, finalObjETag); + }).then(() => { + process.stdout.write('Getting object put by MPU with ' + + 'overwrite part'); + return s3.send(new GetObjectCommand({ + Bucket: destBucketName, + Key: destObjName, + })).then(res => { + assert.strictEqual(res.ETag, finalObjETag); + }).catch(err => { + checkNoError(err); + }); + }); }); }); + }); it('should not corrupt object if overwriting an existing part by copying a part ' + 'while the MPU is being completed', async () => { @@ -586,68 +555,57 @@ describe('Object Part Copy', () => { process.stdout.write('Putting first part in MPU test"'); const randomDestObjName = `copycatobject${Math.floor(Math.random() * 100000)}`; - const initiateRes = await s3 - .createMultipartUpload({ + const initiateRes = await s3.send(new CreateMultipartUploadCommand({ Bucket: destBucketName, Key: randomDestObjName, - }) - .promise(); + })); const uploadId = initiateRes.UploadId; - const res = await s3 - .uploadPartCopy({ + const res = await s3.send(new UploadPartCopyCommand({ Bucket: destBucketName, Key: randomDestObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, PartNumber: 1, UploadId: uploadId, - }) - .promise(); - assert.strictEqual(res.ETag, etag); - assert(res.LastModified); + })); + assert.strictEqual(res.CopyPartResult.ETag, etag); + assert(res.CopyPartResult.LastModified); process.stdout.write( 'Overwriting first part in MPU test and completing MPU at the same time', ); const [completeRes, uploadRes] = await Promise.all([ - s3 - .completeMultipartUpload({ + s3.send(new CompleteMultipartUploadCommand({ Bucket: destBucketName, Key: randomDestObjName, UploadId: uploadId, MultipartUpload: { Parts: [{ ETag: etag, PartNumber: 1 }], }, - }) - .promise() - .catch(err => { + })).catch(err => { throw err; }), - s3 - .uploadPartCopy({ + s3.send(new UploadPartCopyCommand({ Bucket: destBucketName, Key: randomDestObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, 
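// ---- reviewer note (aside, not part of the diff) ---------------------------
// The overwrite test above leans on multipart semantics: copying to an
// already-used PartNumber replaces that part, and CompleteMultipartUpload must
// then list the replacing part's ETag. Schematic flow, with `firstParams` and
// `secondParams` standing in for the two CopySource variants used above:
await s3.send(new UploadPartCopyCommand({ ...firstParams, PartNumber: 1 }));
const second = await s3.send(new UploadPartCopyCommand({ ...secondParams, PartNumber: 1 })); // replaces part 1
await s3.send(new CompleteMultipartUploadCommand({
    Bucket: destBucketName,
    Key: destObjName,
    UploadId: uploadId,
    MultipartUpload: { Parts: [{ ETag: second.CopyPartResult.ETag, PartNumber: 1 }] },
}));
// -----------------------------------------------------------------------------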
PartNumber: 1, UploadId: uploadId, - }) - .promise() + })) .catch(err => { const completeMPUFinishedEarlier = - err && err.code === 'NoSuchKey'; + err.name === 'NoSuchKey'; if (completeMPUFinishedEarlier) { return Promise.resolve(null); } throw err; }), ]); if (uploadRes !== null) { - assert.strictEqual(uploadRes.ETag, etag); - assert(uploadRes.LastModified); + assert.strictEqual(uploadRes.CopyPartResult.ETag, etag); + assert(uploadRes.CopyPartResult.LastModified); } - assert.strictEqual(completeRes.Bucket, destBucketName); assert.strictEqual(completeRes.Key, randomDestObjName); assert.strictEqual(completeRes.ETag, finalObjETag); @@ -655,79 +613,58 @@ describe('Object Part Copy', () => { 'Getting object put by MPU with ' + 'overwrite part', ); const resGet = await s3 - .getObject({ + .send(new GetObjectCommand({ Bucket: destBucketName, Key: randomDestObjName, - }) - .promise(); + })); assert.strictEqual(resGet.ETag, finalObjETag); }); }); it('should return an error if no such upload initiated', - done => { - s3.uploadPartCopy({ Bucket: destBucketName, Key: destObjName, + () => s3.send(new UploadPartCopyCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, PartNumber: 1, UploadId: 'madeupuploadid444233232', - }, - err => { + })).then(() => { + throw new Error('Expected NoSuchUpload but request succeeded'); + }, err => { checkError(err, 'NoSuchUpload'); - done(); - }); - }); + })); it('should return an error if attempt to copy from nonexistent bucket', - done => { - s3.uploadPartCopy({ Bucket: destBucketName, Key: destObjName, + () => s3.send(new UploadPartCopyCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `nobucket453234/${sourceObjName}`, PartNumber: 1, UploadId: uploadId, - }, - err => { + })).then(() => { + throw new Error('Expected NoSuchBucket but request succeeded'); + }, err => { checkError(err, 'NoSuchBucket'); - done(); - }); - }); + })); it('should return an error if attempt to copy to nonexistent bucket', - done => { - s3.uploadPartCopy({ Bucket: 'nobucket453234', Key: destObjName, + () => s3.send(new UploadPartCopyCommand({ Bucket: 'nobucket453234', Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, PartNumber: 1, UploadId: uploadId, - }, - err => { + })).then(() => { + throw new Error('Expected NoSuchBucket but request succeeded'); + }, err => { checkError(err, 'NoSuchBucket'); - done(); - }); - }); + })); it('should return an error if attempt to copy nonexistent object', - done => { - s3.uploadPartCopy({ Bucket: destBucketName, Key: destObjName, + () => s3.send(new UploadPartCopyCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/nokey`, PartNumber: 1, UploadId: uploadId, - }, - err => { + })).then(() => { + throw new Error('Expected NoSuchKey but request succeeded'); + }, err => { checkError(err, 'NoSuchKey'); - done(); - }); - }); + })); it('should return an error if use invalid part number', - done => { - s3.uploadPartCopy({ Bucket: destBucketName, Key: destObjName, + () => s3.send(new UploadPartCopyCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/nokey`, PartNumber: 10001, UploadId: uploadId, - }, - err => { + })).then(() => { + throw new Error('Expected InvalidArgument but request succeeded'); + }, err => { checkError(err, 'InvalidArgument'); - done(); - }); - }); + })); const describeColdStorage = hasColdStorage ? 
describe : describe.skip; describeColdStorage('with cold storage', () => { @@ -740,15 +677,17 @@ describe('Object Part Copy', () => { }; fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archive, err => { assert.ifError(err); - s3.uploadPartCopy({ + s3.send(new UploadPartCopyCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, PartNumber: 1, UploadId: uploadId, - }, err => { - assert.strictEqual(err.code, 'InvalidObjectState'); - assert.strictEqual(err.statusCode, 403); + })).then(() => { + done(new Error('Expected failure but got success')); + }).catch(err => { + assert.strictEqual(err.name, 'InvalidObjectState'); + assert.strictEqual(err.$metadata.httpStatusCode, 403); done(); }); }); @@ -757,18 +696,21 @@ describe('Object Part Copy', () => { it('should copy a part of an object when it\'s transitioning to cold', done => { fakeMetadataTransition(sourceBucketName, sourceObjName, undefined, err => { assert.ifError(err); - s3.uploadPartCopy({ + s3.send(new UploadPartCopyCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, PartNumber: 1, UploadId: uploadId, - }, (err, res) => { - checkNoError(err); - assert.strictEqual(res.ETag, etag); - assert(res.LastModified); + })).then(res => { + assert.strictEqual(res.CopyPartResult.ETag, etag); + assert(res.CopyPartResult.LastModified); done(); + }).catch(err => { + done(err); }); + }); }); @@ -782,17 +724,19 @@ describe('Object Part Copy', () => { }; fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archiveCompleted, err => { assert.ifError(err); - s3.uploadPartCopy({ + s3.send(new UploadPartCopyCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, PartNumber: 1, UploadId: uploadId, - }, (err, res) => { - checkNoError(err); - assert.strictEqual(res.ETag, etag); - assert(res.LastModified); + })).then(res => { + assert.strictEqual(res.CopyPartResult.ETag, etag); + assert(res.CopyPartResult.LastModified); done(); + }).catch(err => { + done(err); }); }); }); @@ -805,20 +749,20 @@ describe('Object Part Copy', () => { beforeEach(() => { process.stdout.write('In other account before each'); - return otherAccountS3.createBucket({ Bucket: - otherAccountBucket }).promise() + return otherAccountS3.send(new CreateBucketCommand({ Bucket: + otherAccountBucket })) .catch(err => { process.stdout.write('Error creating other account ' + `bucket: ${err}\n`); throw err; }).then(() => { process.stdout.write('Initiating other account MPU'); - return otherAccountS3.createMultipartUpload({ + return otherAccountS3.send(new CreateMultipartUploadCommand({ Bucket: otherAccountBucket, Key: otherAccountKey, - }).promise(); - }).then(iniateRes => { - otherAccountUploadId = iniateRes.UploadId; + })); + }).then(initiateRes => { + otherAccountUploadId = initiateRes.UploadId; }).catch(err => { process.stdout.write('Error in other account ' + `beforeEach: ${err}\n`); @@ -827,69 +771,60 @@ describe('Object Part Copy', () => { }); afterEach(() => otherAccountBucketUtility.empty(otherAccountBucket) - .then(() => otherAccountS3.abortMultipartUpload({ + .then(() => otherAccountS3.send(new AbortMultipartUploadCommand({ Bucket: otherAccountBucket, Key: otherAccountKey, UploadId: otherAccountUploadId, - }).promise()) + }))) .catch(err => { - if (err.code !== 'NoSuchUpload') { + if (err.name !== 'NoSuchUpload') { process.stdout.write('Error in other account ' + `afterEach: ${err}\n`); throw err; } - }).then(() => 
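// ---- reviewer note (aside, not part of the diff) ---------------------------
// Error shape changed between SDK versions: the old tests read `err.code` and
// `err.statusCode`, while v3 exposes the code as the error's name and the HTTP
// status under `$metadata`, which is what the cold-storage test above now
// asserts. Sketch, with `params` standing in for the test's command input:
try {
    await s3.send(new UploadPartCopyCommand(params));
} catch (err) {
    assert.strictEqual(err.name, 'InvalidObjectState');    // v2: err.code
    assert.strictEqual(err.$metadata.httpStatusCode, 403); // v2: err.statusCode
}
// -----------------------------------------------------------------------------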
otherAccountBucketUtility - .deleteOne(otherAccountBucket)) + }).then(() => + otherAccountBucketUtility.deleteOne(otherAccountBucket)) ); it('should not allow an account without read permission on the ' + - 'source object to copy the object', done => { - otherAccountS3.uploadPartCopy({ Bucket: otherAccountBucket, + 'source object to copy the object', () => otherAccountS3.send(new UploadPartCopyCommand( + { Bucket: otherAccountBucket, Key: otherAccountKey, CopySource: `${sourceBucketName}/${sourceObjName}`, PartNumber: 1, UploadId: otherAccountUploadId, - }, + })).then(() => { + throw new Error('Expected AccessDenied but request succeeded'); + }, err => { checkError(err, 'AccessDenied'); - done(); - }); - }); + })); it('should not allow an account without write permission on the ' + - 'destination bucket to upload part copy the object', done => { - otherAccountS3.putObject({ Bucket: otherAccountBucket, - Key: otherAccountKey, Body: '' }, () => { - otherAccountS3.uploadPartCopy({ Bucket: destBucketName, + 'destination bucket to upload part copy the object', () => { + return otherAccountS3.send(new PutObjectCommand({ Bucket: otherAccountBucket, + Key: otherAccountKey, Body: '' })).then(() => otherAccountS3.send( + new UploadPartCopyCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${otherAccountBucket}/${otherAccountKey}`, PartNumber: 1, UploadId: uploadId, - }, - err => { - checkError(err, 'AccessDenied'); - done(); - }); - }); + })).then(() => { + throw new Error('Expected AccessDenied but request succeeded'); + }, err => checkError(err, 'AccessDenied'))); }); it('should allow an account with read permission on the ' + 'source object and write permission on the destination ' + - 'bucket to upload part copy the object', done => { - s3.putObjectAcl({ Bucket: sourceBucketName, - Key: sourceObjName, ACL: 'public-read' }, () => { - otherAccountS3.uploadPartCopy({ Bucket: otherAccountBucket, + 'bucket to upload part copy the object', () => s3.send(new PutObjectAclCommand( + { Bucket: sourceBucketName, + Key: sourceObjName, ACL: 'public-read' })).then(() => otherAccountS3.send(new UploadPartCopyCommand( + { Bucket: otherAccountBucket, Key: otherAccountKey, CopySource: `${sourceBucketName}/${sourceObjName}`, PartNumber: 1, UploadId: otherAccountUploadId, - }, - err => { + })).catch(err => { checkNoError(err); - done(); - }); - }); - }); + } + ))); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/corsHeaders.js b/tests/functional/aws-node-sdk/test/object/corsHeaders.js index 2647c7a95b..45d0e1a209 100644 --- a/tests/functional/aws-node-sdk/test/object/corsHeaders.js +++ b/tests/functional/aws-node-sdk/test/object/corsHeaders.js @@ -1,6 +1,37 @@ -const { S3 } = require('aws-sdk'); +const { S3Client, + ListObjectsCommand, + GetBucketAclCommand, + GetBucketCorsCommand, + GetBucketVersioningCommand, + GetBucketLocationCommand, + GetBucketWebsiteCommand, + ListMultipartUploadsCommand, + GetObjectCommand, + GetObjectAclCommand, + ListPartsCommand, + HeadBucketCommand, + HeadObjectCommand, + CreateBucketCommand, + PutBucketAclCommand, + PutBucketVersioningCommand, + PutBucketWebsiteCommand, + PutBucketCorsCommand, + PutObjectCommand, + PutObjectAclCommand, + CopyObjectCommand, + UploadPartCommand, + UploadPartCopyCommand, + CreateMultipartUploadCommand, + CompleteMultipartUploadCommand, + DeleteObjectsCommand, + DeleteBucketCommand, + DeleteBucketWebsiteCommand, + DeleteBucketCorsCommand, + DeleteObjectCommand, + AbortMultipartUploadCommand, + ListBucketsCommand } = require('@aws-sdk/client-s3'); +const { promisify } = require('util'); const assert = require('assert'); -const async = require('async'); const getConfig = 
require('../support/config'); const { methodRequest } = require('../../lib/utility/cors-util'); @@ -8,8 +39,10 @@ const { generateCorsParams } = require('../../lib/utility/cors-util'); const { WebsiteConfigTester } = require('../../lib/utility/website-util'); const { removeAllVersions } = require('../../lib/utility/versioning-util'); +const methodRequestPromise = promisify(methodRequest); + const config = getConfig('default', { signatureVersion: 'v4' }); -const s3 = new S3(config); +const s3 = new S3Client(config); const bucket = 'bucketcorsheadertest'; const objectKey = 'objectKeyName'; @@ -25,77 +58,77 @@ const defaultOptions = { const apiMethods = [ { description: 'GET bucket (list objects)', - action: s3.listObjects, + action: ListObjectsCommand, params: { Bucket: bucket }, }, { description: 'GET bucket ACL', - action: s3.getBucketAcl, + action: GetBucketAclCommand, params: { Bucket: bucket }, }, { description: 'GET bucket CORS', - action: s3.getBucketCors, + action: GetBucketCorsCommand, params: { Bucket: bucket }, }, { description: 'GET bucket versioning', - action: s3.getBucketVersioning, + action: GetBucketVersioningCommand, params: { Bucket: bucket }, }, { description: 'GET bucket location', - action: s3.getBucketLocation, + action: GetBucketLocationCommand, params: { Bucket: bucket }, }, { description: 'GET bucket website', - action: s3.getBucketWebsite, + action: GetBucketWebsiteCommand, params: { Bucket: bucket }, }, { description: 'GET bucket uploads (list multipart uploads)', - action: s3.listMultipartUploads, + action: ListMultipartUploadsCommand, params: { Bucket: bucket }, }, { description: 'GET object', - action: s3.getObject, + action: GetObjectCommand, params: { Bucket: bucket, Key: objectKey }, }, { description: 'GET object ACL', - action: s3.getObjectAcl, + action: GetObjectAclCommand, params: { Bucket: bucket, Key: objectKey }, }, { description: 'GET object uploadId (list multipart upload parts)', - action: s3.listParts, + action: ListPartsCommand, params: { Bucket: bucket, Key: objectKey, UploadId: 'testId' }, }, { description: 'HEAD bucket', - action: s3.headBucket, + action: HeadBucketCommand, params: { Bucket: bucket }, }, { description: 'HEAD object', - action: s3.headObject, + action: HeadObjectCommand, params: { Bucket: bucket, Key: objectKey }, }, { description: 'PUT bucket (create bucket)', - action: s3.createBucket, + action: CreateBucketCommand, params: { Bucket: bucket }, }, { description: 'PUT bucket ACL', - action: s3.putBucketAcl, + action: PutBucketAclCommand, params: { Bucket: bucket, ACL: 'private' }, }, { description: 'PUT bucket versioning', - action: s3.putBucketVersioning, + action: PutBucketVersioningCommand, params: { Bucket: bucket, VersioningConfiguration: { @@ -105,7 +138,7 @@ const apiMethods = [ }, { description: 'PUT bucket website', - action: s3.putBucketWebsite, + action: PutBucketWebsiteCommand, params: { Bucket: bucket, WebsiteConfiguration: { @@ -115,7 +148,7 @@ const apiMethods = [ }, { description: 'PUT bucket CORS', - action: s3.putBucketCors, + action: PutBucketCorsCommand, params: { Bucket: bucket, CORSConfiguration: { @@ -128,12 +161,12 @@ const apiMethods = [ }, { description: 'PUT object', - action: s3.putObject, + action: PutObjectCommand, params: { Bucket: bucket, Key: objectKey }, }, { description: 'PUT object ACL', - action: s3.putObjectAcl, + action: PutObjectAclCommand, params: { Bucket: bucket, Key: objectKey, @@ -142,16 +175,16 @@ const apiMethods = [ }, { description: 'PUT object copy (copy object)', - action: 
s3.copyObject, + action: CopyObjectCommand, params: { Bucket: bucket, - CopySource: `${bucket}/${objectKey}`, // 'sourceBucket/testSource', + CopySource: `${bucket}/${objectKey}`, Key: objectKey, }, }, { description: 'PUT object part (upload part)', - action: s3.uploadPart, + action: UploadPartCommand, params: { Bucket: bucket, Key: objectKey, @@ -161,10 +194,10 @@ const apiMethods = [ }, { description: 'PUT object part copy (upload part copy)', - action: s3.uploadPartCopy, + action: UploadPartCopyCommand, params: { Bucket: bucket, - CopySource: `${bucket}/${objectKey}`, // 'sourceBucket/testSource', + CopySource: `${bucket}/${objectKey}`, Key: objectKey, PartNumber: 1, UploadId: 'testId', @@ -172,17 +205,17 @@ const apiMethods = [ }, { description: 'POST uploads (create multipart upload)', - action: s3.createMultipartUpload, + action: CreateMultipartUploadCommand, params: { Bucket: bucket, Key: objectKey }, }, { description: 'POST uploadId (complete multipart upload)', - action: s3.completeMultipartUpload, + action: CompleteMultipartUploadCommand, params: { Bucket: bucket, Key: objectKey, UploadId: 'testId' }, }, { description: 'POST delete (multi object delete)', - action: s3.deleteObjects, + action: DeleteObjectsCommand, params: { Bucket: bucket, Delete: { @@ -194,27 +227,27 @@ const apiMethods = [ }, { description: 'DELETE bucket', - action: s3.deleteBucket, + action: DeleteBucketCommand, params: { Bucket: bucket }, }, { description: 'DELETE bucket website', - action: s3.deleteBucketWebsite, + action: DeleteBucketWebsiteCommand, params: { Bucket: bucket }, }, { description: 'DELETE bucket CORS', - action: s3.deleteBucketCors, + action: DeleteBucketCorsCommand, params: { Bucket: bucket }, }, { description: 'DELETE object', - action: s3.deleteObject, + action: DeleteObjectCommand, params: { Bucket: bucket, Key: objectKey }, }, { description: 'DELETE object uploadId (abort multipart upload)', - action: s3.abortMultipartUpload, + action: AbortMultipartUploadCommand, params: { Bucket: bucket, Key: objectKey, UploadId: 'testId' }, }, ]; @@ -229,86 +262,126 @@ function _waitForAWS(callback, err) { } } -function _checkHeaders(action, params, origin, expectedHeaders, callback) { - function _runAssertions(resHeaders, cb) { +async function _checkHeaders(action, params, origin, expectedHeaders) { + function _runAssertions(resHeaders) { if (expectedHeaders) { Object.keys(expectedHeaders).forEach(key => { - assert.deepEqual(resHeaders[key], expectedHeaders[key], - `error header: ${key}`); + assert.deepEqual(resHeaders[key], expectedHeaders[key], `error header: ${key}`); }); } else { - // if no headersResponse provided, should not have these headers - // in the request - ['access-control-allow-origin', - 'access-control-allow-methods', - 'access-control-allow-credentials', - 'vary'].forEach(key => { - assert.strictEqual(resHeaders[key], undefined, - `Error: ${key} should not have value`); - }); + // if no expectedHeaders provided, should not have these headers in the response + ['access-control-allow-origin', + 'access-control-allow-methods', + 'access-control-allow-credentials', + 'vary'].forEach(key => { + assert.strictEqual(resHeaders[key], undefined, `Error: ${key} should not have value`); + }); } - cb(); } - const method = action.bind(s3); - const request = method(params); - // modify underlying http request object created by aws sdk to add - // origin header - request.on('build', () => { - request.httpRequest.headers.origin = origin; - }); - request.on('success', response => { - const 
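// ---- reviewer note (aside, not part of the diff) ---------------------------
// The v2 code removed below relied on request events ('build'/'success'/'error')
// to inject an Origin header and read raw response headers; v3 has no request
// events, so the replacement that follows uses the middleware stack. A
// stripped-down sketch of the same idea, assuming a configured client:
const sketchClient = new S3Client(config);
let captured = {};
sketchClient.middlewareStack.add(
    next => async args => {
        args.request.headers.origin = allowedOrigin; // inject outgoing header
        const result = await next(args);
        captured = result.response.headers;          // raw response headers
        return result;
    },
    { step: 'finalizeRequest', name: 'captureHeadersSketch' },
);
// -----------------------------------------------------------------------------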
resHeaders = response.httpResponse.headers; - _runAssertions(resHeaders, () => { - if (response.data.UploadId) { - // abort multipart upload before deleting bucket in afterEach - return s3.abortMultipartUpload({ Bucket: bucket, Key: objectKey, - UploadId: response.data.UploadId }, callback); + + // Create a new S3 client for each request to avoid middleware conflicts + const testS3 = new S3Client(config); + let capturedHeaders = {}; + + // Add middleware to capture response headers (similar to AWS SDK v2's event approach) + testS3.middlewareStack.add( + next => async args => { + if (origin) { + if (!args.request.headers) { + // eslint-disable-next-line no-param-reassign + args.request.headers = {}; + } + // eslint-disable-next-line no-param-reassign + args.request.headers['origin'] = origin; } - return callback(); - }); - }); - // CORS headers should still be sent in case of errors as long as - // request matches CORS configuration - request.on('error', () => { - const resHeaders = request.response.httpResponse.headers; - _runAssertions(resHeaders, callback); - }); - request.send(); + + try { + const result = await next(args); + + // Capture response headers (equivalent to request.on('success')) + if (result.response && result.response.headers) { + capturedHeaders = result.response.headers; + } else if (result.output && result.output.$metadata && result.output.$metadata.httpHeaders) { + capturedHeaders = result.output.$metadata.httpHeaders; + } + + return result; + } catch (error) { + // Capture headers from error response (equivalent to request.on('error')) + if (error.$response && error.$response.headers) { + capturedHeaders = error.$response.headers; + } else if (error.$metadata && error.$metadata.httpHeaders) { + capturedHeaders = error.$metadata.httpHeaders; + } + throw error; + } + }, + { + step: 'finalizeRequest', + name: 'captureHeaders', + priority: 'high' + } + ); + + try { + // eslint-disable-next-line new-cap + const command = new action(params); + const response = await testS3.send(command); + + // Clean up multipart upload if needed (equivalent to the original cleanup logic) + if (response.UploadId) { + await testS3.send(new AbortMultipartUploadCommand({ + Bucket: bucket, + Key: objectKey, + UploadId: response.UploadId + })); + } + + _runAssertions(capturedHeaders); + + } catch { + // CORS headers should still be sent in case of errors as long as + // request matches CORS configuration + _runAssertions(capturedHeaders); + } } describe('Cross Origin Resource Sharing requests', () => { beforeEach(done => { - s3.createBucket({ Bucket: bucket, ACL: 'public-read-write' }, err => { - if (err) { - process.stdout.write(`Error in beforeEach ${err}`); - } - return _waitForAWS(done, err); + s3.send(new CreateBucketCommand({ + Bucket: bucket, + ACL: 'public-read-write' + })) + .then(() => _waitForAWS(done)) + .catch(err => { + process.stdout.write(`Error in beforeEach: ${err}\n`); + _waitForAWS(done, err); }); }); afterEach(done => { - s3.deleteBucket({ Bucket: bucket }, err => { - if (err && err.code !== 'NoSuchBucket') { - process.stdout.write(`Error in afterEach ${err}`); - return _waitForAWS(done, err); - } - return _waitForAWS(done); - }); + s3.send(new DeleteBucketCommand({ Bucket: bucket })) + .then(() => _waitForAWS(done)) + .catch(err => { + if (err.name !== 'NoSuchBucket') { + process.stdout.write(`Error in afterEach ${err}`); + return _waitForAWS(done, err); + } + return _waitForAWS(done); + }); }); describe('on non-existing bucket', () => { - it('should not respond to request 
with CORS headers, even ' + - 'if request was sent with Origin header', done => { - _checkHeaders(s3.listObjects, { Bucket: 'nonexistingbucket' }, - allowedOrigin, null, done); + it('should not respond to request with CORS headers, even if request was sent with Origin header', + async () => { + await _checkHeaders(ListObjectsCommand, { Bucket: 'nonexistingbucket' }, allowedOrigin, null); }); }); describe('on bucket without CORS configuration', () => { - it('should not respond to request with CORS headers, even ' + - 'if request was sent with Origin header', done => { - _checkHeaders(s3.listObjects, { Bucket: bucket }, - allowedOrigin, null, done); + it('should not respond to request with CORS headers,' + + ' even if request was sent with Origin header', async () => { + await _checkHeaders(ListObjectsCommand, { Bucket: bucket }, allowedOrigin, null); }); }); @@ -326,12 +399,14 @@ describe('Cross Origin Resource Sharing requests', () => { vary, }; - beforeEach(done => s3.putBucketCors(corsParams, done)); + beforeEach(async () => { + await s3.send(new PutBucketCorsCommand(corsParams)); + }); afterEach(done => { removeAllVersions({ Bucket: bucket }, err => { - if (err && err.code !== 'NoSuchKey' && - err.code !== 'NoSuchBucket') { + if (err && err.name !== 'NoSuchKey' && + err.name !== 'NoSuchBucket') { process.stdout.write(`Unexpected err in afterEach: ${err}`); return done(err); } @@ -340,69 +415,57 @@ describe('Cross Origin Resource Sharing requests', () => { }); describe('when request Origin/method match CORS configuration', () => { - it('should not respond with CORS headers to GET service (list ' + - 'buckets), even if Origin/method match CORS rule', done => { - // no bucket specified in this request - _checkHeaders(s3.listBuckets, {}, allowedOrigin, - null, done); + it('should not respond with CORS headers to GET service (list buckets), ' + + 'even if Origin/method match CORS rule', async () => { + await _checkHeaders(ListBucketsCommand, {}, allowedOrigin, null); }); it('should not respond with CORS headers after deleting bucket, ' + - 'even if Origin/method match CORS rule', done => { - s3.deleteBucket({ Bucket: bucket }, err => { - assert.strictEqual(err, null, `Unexpected err ${err}`); - _checkHeaders(s3.listObjects, { Bucket: bucket }, - allowedOrigin, null, done); - }); + 'even if Origin/method match CORS rule', async () => { + await s3.send(new DeleteBucketCommand({ Bucket: bucket })); + await _checkHeaders(ListObjectsCommand, { Bucket: bucket }, allowedOrigin, null); }); apiMethods.forEach(method => { - it(`should respond to ${method.description} with CORS ` + - 'headers (access-control-allow-origin, access-control-allow-' + - 'methods, access-control-allow-credentials and vary)', done => { - _checkHeaders(method.action, method.params, allowedOrigin, - expectedHeaders, done); + it(`should respond to ${method.description} with CORS headers (access-control-allow-origin, + access-control-allow-methods, access-control-allow-credentials and vary)`, async () => { + await _checkHeaders(method.action, method.params, allowedOrigin, expectedHeaders); }); }); }); describe('when request Origin does not match CORS rule', () => { apiMethods.forEach(method => { - it(`should not respond to ${method.description} with ` + - 'CORS headers', done => { - _checkHeaders(method.action, method.params, - notAllowedOrigin, null, done); + it(`should not respond to ${method.description} with CORS headers`, async () => { + await _checkHeaders(method.action, method.params, notAllowedOrigin, null); }); }); }); 
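// ---- reviewer note (aside, not part of the diff) ---------------------------
// The website-endpoint tests below call methodRequestPromise, which this file
// now defines by wrapping the callback-style helper with util.promisify:
const { promisify } = require('util');
const methodRequestPromise = promisify(methodRequest);
// callback style: methodRequest(options, done);
// promise style:  await methodRequestPromise(options);
// promisify assumes methodRequest follows the (err, result) callback convention.
// -----------------------------------------------------------------------------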
}); - describe('on bucket with CORS configuration: ' + - 'allow PUT method and one origin', () => { + describe('on bucket with CORS configuration: allow PUT method and one origin', () => { const corsParams = generateCorsParams(bucket, { allowedMethods: ['PUT'], allowedOrigins: [allowedOrigin], }); - beforeEach(done => { - s3.putBucketCors(corsParams, done); + beforeEach(async () => { + await s3.send(new PutBucketCorsCommand(corsParams)); }); - afterEach(done => { - s3.deleteBucketCors({ Bucket: bucket }, done); + afterEach(async () => { + await s3.send(new DeleteBucketCorsCommand({ Bucket: bucket })); }); - it('when request method does not match CORS rule ' + - 'should not respond with CORS headers', done => { - _checkHeaders(s3.listObjects, { Bucket: bucket }, - allowedOrigin, null, done); + it('when request method does not match CORS rule should not respond with CORS headers', async () => { + await _checkHeaders(ListObjectsCommand, { Bucket: bucket }, allowedOrigin, null); }); }); describe('on bucket with CORS configuration and website configuration', - () => { + () => { const bucket = process.env.AWS_ON_AIR ? 'awsbucketwebsitetester' : - 'bucketwebsitetester'; + 'bucketwebsitetester'; const corsParams = generateCorsParams(bucket, { allowedMethods: ['GET', 'HEAD'], allowedOrigins: [allowedOrigin], @@ -418,82 +481,61 @@ describe('Cross Origin Resource Sharing requests', () => { const redirect = { HostName: 'www.google.com' }; webConfig.addRoutingRule(redirect, condition); - beforeEach(done => - async.series([ - next => s3.createBucket({ - Bucket: bucket, - ACL: 'public-read', - }, next), - next => s3.putBucketCors(corsParams, next), - next => s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }, next), - next => s3.putObject({ - Bucket: bucket, - Key: 'index.html', - ACL: 'public-read', - }, next), - ], err => { - assert.strictEqual(err, null, - `Unexpected err ${err} in beforeEach`); - done(err); - }) - ); - - afterEach(done => - s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, err => { - assert.strictEqual(err, null, - `Unexpected err ${err} in afterEach`); - s3.deleteBucket({ Bucket: bucket }, err => { - if (err) { - process.stdout.write(`Error in afterEach ${err}`); - return _waitForAWS(done, err); - } - return _waitForAWS(done); - }); - }) - ); + beforeEach(async () => { + await s3.send(new CreateBucketCommand({ Bucket: bucket, ACL: 'public-read' })); + await s3.send(new PutBucketCorsCommand(corsParams)); + await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket, WebsiteConfiguration: webConfig })); + await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'index.html', + ACL: 'public-read', + Body: 'test content' })); + }); - it('should respond with CORS headers at website endpoint (GET)', - done => { + afterEach(done => { + s3.send(new DeleteObjectCommand({ + Bucket: bucket, + Key: 'index.html' + })) + .then(() => s3.send(new DeleteBucketCommand({ Bucket: bucket }))) + .then(() => _waitForAWS(done)) + .catch(err => { + process.stdout.write(`Error in afterEach: ${err}\n`); + _waitForAWS(done, err); + }); + }); + + it('should respond with CORS headers at website endpoint (GET)', async () => { const headers = { Origin: allowedOrigin }; - methodRequest({ method: 'GET', bucket, headers, headersResponse, - code: 200, isWebsite: true }, done); + await methodRequestPromise({ method: 'GET', bucket, + headers, headersResponse, code: 200, isWebsite: true }); }); - it('should respond with CORS headers at website endpoint (GET) ' + - 'even in case of error', - 
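// ---- reviewer note (aside, not part of the diff) ---------------------------
// The beforeEach above shows the general conversion this diff applies to
// async.series chains: sequential awaits in an async hook, with mocha waiting
// on the returned promise. In miniature, using this file's fixtures:
beforeEach(async () => {
    await s3.send(new CreateBucketCommand({ Bucket: bucket, ACL: 'public-read' }));
    await s3.send(new PutBucketCorsCommand(corsParams));
});
// A rejected await fails the hook directly; no callback plumbing is needed.
// -----------------------------------------------------------------------------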
done => { + it('should respond with CORS headers at website endpoint (GET) even in case of error', async () => { const headers = { Origin: allowedOrigin }; - methodRequest({ method: 'GET', bucket, objectKey: 'test', - headers, headersResponse, code: 404, isWebsite: true }, done); + await methodRequestPromise({ method: 'GET', bucket, objectKey: 'test', + headers, headersResponse, code: 404, isWebsite: true }); }); - it('should respond with CORS headers at website endpoint (GET) ' + - 'even in case of redirect', - done => { + it('should respond with CORS headers at website endpoint (GET) even in case of redirect', async () => { const headers = { Origin: allowedOrigin }; - methodRequest({ method: 'GET', bucket, objectKey: 'redirect', - headers, headersResponse, code: 301, isWebsite: true }, done); + await methodRequestPromise({ method: 'GET', bucket, objectKey: 'redirect', + headers, headersResponse, code: 301, isWebsite: true }); }); - it('should respond with CORS headers at website endpoint (HEAD)', - done => { + it('should respond with CORS headers at website endpoint (HEAD)', async () => { const headers = { Origin: allowedOrigin }; - methodRequest({ method: 'HEAD', bucket, headers, headersResponse, - code: 200, isWebsite: true }, done); + await methodRequestPromise({ method: 'HEAD', bucket, headers, headersResponse, + code: 200, isWebsite: true }); }); }); - describe('on bucket with additional cors configuration', - () => { - afterEach(done => { - s3.deleteBucketCors({ Bucket: bucket }, done); + describe('on bucket with additional cors configuration', () => { + afterEach(async () => { + await s3.send(new DeleteBucketCorsCommand({ Bucket: bucket })); }); describe('cors configuration : AllowedHeaders', () => { const corsParams = generateCorsParams(bucket, defaultOptions); - corsParams.CORSConfiguration.CORSRules[0] - .AllowedHeaders = ['Content-Type']; + corsParams.CORSConfiguration.CORSRules[0].AllowedHeaders = ['Content-Type']; const headersResponse = { 'access-control-allow-origin': allowedOrigin, @@ -502,33 +544,30 @@ describe('Cross Origin Resource Sharing requests', () => { vary, }; - beforeEach(done => { - s3.putBucketCors(corsParams, done); + beforeEach(async () => { + await s3.send(new PutBucketCorsCommand(corsParams)); }); - it('should not return access-control-allow-headers response ' + - 'header even if request matches CORS rule and other access-' + - 'control headers are returned', done => { + it('should not return access-control-allow-headers response header ' + + 'even if request matches CORS rule and other access-control headers are returned', async () => { const headers = { 'Origin': allowedOrigin, 'Content-Type': 'testvalue', }; const headersOmitted = ['access-control-allow-headers']; - methodRequest({ method: 'GET', bucket, headers, headersResponse, - headersOmitted, code: 200 }, done); + await methodRequestPromise({ method: 'GET', bucket, headers, headersResponse, + headersOmitted, code: 200 }); }); - it('Request with matching Origin/method but additional headers ' + - 'that violate CORS rule:\n\t should still respond with access-' + - 'control headers (headers are only checked in preflight requests)', - done => { + it('Request with matching Origin/method but additional headers that violate CORS rule:\n\t should still ' + + 'respond with access-control headers (headers are only checked in preflight requests)', async () => { const headers = { Origin: allowedOrigin, Test: 'test', Expires: 86400, }; - methodRequest({ method: 'GET', bucket, headers, headersResponse, - code: 
200 }, done); + await methodRequestPromise({ method: 'GET', bucket, headers, + headersResponse, code: 200 }); }); }); @@ -546,15 +585,13 @@ describe('Cross Origin Resource Sharing requests', () => { ].forEach(elem => { describe(`cors configuration : ${elem.name}`, () => { const corsParams = generateCorsParams(bucket, defaultOptions); - corsParams.CORSConfiguration.CORSRules[0][elem.name] = - elem.testValue; + corsParams.CORSConfiguration.CORSRules[0][elem.name] = elem.testValue; - beforeEach(done => { - s3.putBucketCors(corsParams, done); + beforeEach(async () => { + await s3.send(new PutBucketCorsCommand(corsParams)); }); - it(`should respond with ${elem.header} header ` + - 'if request matches CORS rule', done => { + it(`should respond with ${elem.header} header if request matches CORS rule`, async () => { const headers = { Origin: allowedOrigin }; const headersResponse = { 'access-control-allow-origin': allowedOrigin, @@ -562,11 +599,8 @@ describe('Cross Origin Resource Sharing requests', () => { 'access-control-allow-credentials': 'true', vary, }; - headersResponse[elem.header] = - Array.isArray(elem.testValue) ? elem.testValue[0] : - elem.testValue; - methodRequest({ method: 'GET', bucket, headers, - headersResponse, code: 200 }, done); + headersResponse[elem.header] = Array.isArray(elem.testValue) ? elem.testValue[0] : elem.testValue; + await methodRequestPromise({ method: 'GET', bucket, headers, headersResponse, code: 200 }); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/corsPreflight.js b/tests/functional/aws-node-sdk/test/object/corsPreflight.js index 79ca135890..41109f6d3c 100644 --- a/tests/functional/aws-node-sdk/test/object/corsPreflight.js +++ b/tests/functional/aws-node-sdk/test/object/corsPreflight.js @@ -1,10 +1,18 @@ -const { S3 } = require('aws-sdk'); +const { + S3Client, + CreateBucketCommand, + DeleteBucketCommand, + PutBucketCorsCommand, + DeleteBucketCorsCommand, + PutObjectCommand, + DeleteObjectCommand, +} = require('@aws-sdk/client-s3'); const getConfig = require('../support/config'); const { methodRequest } = require('../../lib/utility/cors-util'); -const config = getConfig('default', { signatureVersion: 'v4' }); -const s3 = new S3(config); +const config = getConfig('default'); +const s3 = new S3Client(config); const bucket = 'bucketcorstester'; @@ -53,14 +61,14 @@ describe('Preflight CORS request on non-existing bucket', () => { describe('Preflight CORS request with existing bucket', () => { beforeEach(done => { - s3.createBucket({ Bucket: bucket, ACL: 'public-read' }, err => { - _waitForAWS(done, err); - }); + s3.send(new CreateBucketCommand({ Bucket: bucket, ACL: 'public-read' })) + .then(() => _waitForAWS(done)) + .catch(err => _waitForAWS(done, err)); }); afterEach(done => { - s3.deleteBucket({ Bucket: bucket }, err => { - _waitForAWS(done, err); - }); + s3.send(new DeleteBucketCommand({ Bucket: bucket })) + .then(() => _waitForAWS(done)) + .catch(err => _waitForAWS(done, err)); }); it('should allow GET on bucket without cors configuration even if ' + @@ -107,11 +115,15 @@ describe('Preflight CORS request with existing bucket', () => { }, }; beforeEach(done => { - s3.putBucketCors(corsParams, done); + s3.send(new PutBucketCorsCommand(corsParams)) + .then(() => done()) + .catch(done); }); afterEach(done => { - s3.deleteBucketCors({ Bucket: bucket }, done); + s3.send(new DeleteBucketCorsCommand({ Bucket: bucket })) + .then(() => done()) + .catch(done); }); methods.forEach(method => { @@ -171,11 +183,15 @@ describe('Preflight CORS request 
with existing bucket', () => { }, }; beforeEach(done => { - s3.putBucketCors(corsParams, done); + s3.send(new PutBucketCorsCommand(corsParams)) + .then(() => done()) + .catch(done); }); afterEach(done => { - s3.deleteBucketCors({ Bucket: bucket }, done); + s3.send(new DeleteBucketCorsCommand({ Bucket: bucket })) + .then(() => done()) + .catch(done); }); it('should respond with 200 and access control headers to OPTIONS ' + @@ -242,11 +258,15 @@ describe('Preflight CORS request with existing bucket', () => { describe(`CORS allows method "${allowedMethod}" and allows all origins`, () => { beforeEach(done => { - s3.putBucketCors(corsParams, done); + s3.send(new PutBucketCorsCommand(corsParams)) + .then(() => done()) + .catch(done); }); afterEach(done => { - s3.deleteBucketCors({ Bucket: bucket }, done); + s3.send(new DeleteBucketCorsCommand({ Bucket: bucket })) + .then(() => done()) + .catch(done); }); it('should respond with 200 and access control headers to ' + @@ -308,11 +328,15 @@ describe('Preflight CORS request with existing bucket', () => { describe(`CORS allows method GET and origin "${origin}"`, () => { beforeEach(done => { - s3.putBucketCors(corsParams, done); + s3.send(new PutBucketCorsCommand(corsParams)) + .then(() => done()) + .catch(done); }); afterEach(done => { - s3.deleteBucketCors({ Bucket: bucket }, done); + s3.send(new DeleteBucketCorsCommand({ Bucket: bucket })) + .then(() => done()) + .catch(done); }); [originWithoutWildcard, originReplaceWildcard] @@ -390,11 +414,15 @@ describe('Preflight CORS request with existing bucket', () => { }, }; beforeEach(done => { - s3.putBucketCors(corsParams, done); + s3.send(new PutBucketCorsCommand(corsParams)) + .then(() => done()) + .catch(done); }); afterEach(done => { - s3.deleteBucketCors({ Bucket: bucket }, done); + s3.send(new DeleteBucketCorsCommand({ Bucket: bucket })) + .then(() => done()) + .catch(done); }); it('if OPTIONS request matches rule with multiple origins, response ' + @@ -468,11 +496,15 @@ describe('Preflight CORS request with existing bucket', () => { }, }; beforeEach(done => { - s3.putBucketCors(corsParams, done); + s3.send(new PutBucketCorsCommand(corsParams)) + .then(() => done()) + .catch(done); }); afterEach(done => { - s3.deleteBucketCors({ Bucket: bucket }, done); + s3.send(new DeleteBucketCorsCommand({ Bucket: bucket })) + .then(() => done()) + .catch(done); }); it('should respond with 200 and access control headers to OPTIONS ' + @@ -554,11 +586,15 @@ describe('Preflight CORS request with existing bucket', () => { }, }; beforeEach(done => { - s3.putBucketCors(corsParams, done); + s3.send(new PutBucketCorsCommand(corsParams)) + .then(() => done()) + .catch(done); }); afterEach(done => { - s3.deleteBucketCors({ Bucket: bucket }, done); + s3.send(new DeleteBucketCorsCommand({ Bucket: bucket })) + .then(() => done()) + .catch(done); }); it('should return request access-control-request-headers value, ' + @@ -657,23 +693,23 @@ describe('Preflight CORS request with existing bucket', () => { }; const objectKey = 'testobject'; beforeEach(done => { - s3.putObject({ Key: objectKey, Bucket: bucket }, err => { - if (err) { - process.stdout.write(`err in beforeEach ${err}`); - done(err); - } - s3.putBucketCors(corsParams, done); - }); + s3.send(new PutObjectCommand({ Key: objectKey, Bucket: bucket })) + .then(() => s3.send(new PutBucketCorsCommand(corsParams))) + .then(() => done()) + .catch(done); }); afterEach(done => { - s3.deleteBucketCors({ Bucket: bucket }, err => { - if (err) { + s3.send(new 
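// ---- reviewer note (aside, not part of the diff) ---------------------------
// Where hooks keep their `done` callback, the diff bridges promises to
// callbacks with the pattern repeated above; both branches matter (shown here
// inside a `beforeEach(done => { ... })` body):
s3.send(new PutBucketCorsCommand(corsParams))
    .then(() => done()) // success: call done with no argument
    .catch(done);       // failure: hand mocha the error
// `.then(done)` alone would be wrong: it would pass the command output as
// done's first argument, which mocha interprets as a failure.
// -----------------------------------------------------------------------------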
DeleteBucketCorsCommand({ Bucket: bucket })) + .then(() => s3.send(new DeleteObjectCommand({ + Key: objectKey, + Bucket: bucket, + }))) + .then(() => done()) + .catch(err => { process.stdout.write(`err in afterEach ${err}`); done(err); - } - s3.deleteObject({ Key: objectKey, Bucket: bucket }, done); - }); + }); }); it('should respond with 200 and access control headers to OPTIONS ' + @@ -723,11 +759,15 @@ describe('Preflight CORS request with existing bucket', () => { }, }; beforeEach(done => { - s3.putBucketCors(corsParams, done); + s3.send(new PutBucketCorsCommand(corsParams)) + .then(() => done()) + .catch(done); }); afterEach(done => { - s3.deleteBucketCors({ Bucket: bucket }, done); + s3.send(new DeleteBucketCorsCommand({ Bucket: bucket })) + .then(() => done()) + .catch(done); }); it('with fake auth credentials: should respond with 200 and access ' + @@ -785,11 +825,15 @@ describe('Preflight CORS request with existing bucket', () => { }, }; beforeEach(done => { - s3.putBucketCors(corsParams, done); + s3.send(new PutBucketCorsCommand(corsParams)) + .then(() => done()) + .catch(done); }); afterEach(done => { - s3.deleteBucketCors({ Bucket: bucket }, done); + s3.send(new DeleteBucketCorsCommand({ Bucket: bucket })) + .then(() => done()) + .catch(done); }); it('if OPTIONS request matches CORS rule with ExposeHeader\'s, ' + @@ -829,11 +873,15 @@ describe('Preflight CORS request with existing bucket', () => { }, }; beforeEach(done => { - s3.putBucketCors(corsParams, done); + s3.send(new PutBucketCorsCommand(corsParams)) + .then(() => done()) + .catch(done); }); afterEach(done => { - s3.deleteBucketCors({ Bucket: bucket }, done); + s3.send(new DeleteBucketCorsCommand({ Bucket: bucket })) + .then(() => done()) + .catch(done); }); it('if OPTIONS request matches CORS rule with max age seconds, ' + diff --git a/tests/functional/aws-node-sdk/test/object/deleteMpu.js b/tests/functional/aws-node-sdk/test/object/deleteMpu.js index 60978a3001..60fdad6f13 100644 --- a/tests/functional/aws-node-sdk/test/object/deleteMpu.js +++ b/tests/functional/aws-node-sdk/test/object/deleteMpu.js @@ -1,4 +1,10 @@ const assert = require('assert'); +const { + CreateBucketCommand, + AbortMultipartUploadCommand, + CreateMultipartUploadCommand, + UploadPartCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); @@ -19,62 +25,76 @@ const confLocations = [ describe('DELETE multipart', () => { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); - const s3 = bucketUtil.s3; + const s3Client = bucketUtil.s3; function _assertStatusCode(uploadId, statusCodeExpected, callback) { - const request = - s3.abortMultipartUpload({ Bucket: bucket, Key: key, - UploadId: uploadId }, err => { - const statusCode = - request.response.httpResponse.statusCode; - assert.strictEqual(statusCode, statusCodeExpected, - `Found unexpected statusCode ${statusCode}`); - if (statusCode === 204) { - assert.strictEqual(err, null, - `Expected no err but found ${err}`); - return callback(err); - } - return callback(); + const command = new AbortMultipartUploadCommand({ + Bucket: bucket, + Key: key, + UploadId: uploadId, }); + + s3Client.send(command) + .then(response => { + const statusCode = + response?.$metadata?.httpStatusCode; + assert.strictEqual(statusCode, statusCodeExpected, + `Found unexpected statusCode ${statusCode}`); + return callback(); + }) + .catch(err => { + const statusCode = err?.$metadata?.httpStatusCode; + if 
(statusCode) { + assert.strictEqual(statusCode, statusCodeExpected, + `Found unexpected statusCode ${statusCode}`); + } + if (statusCodeExpected === 204) { + return callback(err); + } + return callback(); + }); } it('on bucket that does not exist: should return NoSuchBucket', done => { const uploadId = 'nonexistinguploadid'; - s3.abortMultipartUpload({ Bucket: bucket, Key: key, - UploadId: uploadId }, err => { - assert.notEqual(err, null, - 'Expected NoSuchBucket but found no err'); - assert.strictEqual(err.code, 'NoSuchBucket'); - done(); + const command = new AbortMultipartUploadCommand({ + Bucket: bucket, + Key: key, + UploadId: uploadId, }); + + s3Client.send(command) + .then(() => { + done(new Error('Expected NoSuchBucket but request succeeded')); + }) + .catch(err => { + assert.notEqual(err, null, + 'Expected NoSuchBucket but found no err'); + assert.strictEqual(err.name, 'NoSuchBucket'); + done(); + }); }); + confLocations.forEach(confLocation => { confLocation.describe('on existing bucket with ' + `${confLocation.name}`, () => { - beforeEach(() => - s3.createBucket({ Bucket: bucket, + beforeEach(async () => { + const command = new CreateBucketCommand({ + Bucket: bucket, CreateBucketConfiguration: { LocationConstraint: confLocation.location, - } }).promise() - .catch(err => { - process.stdout.write(`Error in beforeEach: ${err}\n`); - throw err; - }) - ); - - afterEach(() => { - process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucket) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; + }, }); + await s3Client.send(command); + }); + + afterEach(async () => { + process.stdout.write('Emptying bucket\n'); + await bucketUtil.empty(bucket); + process.stdout.write('Deleting bucket\n'); + await bucketUtil.deleteOne(bucket); }); itSkipIfAWS(`should return ${confLocation.statusCode} if ` + @@ -88,25 +108,25 @@ describe('DELETE multipart', () => { () => { let uploadId; - beforeEach(() => - s3.createMultipartUpload({ - Bucket: bucket, - Key: key, - }).promise() - .then(res => { - uploadId = res.UploadId; - return s3.uploadPart({ + beforeEach(async () => { + const createCommand = new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: key, + }); + const createResponse = await s3Client.send(createCommand); + uploadId = createResponse.UploadId; + const uploadCommand = new UploadPartCommand({ Bucket: bucket, Key: key, PartNumber: 1, UploadId: uploadId, + Body: Buffer.from('test data'), }); - }) - ); + await s3Client.send(uploadCommand); + }); it('should return 204 for abortMultipartUpload', done => { - _assertStatusCode(uploadId, 204, - done); + _assertStatusCode(uploadId, 204, done); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/deleteObjTagging.js b/tests/functional/aws-node-sdk/test/object/deleteObjTagging.js index 6870de1a16..d8839a5760 100644 --- a/tests/functional/aws-node-sdk/test/object/deleteObjTagging.js +++ b/tests/functional/aws-node-sdk/test/object/deleteObjTagging.js @@ -1,5 +1,13 @@ const assert = require('assert'); -const async = require('async'); +const { + CreateBucketCommand, + PutObjectCommand, + PutObjectTaggingCommand, + DeleteObjectTaggingCommand, + PutObjectAclCommand, + PutBucketAclCommand, + GetObjectTaggingCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); @@ -21,8 +29,8 @@ const taggingConfig = { 
TagSet: [ function _checkError(err, code, statusCode) { assert(err, 'Expected error but found none'); - assert.strictEqual(err.code, code); - assert.strictEqual(err.statusCode, statusCode); + assert.strictEqual(err.name, code); + assert.strictEqual(err.$metadata.httpStatusCode, statusCode); } describe('DELETE object taggings', () => { @@ -32,112 +40,133 @@ describe('DELETE object taggings', () => { const otherAccountBucketUtility = new BucketUtility('lisa', {}); const otherAccountS3 = otherAccountBucketUtility.s3; - beforeEach(done => s3.createBucket({ Bucket: bucketName }, err => { - if (err) { - return done(err); - } - return s3.putObject({ Bucket: bucketName, Key: objectName }, done); - })); + beforeEach(async () => { + await s3.send(new CreateBucketCommand({ Bucket: bucketName })); + await s3.send(new PutObjectCommand({ Bucket: bucketName, Key: objectName })); + }); - afterEach(() => { + afterEach(async () => { process.stdout.write('Emptying bucket'); - return bucketUtil.empty(bucketName) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(bucketName); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + await bucketUtil.empty(bucketName); + process.stdout.write('Deleting bucket'); + await bucketUtil.deleteOne(bucketName); }); - it('should delete tag set', done => { - s3.putObjectTagging({ + it('should delete tag set', async () => { + await s3.send(new PutObjectTaggingCommand({ Bucket: bucketName, Key: objectName, Tagging: taggingConfig, - }, err => { - assert.ifError(err, `putObjectTagging error: ${err}`); - s3.deleteObjectTagging({ Bucket: bucketName, Key: objectName }, - (err, data) => { - assert.ifError(err, `Found unexpected err ${err}`); - assert.strictEqual(Object.keys(data).length, 0); - done(); - }); - }); + })); + await s3.send(new DeleteObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName + })); + const dataGet = await s3.send(new GetObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + })); + assert.strictEqual(dataGet.TagSet.length, 0); }); - it('should delete a non-existing tag set', done => { - s3.deleteObjectTagging({ Bucket: bucketName, Key: objectName }, - (err, data) => { - assert.ifError(err, `Found unexpected err ${err}`); - assert.strictEqual(Object.keys(data).length, 0); - done(); - }); + it('should delete a non-existing tag set', async () => { + await s3.send(new DeleteObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName + })); + const dataGet = await s3.send(new GetObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName + })); + assert.strictEqual(dataGet.TagSet.length, 0); }); it('should return NoSuchKey deleting tag set to a non-existing object', - done => { - s3.deleteObjectTagging({ - Bucket: bucketName, - Key: 'nonexisting', - }, err => { + async () => { + try { + await s3.send(new DeleteObjectTaggingCommand({ + Bucket: bucketName, + Key: 'nonexisting', + })); + assert.fail('Expected NoSuchKey error'); + } catch (err) { _checkError(err, 'NoSuchKey', 404); - done(); - }); + } }); + it('should return 403 AccessDenied deleting tag set with another ' + - 'account', done => { - otherAccountS3.deleteObjectTagging({ Bucket: bucketName, Key: - objectName }, err => { + 'account', async () => { + try { + await otherAccountS3.send(new DeleteObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName + })); + assert.fail('Expected AccessDenied error'); + } catch (err) { _checkError(err, 'AccessDenied', 403); - done(); - }); + } }); it('should return 403 
AccessDenied deleting tag set with a different ' + - 'account to an object with ACL "public-read-write"', - done => { - s3.putObjectAcl({ Bucket: bucketName, Key: objectName, - ACL: 'public-read-write' }, err => { - if (err) { - return done(err); - } - return otherAccountS3.deleteObjectTagging({ Bucket: bucketName, - Key: objectName }, err => { - _checkError(err, 'AccessDenied', 403); - done(); - }); - }); + 'account to an object with ACL "public-read-write"', + async () => { + await s3.send(new PutObjectAclCommand({ + Bucket: bucketName, + Key: objectName, + ACL: 'public-read-write' + })); + + try { + await otherAccountS3.send(new DeleteObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName + })); + assert.fail('Expected AccessDenied error'); + } catch (err) { + _checkError(err, 'AccessDenied', 403); + } }); - it('should return 403 AccessDenied deleting tag set to an object' + - ' in a bucket created with a different account', - done => { - async.waterfall([ - next => s3.putBucketAcl({ Bucket: bucketName, ACL: - 'public-read-write' }, err => next(err)), - next => otherAccountS3.putObject({ Bucket: bucketName, Key: - objectNameAcl }, err => next(err)), - next => otherAccountS3.deleteObjectTagging({ Bucket: bucketName, - Key: objectNameAcl }, err => next(err)), - ], err => { + it('should return 403 AccessDenied deleting tag set to an object ' + + 'in a bucket created with a different account', + async () => { + await s3.send(new PutBucketAclCommand({ + Bucket: bucketName, + ACL: 'public-read-write' + })); + + await otherAccountS3.send(new PutObjectCommand({ + Bucket: bucketName, + Key: objectNameAcl + })); + + try { + await otherAccountS3.send(new DeleteObjectTaggingCommand({ + Bucket: bucketName, + Key: objectNameAcl + })); + assert.fail('Expected AccessDenied error'); + } catch (err) { _checkError(err, 'AccessDenied', 403); - done(); - }); + } }); - it('should delete tag set to an object in a bucket created with same ' + - 'account even though object put by other account', done => { - async.waterfall([ - next => s3.putBucketAcl({ Bucket: bucketName, ACL: - 'public-read-write' }, err => next(err)), - next => otherAccountS3.putObject({ Bucket: bucketName, Key: - objectNameAcl }, err => next(err)), - next => s3.deleteObjectTagging({ Bucket: bucketName, - Key: objectNameAcl }, err => next(err)), - ], done); + it('should delete tag set to an object in a bucket created with ' + + 'same account even though object put by other account', async () => { + await s3.send(new PutBucketAclCommand({ + Bucket: bucketName, + ACL: 'public-read-write' + })); + + await otherAccountS3.send(new PutObjectCommand({ + Bucket: bucketName, + Key: objectNameAcl + })); + + await s3.send(new DeleteObjectTaggingCommand({ + Bucket: bucketName, + Key: objectNameAcl + })); }); }); });
diff --git a/tests/functional/aws-node-sdk/test/object/deleteObject.js b/tests/functional/aws-node-sdk/test/object/deleteObject.js index 073f098fde..48181e2682 100644 --- a/tests/functional/aws-node-sdk/test/object/deleteObject.js +++ b/tests/functional/aws-node-sdk/test/object/deleteObject.js
@@ -1,9 +1,20 @@ const assert = require('assert'); const moment = require('moment'); +const { + CreateBucketCommand, + CreateMultipartUploadCommand, + UploadPartCommand, + CompleteMultipartUploadCommand, + DeleteObjectCommand, + PutObjectCommand, + PutObjectRetentionCommand, + PutObjectLegalHoldCommand, + PutObjectLockConfigurationCommand, + HeadObjectCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const
BucketUtility = require('../../lib/utility/bucket-util'); const changeObjectLock = require('../../../../utilities/objectLock-util'); - const objectName = 'key'; const objectNameTwo = 'secondkey';
@@ -19,81 +30,72 @@ describe('DELETE object', () => { describe('with multipart upload', () => { const bucketName = 'testdeletempu'; - before(() => { - process.stdout.write('creating bucket\n'); - return s3.createBucket({ Bucket: bucketName }).promise() - .then(() => { + before(async () => { + try { + process.stdout.write('creating bucket\n'); + await s3.send(new CreateBucketCommand({ Bucket: bucketName })); + process.stdout.write('initiating multipart upload\n'); - return s3.createMultipartUpload({ + const createRes = await s3.send(new CreateMultipartUploadCommand({ Bucket: bucketName, Key: objectName, - }).promise(); - }) - .then(res => { + })); + process.stdout.write('uploading parts\n'); - uploadId = res.UploadId; + uploadId = createRes.UploadId; const uploads = []; for (let i = 1; i <= 3; i++) { uploads.push( - s3.uploadPart({ + s3.send(new UploadPartCommand({ Bucket: bucketName, Key: objectName, PartNumber: i, Body: testfile, UploadId: uploadId, - }).promise() + })) ); } - return Promise.all(uploads); - }) - .catch(err => { - process.stdout.write(`Error with uploadPart ${err}\n`); - throw err; - }) - .then(res => { - process.stdout.write('about to complete multipart ' + - 'upload\n'); - return s3.completeMultipartUpload({ + const uploadResults = await Promise.all(uploads); + + process.stdout.write('about to complete multipart upload\n'); + await s3.send(new CompleteMultipartUploadCommand({ Bucket: bucketName, Key: objectName, UploadId: uploadId, MultipartUpload: { Parts: [ - { ETag: res[0].ETag, PartNumber: 1 }, - { ETag: res[1].ETag, PartNumber: 2 }, - { ETag: res[2].ETag, PartNumber: 3 }, + { ETag: uploadResults[0].ETag, PartNumber: 1 }, + { ETag: uploadResults[1].ETag, PartNumber: 2 }, + { ETag: uploadResults[2].ETag, PartNumber: 3 }, ], }, - }).promise(); - }) - .catch(err => { - process.stdout.write('completeMultipartUpload error: ' + - `${err}\n`); + })); + } catch (err) { + process.stdout.write(`Error in before: ${err}\n`); throw err; - }); + } }); - after(() => { - process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucketName) - .then(() => { + after(async () => { + try { + process.stdout.write('Emptying bucket\n'); + await bucketUtil.empty(bucketName); process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucketName); - }) - .catch(err => { + await bucketUtil.deleteOne(bucketName); + } catch (err) { process.stdout.write('Error in after\n'); throw err; - }); + } }); - it('should delete a object uploaded in parts successfully', - done => { - s3.deleteObject({ Bucket: bucketName, Key: objectName }, - err => { - assert.strictEqual(err, null, - `Expected success, got error ${JSON.stringify(err)}`); - done(); - }); + it('should delete an object uploaded in parts successfully', done => { + s3.send(new DeleteObjectCommand({ Bucket: bucketName, Key: objectName })) + .then(() => { + done(); + }) + .catch(err => { + done(new Error(`Expected success, got error ${JSON.stringify(err)}`)); + }); }); });
@@ -101,127 +103,112 @@ describe('DELETE object', () => { const bucketName = 'testdeleteobjectlockbucket'; let versionIdOne; let versionIdTwo; - const retainDate = moment().add(10, 'days').toISOString(); - before(() => { - process.stdout.write('creating bucket\n'); - return s3.createBucket({ - Bucket: bucketName, - ObjectLockEnabledForBucket: true, - }).promise() -
.catch(err => { - process.stdout.write(`Error creating bucket ${err}\n`); - throw err; - }) - .then(() => { + const retainDate = moment().add(10, 'days'); + + before(async () => { + try { + process.stdout.write('creating bucket\n'); + await s3.send(new CreateBucketCommand({ + Bucket: bucketName, + ObjectLockEnabledForBucket: true, + })); + process.stdout.write('putting object\n'); - return s3.putObject({ + const res1 = await s3.send(new PutObjectCommand({ Bucket: bucketName, Key: objectName, - }).promise(); - }) - .catch(err => { - process.stdout.write('Error putting object'); - throw err; - }) - .then(res => { - versionIdOne = res.VersionId; + })); + versionIdOne = res1.VersionId; + process.stdout.write('putting object retention\n'); - return s3.putObjectRetention({ + await s3.send(new PutObjectRetentionCommand({ Bucket: bucketName, Key: objectName, Retention: { Mode: 'GOVERNANCE', RetainUntilDate: retainDate, }, - }).promise(); - }) - .catch(err => { - process.stdout.write('Err putting object retention\n'); - throw err; - }) - .then(() => { + })); + process.stdout.write('putting object\n'); - return s3.putObject({ + const res2 = await s3.send(new PutObjectCommand({ Bucket: bucketName, Key: objectNameTwo, - }).promise(); - }) - .catch(err => { - process.stdout.write(('Err putting second object\n')); - throw err; - }) - .then(res => { - versionIdTwo = res.VersionId; + })); + versionIdTwo = res2.VersionId; + process.stdout.write('putting object legal hold\n'); - return s3.putObjectLegalHold({ + await s3.send(new PutObjectLegalHoldCommand({ Bucket: bucketName, Key: objectNameTwo, LegalHold: { Status: 'ON', }, - }).promise(); - }) - .catch(err => { - process.stdout.write('Err putting object legal hold\n'); + })); + } catch (err) { + process.stdout.write(`Error in before: ${err}\n`); throw err; - }); + } }); - after(() => { - process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucketName) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucketName); - }) - .catch(err => { - process.stdout.write('Error in after\n'); - throw err; - }); + after(async () => { + await bucketUtil.empty(bucketName, true); + await bucketUtil.deleteOne(bucketName); }); it('should put delete marker if no version id specified', done => { - s3.deleteObject({ + s3.send(new DeleteObjectCommand({ Bucket: bucketName, Key: objectName, - }, err => { - assert.ifError(err); - done(); - }); + })) + .then(() => { + done(); + }) + .catch(done); }); - it('should not delete object version locked with object ' + - 'retention', done => { - s3.deleteObject({ + it('should not delete object version locked with object retention', done => { + s3.send(new DeleteObjectCommand({ Bucket: bucketName, Key: objectName, VersionId: versionIdOne, - }, err => { - assert.strictEqual(err.code, 'AccessDenied'); - done(); - }); + })) + .then(() => { + done(new Error('Expected AccessDenied but the delete succeeded')); + }) + .catch(err => { + assert.strictEqual(err.name, 'AccessDenied'); + done(); + }); }); - it('should delete locked object version with GOVERNANCE ' + - 'retention mode and correct header', done => { - s3.deleteObject({ + it('should delete locked object version with GOVERNANCE retention mode and correct header', done => { + s3.send(new DeleteObjectCommand({ Bucket: bucketName, Key: objectName, VersionId: versionIdOne, BypassGovernanceRetention: true, - }, err => { - assert.ifError(err); - done(); - }); + })) + .then(() => { + done(); + }) + .catch(done); }); it('should not delete object locked with legal hold', done => { - s3.deleteObject({ + s3.send(new DeleteObjectCommand({ Bucket: bucketName, Key: objectNameTwo, VersionId: versionIdTwo, - }, err => { - assert.strictEqual(err.code, 'AccessDenied'); + })) + .then(() => { + done(new Error('Expected AccessDenied but the delete succeeded')); + }) + .catch(err => { + assert.strictEqual(err.name, 'AccessDenied'); changeObjectLock( [{ bucket: bucketName,
@@ -236,93 +223,80 @@ describe('DELETE object', () => { const bucketName = 'testdeletelocklegalholdbucket'; const objectName = 'key'; let versionId; - before(() => { - process.stdout.write('creating bucket\n'); - return s3.createBucket({ - Bucket: bucketName, - ObjectLockEnabledForBucket: true, - }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket ${err}\n`); - throw err; - }) - .then(() => { - process.stdout.write('putting object lock configuration\n'); - return s3.putObjectLockConfiguration({ - Bucket: bucketName, - ObjectLockConfiguration: { - ObjectLockEnabled: 'Enabled', - Rule: { - DefaultRetention: { - Mode: 'GOVERNANCE', - Days: 1, - }, + + before(async () => { + try { + process.stdout.write('creating bucket\n'); + await s3.send(new CreateBucketCommand({ + Bucket: bucketName, + ObjectLockEnabledForBucket: true, + })); + + process.stdout.write('putting object lock configuration\n'); + await s3.send(new PutObjectLockConfigurationCommand({ + Bucket: bucketName, + ObjectLockConfiguration: { + ObjectLockEnabled: 'Enabled', + Rule: { + DefaultRetention: { + Mode: 'GOVERNANCE', + Days: 1, }, }, - }).promise(); - }) - .catch(err => { - process.stdout.write('Error putting object lock configuration\n'); - throw err; - }) - .then(() => { - process.stdout.write('putting object\n'); - return s3.putObject({ - Bucket: bucketName, - Key: objectName, - }).promise(); - }) - .catch(err => { - process.stdout.write('Error putting object'); - throw err; - }) - .then(res => { - versionId = res.VersionId; - process.stdout.write('putting object legal hold\n'); - return s3.putObjectLegalHold({ - Bucket: bucketName, - Key: objectName, - LegalHold: { - Status: 'ON', - }, - }).promise(); - }) - .catch(err => { - process.stdout.write('Err putting object legal hold\n'); - throw err; - }); + }, + })); + + process.stdout.write('putting object\n'); + const res = await s3.send(new PutObjectCommand({ + Bucket: bucketName, + Key: objectName, + })); + versionId = res.VersionId; + + process.stdout.write('putting object legal hold\n'); + await s3.send(new PutObjectLegalHoldCommand({ + Bucket: bucketName, + Key: objectName, + LegalHold: { + Status: 'ON', + }, + })); + } catch (err) { + process.stdout.write(`Error in before: ${err}\n`); + throw err; + } }); - after(() => { - process.stdout.write('Emptying bucket\n'); - return bucketUtil.empty(bucketName) - .then(() => { - process.stdout.write('Deleting bucket\n'); - return bucketUtil.deleteOne(bucketName); - }) - .catch(err => { - process.stdout.write('Error in after\n'); - throw err; - }); + after(async () => { + try { + process.stdout.write('Emptying bucket\n'); + await bucketUtil.empty(bucketName); + process.stdout.write('Deleting bucket\n'); + await bucketUtil.deleteOne(bucketName); + } catch (err) { + process.stdout.write('Error in after\n'); + throw err; + } }); - it('should not delete locked object version with GOVERNANCE ' + - 'retention mode and bypass header when object is legal-hold enabled', done => - s3.deleteObject({ - Bucket: bucketName, - Key: objectName, - VersionId: versionId, - BypassGovernanceRetention: true, - }, err => { -
assert.strictEqual(err.code, 'AccessDenied'); - changeObjectLock( - [{ - bucket: bucketName, - key: objectName, - versionId, - }], '', done); - } - )); + it('should not delete locked object version with GOVERNANCE ' + + 'retention mode and bypass header when object is legal-hold enabled', done => { + s3.send(new DeleteObjectCommand({ + Bucket: bucketName, + Key: objectName, + VersionId: versionId, + BypassGovernanceRetention: true, + })) + .then(() => { + done(new Error('Expected AccessDenied but the delete succeeded')); + }) + .catch(err => { + assert.strictEqual(err.name, 'AccessDenied'); + changeObjectLock( + [{ + bucket: bucketName, + key: objectName, + versionId, + }], '', done); + }); + }); }); describe('with conditional headers (unofficial, for backbeat)', () => {
@@ -332,36 +306,60 @@ let objectLastModified; before(async () => { - await s3.createBucket({ Bucket: bucketName }).promise(); + await s3.send(new CreateBucketCommand({ Bucket: bucketName })); }); beforeEach(async () => { // Re-create the object for each test since some tests will delete it - await s3.putObject({ + await s3.send(new PutObjectCommand({ Bucket: bucketName, Key: testObjectKey, Body: testObjectBody, - }).promise(); - const head = await s3.headObject({ + })); - const head = await s3.send(new HeadObjectCommand({ + const head = await s3.send(new HeadObjectCommand({ Bucket: bucketName, Key: testObjectKey, - }).promise(); + })); objectLastModified = head.LastModified; }); after(async () => { - await bucketUtil.empty(bucketName); + await bucketUtil.empty(bucketName, true); await bucketUtil.deleteOne(bucketName); }); function deleteObjectConditional(s3, params, headers, next) { - const request = s3.deleteObject(params); - request.on('build', () => { + const command = new DeleteObjectCommand(params); + // Create a unique middleware name to avoid conflicts + const middlewareName = `headersAdder_${Date.now()}_${Math.random()}`; + + // Middleware to add custom headers + const middleware = next => async args => { for (const [key, value] of Object.entries(headers)) { - request.httpRequest.headers[key] = value; + // Ensure all header values are strings + // eslint-disable-next-line no-param-reassign + args.request.headers[key] = String(value); } - }); - return request.send(next); + return next(args); + }; + + const middlewareConfig = { + step: 'build', + name: middlewareName, + }; + + // Add middleware + s3.middlewareStack.add(middleware, middlewareConfig); + + s3.send(command) + .then(data => { + s3.middlewareStack.remove(middlewareName); + next(null, data); + }) + .catch(err => { + s3.middlewareStack.remove(middlewareName); + next(err); + }); } describe('If-Unmodified-Since header tests', () => {
@@ -375,14 +373,18 @@ 'If-Unmodified-Since': futureDate.toUTCString(), }, (err, data) => { assert.ifError(err); - assert.deepStrictEqual(data, {}); + assert.strictEqual(data.$metadata.httpStatusCode, 204); + s3.send(new HeadObjectCommand({ Bucket: bucketName, Key: testObjectKey, - }, err => { - assert.strictEqual(err.code, 'NotFound'); - done(); - }); + })) + .then(() => { + done(new Error('Object should not exist')); + }) + .catch(err => { + assert.strictEqual(err.name, 'NotFound'); + done(); + }); });
@@ -395,8 +397,8 @@ }, { 'If-Unmodified-Since': pastDate.toUTCString(), }, err => { - assert.strictEqual(err.code, 'PreconditionFailed'); - assert.strictEqual(err.statusCode, 412); + assert.strictEqual(err.name, 'PreconditionFailed'); + assert.strictEqual(err.$metadata.httpStatusCode, 412); done(); }); });
@@ -410,15 +412,15 @@ describe('DELETE object', () => { Bucket: bucketName, Key: testObjectKey, }, { 'If-Modified-Since': pastDate.toUTCString(), }, (err, data) => { assert.ifError(err); - assert.deepStrictEqual(data, {}); + assert.strictEqual(data.$metadata.httpStatusCode, 204); + s3.send(new HeadObjectCommand({ Bucket: bucketName, Key: testObjectKey, - }, err => { - assert.strictEqual(err.code, 'NotFound'); + })) + .then(() => { + done(new Error('Object should not exist')); + }) + .catch(err => { + assert.strictEqual(err.name, 'NotFound'); done(); }); });
@@ -433,8 +435,7 @@ }, { 'If-Modified-Since': futureDate.toUTCString(), }, err => { - assert.strictEqual(err.code, 'NotModified'); - assert.strictEqual(err.statusCode, 304); + assert.strictEqual(err.$metadata.httpStatusCode, 304); done(); }); });
@@ -452,13 +453,14 @@ 'If-Modified-Since': pastDate.toUTCString(), 'If-Unmodified-Since': futureDate.toUTCString(), }, (err, data) => { assert.ifError(err); - assert.deepStrictEqual(data, {}); + assert.strictEqual(data.$metadata.httpStatusCode, 204); + s3.send(new HeadObjectCommand({ Bucket: bucketName, Key: testObjectKey, - }, err => { - assert.strictEqual(err.code, 'NotFound'); + })) + .then(() => { + done(new Error('Object should not exist')); + }) + .catch(err => { + assert.strictEqual(err.name, 'NotFound'); + assert.strictEqual(err.$metadata.httpStatusCode, 404); done(); }); });
diff --git a/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js b/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js index 0f5cd65342..1bb7774e43 100644 --- a/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js +++ b/tests/functional/aws-node-sdk/test/object/encryptionHeaders.js
@@ -1,6 +1,15 @@ const assert = require('assert'); const async = require('async'); const uuid = require('uuid'); +const { + CreateBucketCommand, + HeadObjectCommand, + PutObjectCommand, + PutBucketEncryptionCommand, + CopyObjectCommand, + CreateMultipartUploadCommand, + UploadPartCommand, +} = require('@aws-sdk/client-s3'); const BucketInfo = require('arsenal').models.BucketInfo; const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util');
@@ -31,26 +40,37 @@ const testCases = [ function s3NoOp(_, cb) { cb(); } function getSSEConfig(s3, Bucket, Key, cb) { - return s3.headObject({ Bucket, Key }, (err, resp) => { - if (err) { - return cb(err); - } - return cb(null, - JSON.parse(JSON.stringify({ algo: resp.ServerSideEncryption, masterKeyId: resp.SSEKMSKeyId }))); - }); + const command = new HeadObjectCommand({ Bucket, Key }); + s3.send(command) + .then(resp => { + const sseConfig = JSON.parse(JSON.stringify({ + algo: resp.ServerSideEncryption, + masterKeyId: resp.SSEKMSKeyId + })); + cb(null, sseConfig); + }) + .catch(cb); } function putEncryptedObject(s3, Bucket, Key, sseConfig, kmsKeyId, cb) { const params = { Bucket, Key, - ServerSideEncryption: sseConfig.algo, Body: 'somedata', }; + + if (sseConfig.algo) { + params.ServerSideEncryption = sseConfig.algo; + } + if (sseConfig.masterKeyId) { params.SSEKMSKeyId = kmsKeyId; } - return s3.putObject(params, cb); + + const command = new PutObjectCommand(params); + s3.send(command) + .then(response => cb(null, response)) + .catch(cb); } function createExpected(sseConfig, kmsKeyId) {
@@ -84,6 +104,34 @@ function hydrateSSEConfig({ algo: SSEAlgorithm, masterKeyId: KMSMasterKeyID }) { ); } +function putBucketEncryption(s3, params, cb) { + const command = new PutBucketEncryptionCommand(params); + s3.send(command) + .then(response
=> cb(null, response)) + .catch(cb); +} + +function copyObject(s3, params, cb) { + const command = new CopyObjectCommand(params); + s3.send(command) + .then(response => cb(null, response)) + .catch(cb); +} + +function createMultipartUpload(s3, params, cb) { + const command = new CreateMultipartUploadCommand(params); + s3.send(command) + .then(response => cb(null, response)) + .catch(cb); +} + +function uploadPart(s3, params, cb) { + const command = new UploadPartCommand(params); + s3.send(command) + .then(response => cb(null, response)) + .catch(cb); +} + describe('per object encryption headers', () => { withV4(sigCfg => { let bucket; @@ -106,19 +154,15 @@ describe('per object encryption headers', () => { ); }); - beforeEach(() => { + beforeEach(async () => { bucket = `enc-bucket-${uuid.v4()}`; bucket2 = `enc-bucket-2-${uuid.v4()}`; object = `enc-object-${uuid.v4()}`; object2 = `enc-object-2-${uuid.v4()}`; bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .then(() => s3.createBucket({ Bucket: bucket2 }).promise()) - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + await s3.send(new CreateBucketCommand({ Bucket: bucket })); + await s3.send(new CreateBucketCommand({ Bucket: bucket2 })); }); afterEach(() => { @@ -190,8 +234,9 @@ describe('per object encryption headers', () => { Bucket: bucket, ServerSideEncryptionConfiguration: hydrateSSEConfig(_existing), }; - // no op putBucketNotification for the unencrypted case - const s3Op = existing.algo ? (...args) => s3.putBucketEncryption(...args) : s3NoOp; + // no op putBucketEncryption for the unencrypted case + const s3Op = existing.algo ? + (params, cb) => putBucketEncryption(s3, params, cb) : s3NoOp; s3Op(params, error => { assert.ifError(error); return putEncryptedObject(s3, bucket, object, target, kmsKeyId, error => { @@ -236,8 +281,9 @@ describe('per object encryption headers', () => { Bucket: bucket2, ServerSideEncryptionConfiguration: hydrateSSEConfig(_existing), }; - // no op putBucketNotification for the unencrypted case - const s3Op = existing.algo ? (...args) => s3.putBucketEncryption(...args) : s3NoOp; + // no op putBucketEncryption for the unencrypted case + const s3Op = existing.algo ? 
+ (params, cb) => putBucketEncryption(s3, params, cb) : s3NoOp; s3Op(params, error => { assert.ifError(error); return putEncryptedObject(s3, bucket, object, target, kmsKeyId, error => {
@@ -253,7 +299,7 @@ describe('per object encryption headers', () => { if (target.masterKeyId) { copyParams.SSEKMSKeyId = kmsKeyId; } - return s3.copyObject(copyParams, error => { + return copyObject(s3, copyParams, error => { assert.ifError(error); return getSSEConfig( s3,
@@ -293,7 +339,7 @@ describe('per object encryption headers', () => { if (target.masterKeyId) { params.SSEKMSKeyId = kmsKeyId; } - s3.createMultipartUpload(params, (error, resp) => { + createMultipartUpload(s3, params, (error, resp) => { assert.ifError(error); const { UploadId } = resp; const partParams = {
@@ -303,7 +349,7 @@ describe('per object encryption headers', () => { Key: object, PartNumber: 1, }; - s3.uploadPart(partParams, error => { + uploadPart(s3, partParams, error => { assert.ifError(error); done(); });
@@ -315,7 +361,7 @@ describe('per object encryption headers', () => { Bucket: bucket, Key: object, }; - s3.createMultipartUpload(sourceParams, (error, resp) => { + createMultipartUpload(s3, sourceParams, (error, resp) => { assert.ifError(error); const { UploadId: sourceUploadId } = resp; const sourcePartParams = {
@@ -325,7 +371,7 @@ describe('per object encryption headers', () => { Key: object, PartNumber: 1, }; - s3.uploadPart(sourcePartParams, error => { + uploadPart(s3, sourcePartParams, error => { assert.ifError(error); const targetParams = { Bucket: bucket,
@@ -337,7 +383,8 @@ describe('per object encryption headers', () => { if (target.masterKeyId) { targetParams.SSEKMSKeyId = kmsKeyId; } - s3.createMultipartUpload(targetParams, (error, resp) => { + createMultipartUpload(s3, targetParams, (error, resp) => { + assert.ifError(error); const { UploadId: targetUploadId } = resp; const targetPartParams = { UploadId: targetUploadId,
@@ -346,7 +393,7 @@ describe('per object encryption headers', () => { Key: object2, PartNumber: 1, }; - s3.uploadPart(targetPartParams, error => { + uploadPart(s3, targetPartParams, error => { assert.ifError(error); done(); });
diff --git a/tests/functional/aws-node-sdk/test/object/get.js b/tests/functional/aws-node-sdk/test/object/get.js index bb3b0a2e99..eb493629a7 100644 --- a/tests/functional/aws-node-sdk/test/object/get.js +++ b/tests/functional/aws-node-sdk/test/object/get.js
@@ -3,6 +3,22 @@ const assert = require('assert'); const async = require('async'); const crypto = require('crypto'); const moment = require('moment'); +const { + CreateBucketCommand, + DeleteBucketCommand, + DeleteObjectCommand, + GetObjectCommand, + HeadObjectCommand, + PutObjectCommand, + PutObjectTaggingCommand, + CreateMultipartUploadCommand, + UploadPartCommand, + UploadPartCopyCommand, + CompleteMultipartUploadCommand, + AbortMultipartUploadCommand, + ListObjectVersionsCommand, +} = require('@aws-sdk/client-s3'); +const { promisify } = require('util'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util');
@@ -21,7 +36,7 @@ const contentLanguage = 'en-US'; const contentType = 'xml'; // AWS Node SDK requires Date object, ISO-8601 string, or // a UNIX timestamp for Expires header -const expires = new Date().toISOString(); +const expires = new Date(); const etagTrim = 'd41d8cd98f00b204e9800998ecf8427e'; const etag = `"${etagTrim}"`; const partSize = 1024 * 1024 * 5; // 5MB minumum required part size.
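Note on the date handling changes in this file: the v3 SDK models S3 timestamp members (Expires, LastModified, ObjectLockRetainUntilDate) as JavaScript Date objects on both input and output, which is why `expires`, `dateFromNow()` and `dateConvert()` now pass Date instances instead of ISO-8601 strings. A minimal sketch of the round-trip this relies on, assuming an already-configured S3Client instance (`s3`) and placeholder bucket/key arguments:

const { PutObjectCommand, HeadObjectCommand } = require('@aws-sdk/client-s3');

// Put an object with an Expires date, then read it back via HeadObject.
async function expiresRoundTrip(s3, Bucket, Key) {
    const expires = new Date(Date.now() + 60 * 60 * 1000); // one hour from now
    await s3.send(new PutObjectCommand({ Bucket, Key, Expires: expires }));
    const head = await s3.send(new HeadObjectCommand({ Bucket, Key }));
    // head.Expires is a Date; the HTTP date format only keeps second
    // precision, so compare formatted strings rather than epoch millis.
    return head.Expires.toUTCString() === expires.toUTCString();
}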
@@ -33,7 +48,7 @@ function checkNoError(err) { function checkError(err, code) { assert.notEqual(err, null, 'Expected failure but got success'); - assert.strictEqual(err.code, code); + assert.strictEqual(err.name, code); } function checkIntegerHeader(integerHeader, expectedSize) { @@ -43,11 +58,11 @@ function checkIntegerHeader(integerHeader, expectedSize) { function dateFromNow(diff) { const d = new Date(); d.setHours(d.getHours() + diff); - return d.toISOString(); + return d; } function dateConvert(d) { - return (new Date(d)).toISOString(); + return new Date(d); } describe('GET object', () => { @@ -56,28 +71,38 @@ describe('GET object', () => { let s3; function requestGet(fields, cb) { - s3.getObject(Object.assign({ + s3.send(new GetObjectCommand(Object.assign({ Bucket: bucketName, Key: objectName, - }, fields), cb); + }, fields))).then(data => cb(null, data)).catch(err => { + if (err.$metadata.httpStatusCode === 304) { + const notModifiedError = new Error('NotModified'); + notModifiedError.name = 'NotModified'; + notModifiedError.$metadata = err.$metadata; + return cb(notModifiedError); + } + return cb(err); + }); } + const requestGetPromise = promisify(requestGet); + function checkGetObjectPart(key, partNumber, len, body, cb) { - s3.getObject({ + s3.send(new GetObjectCommand({ Bucket: bucketName, Key: key, PartNumber: partNumber, - }, (err, data) => { - checkNoError(err); + })).then(async data => { checkIntegerHeader(data.ContentLength, len); const md5Hash = crypto.createHash('md5'); const md5HashExpected = crypto.createHash('md5'); + const bodyText = await data.Body.transformToString(); assert.strictEqual( - md5Hash.update(data.Body).digest('hex'), + md5Hash.update(bodyText).digest('hex'), md5HashExpected.update(body).digest('hex') ); return cb(); - }); + }).catch(cb); } // Upload parts with the given partNumbers array and complete MPU. 
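Note on the requestGet() shim above: v3 surfaces non-2xx GetObject responses as thrown errors with the HTTP status on err.$metadata.httpStatusCode, and a 304 no longer arrives as a v2-style NotModified error, so the shim rebuilds one to keep the conditional-header assertions unchanged. A standalone sketch of the same normalization, assuming only an existing S3Client instance:

const { GetObjectCommand } = require('@aws-sdk/client-s3');

// Fetch an object, converting a 304 response into a NotModified error.
async function getObjectOr304(s3, params) {
    try {
        return await s3.send(new GetObjectCommand(params));
    } catch (err) {
        if (err.$metadata && err.$metadata.httpStatusCode === 304) {
            const notModified = new Error('NotModified');
            notModified.name = 'NotModified';
            notModified.$metadata = err.$metadata;
            throw notModified;
        }
        throw err;
    }
}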
@@ -91,10 +116,8 @@ describe('GET object', () => { Key: objectName, }; - s3.createMultipartUpload(createMpuParams, (err, data) => { - checkNoError(err); - return next(null, data.UploadId); - }); + s3.send(new CreateMultipartUploadCommand(createMpuParams)).then(data => + next(null, data.UploadId)).catch(next); }, (uploadId, next) => async.eachSeries(partNumbers, (partNumber, callback) => { @@ -105,11 +128,10 @@ describe('GET object', () => { UploadId: uploadId, Body: Buffer.alloc(partSize).fill(partNumber), }; - return s3.uploadPart(uploadPartParams, (err, data) => { - checkNoError(err); + return s3.send(new UploadPartCommand(uploadPartParams)).then(data => { ETags = ETags.concat(data.ETag); return callback(); - }); + }).catch(callback); }, err => next(err, uploadId)), (uploadId, next) => { const parts = Array.from(Array(partNumbers.length).keys()); @@ -124,18 +146,19 @@ describe('GET object', () => { }, UploadId: uploadId, }; - return s3.completeMultipartUpload(params, err => { - checkNoError(err); - return next(null, uploadId); - }); + return s3.send(new CompleteMultipartUploadCommand(params)).then(() => + next(null, uploadId)).catch(next); }, ], (err, uploadId) => { if (err) { - return s3.abortMultipartUpload({ - Bucket: bucketName, - Key: objectName, - UploadId: uploadId, - }, cb); + if (uploadId) { + return s3.send(new AbortMultipartUploadCommand({ + Bucket: bucketName, + Key: objectName, + UploadId: uploadId, + })).then(() => cb(err)).catch(() => cb(err)); + } + return cb(err); } return cb(); }); @@ -145,88 +168,90 @@ describe('GET object', () => { let uploadId; const ETags = []; return async.waterfall([ - next => s3.createMultipartUpload({ + next => s3.send(new CreateMultipartUploadCommand({ Bucket: bucketName, Key: copyPartKey, - }, (err, data) => { - checkNoError(err); + })).then(data => { uploadId = data.UploadId; return next(); - }), + }).catch(next), // Copy an object with three parts. - next => s3.uploadPartCopy({ + next => s3.send(new UploadPartCopyCommand({ Bucket: bucketName, CopySource: `/${bucketName}/${objectName}`, Key: copyPartKey, PartNumber: 1, UploadId: uploadId, - }, (err, data) => { - checkNoError(err); - ETags[0] = data.ETag; + })).then(data => { + ETags[0] = data.CopyPartResult.ETag; return next(); - }), + }).catch(next), // Put an object with one part. 
- next => s3.uploadPart({ + next => s3.send(new UploadPartCommand({ Bucket: bucketName, Key: copyPartKey, PartNumber: 2, UploadId: uploadId, Body: partTwoBody, - }, (err, data) => { - checkNoError(err); + })).then(data => { ETags[1] = data.ETag; return next(); - }), + }).catch(next), ], err => { if (err) { - return s3.abortMultipartUpload({ - Bucket: bucketName, - Key: copyPartKey, - UploadId: uploadId, - }, cb); + if (uploadId) { + return s3.send(new AbortMultipartUploadCommand({ + Bucket: bucketName, + Key: copyPartKey, + UploadId: uploadId, + })).then(() => cb(err)).catch(() => cb(err)); + } + return cb(err); } return cb(null, uploadId, ETags); }); } - before(done => { + before(async () => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; // Create a bucket to put object to get later - s3.createBucket({ Bucket: bucketName }, done); + await s3.send(new CreateBucketCommand({ Bucket: bucketName })); }); - after(done => { - s3.deleteObject({ Bucket: bucketName, Key: objectName }, err => { - if (err) { - return done(err); - } - return s3.deleteBucket({ Bucket: bucketName }, done); - }); + after(async () => { + await s3.send(new DeleteObjectCommand({ Bucket: bucketName, Key: objectName })); + await s3.send(new DeleteBucketCommand({ Bucket: bucketName })); }); - // aws-sdk now (v2.363.0) returns 'UriParameterError' error - it.skip('should return an error to get request without a valid ' + + + it('should return an error to get request without a valid ' + 'bucket name', done => { - s3.getObject({ Bucket: '', Key: 'somekey' }, err => { - checkError(err, 'MethodNotAllowed'); + s3.send(new GetObjectCommand({ Bucket: '', Key: 'somekey' })).then(() => { + assert.fail('Expected failure but got success'); + }).catch(err => { + assert.strictEqual(err.message, 'Empty value provided for input HTTP label: Bucket.'); return done(); }); }); it('should return NoSuchKey error when no such object', done => { - s3.getObject({ Bucket: bucketName, Key: 'nope' }, err => { - checkError(err, 'NoSuchKey'); + s3.send(new GetObjectCommand({ Bucket: bucketName, Key: 'nope' })).then(() => { + assert.fail('Expected failure but got success'); + }).catch(err => { + assert.strictEqual(err.name, 'NoSuchKey'); return done(); }); }); it('should return NoSuchKey error when no such object even with key longer than 915 bytes', done => { - s3.getObject({ Bucket: bucketName, Key: 'a'.repeat(2000) }, err => { - checkError(err, 'NoSuchKey'); + s3.send(new GetObjectCommand({ Bucket: bucketName, Key: 'a'.repeat(2000) })).then(() => { + assert.fail('Expected failure but got success'); + }).catch(err => { + assert.strictEqual(err.name, 'NoSuchKey'); return done(); }); }); @@ -234,7 +259,7 @@ describe('GET object', () => { describe('Additional headers: [Cache-Control, Content-Disposition, ' + 'Content-Encoding, Expires, Accept-Ranges]', () => { describe('if specified in put object request', () => { - before(done => { + before(async () => { const params = { Bucket: bucketName, Key: objectName, @@ -244,35 +269,30 @@ describe('GET object', () => { ContentType: contentType, Expires: expires, }; - s3.putObject(params, err => done(err)); + await s3.send(new PutObjectCommand(params)); }); it('should return additional headers', done => { - s3.getObject({ Bucket: bucketName, Key: objectName }, - (err, res) => { - if (err) { - return done(err); - } - assert.strictEqual(res.CacheControl, - cacheControl); - assert.strictEqual(res.ContentDisposition, - contentDisposition); - // Should remove V4 streaming value 'aws-chunked' - // 
to be compatible with AWS behavior - assert.strictEqual(res.ContentEncoding, - 'gzip'); - assert.strictEqual(res.ContentType, contentType); - assert.strictEqual(res.Expires.toGMTString(), - new Date(expires).toGMTString()); - assert.strictEqual(res.AcceptRanges, 'bytes'); - return done(); - }); + s3.send(new GetObjectCommand({ Bucket: bucketName, Key: objectName })).then(res => { + assert.strictEqual(res.CacheControl, + cacheControl); + assert.strictEqual(res.ContentDisposition, + contentDisposition); + // Should remove V4 streaming value 'aws-chunked' + // to be compatible with AWS behavior + assert.strictEqual(res.ContentEncoding, + 'gzip'); + assert.strictEqual(res.ContentType, contentType); + assert.strictEqual(res.Expires.toGMTString(), + new Date(expires).toGMTString()); + assert.strictEqual(res.AcceptRanges, 'bytes'); + return done(); + }).catch(done); }); }); describe('if response content headers are set in query', () => { - before(done => { - s3.putObject({ Bucket: bucketName, Key: objectName }, - err => done(err)); + before(async () => { + await s3.send(new PutObjectCommand({ Bucket: bucketName, Key: objectName })); }); it('should return additional headers even if not set in ' + @@ -287,10 +307,7 @@ describe('GET object', () => { ResponseContentType: contentType, ResponseExpires: expires, }; - s3.getObject(params, (err, res) => { - if (err) { - return done(err); - } + s3.send(new GetObjectCommand(params)).then(res => { assert.strictEqual(res.CacheControl, cacheControl); assert.strictEqual(res.ContentDisposition, @@ -303,30 +320,26 @@ describe('GET object', () => { assert.strictEqual(res.Expires.toGMTString(), new Date(expires).toGMTString()); return done(); - }); + }).catch(done); }); }); }); describe('x-amz-website-redirect-location header', () => { - before(done => { + before(async () => { const params = { Bucket: bucketName, Key: objectName, WebsiteRedirectLocation: '/', }; - s3.putObject(params, err => done(err)); + await s3.send(new PutObjectCommand(params)); }); it('should return website redirect header if specified in ' + 'objectPUT request', done => { - s3.getObject({ Bucket: bucketName, Key: objectName }, - (err, res) => { - if (err) { - return done(err); - } - assert.strictEqual(res.WebsiteRedirectLocation, '/'); - return done(); - }); + s3.send(new GetObjectCommand({ Bucket: bucketName, Key: objectName })).then(res => { + assert.strictEqual(res.WebsiteRedirectLocation, '/'); + return done(); + }).catch(done); }); }); @@ -347,44 +360,38 @@ describe('GET object', () => { ], }, }; - beforeEach(done => { - s3.putObject(params, done); + beforeEach(async () => { + await s3.send(new PutObjectCommand(params)); }); it('should not return "x-amz-tagging-count" if no tag ' + 'associated with the object', done => { - s3.getObject(params, (err, data) => { - if (err) { - return done(err); - } + s3.send(new GetObjectCommand(params)).then(data => { assert.strictEqual(data.TagCount, undefined); return done(); - }); + }).catch(done); }); describe('tag associated with the object', () => { - beforeEach(done => { - s3.putObjectTagging(paramsTagging, done); + beforeEach(async () => { + await s3.send(new PutObjectTaggingCommand(paramsTagging)); }); it('should return "x-amz-tagging-count" header that provides ' + 'the count of number of tags associated with the object', done => { - s3.getObject(params, (err, data) => { - if (err) { - return done(err); - } + s3.send(new GetObjectCommand(params)).then(data => { assert.equal(data.TagCount, 1); return done(); - }); + }).catch(done); }); }); }); 
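One more v3 behavior worth flagging before the conditional-header and part-number tests below: GetObject's Body is now a stream with SDK mixins rather than a Buffer, which is why the MD5 assertions first drain it with transformToString(). A standalone sketch of the pattern, assuming an existing S3Client instance:

const crypto = require('crypto');
const { GetObjectCommand } = require('@aws-sdk/client-s3');

// Drain the streaming Body and return its MD5 hex digest.
async function md5OfObject(s3, Bucket, Key) {
    const data = await s3.send(new GetObjectCommand({ Bucket, Key }));
    // transformToString() consumes the stream; transformToByteArray()
    // is the safer choice for binary payloads.
    const body = await data.Body.transformToString();
    return crypto.createHash('md5').update(body).digest('hex');
}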
describe('conditional headers', () => { const params = { Bucket: bucketName, Key: objectName }; - beforeEach(done => { - s3.putObject(params, done); + beforeEach(async () => { + await s3.send(new PutObjectCommand(params)); }); it('If-Match: returns no error when ETag match, with double ' + 'quotes around ETag', @@ -442,12 +449,12 @@ describe('GET object', () => { }); it('If-None-Match: returns no error when ETag does not match', - done => { - requestGet({ IfNoneMatch: 'non-matching' }, err => { - checkNoError(err); - done(); + done => { + requestGet({ IfNoneMatch: 'non-matching' }, err => { + checkNoError(err); + done(); + }); }); - }); it('If-None-Match: returns no error when all ETags do not match', done => { @@ -535,15 +542,13 @@ describe('GET object', () => { it('If-Modified-Since: returns NotModified if Last modified ' + 'date is equal', done => { - s3.headObject({ Bucket: bucketName, Key: objectName }, - (err, data) => { - checkNoError(err); + s3.send(new HeadObjectCommand({ Bucket: bucketName, Key: objectName })).then(data => { const lastModified = dateConvert(data.LastModified); requestGet({ IfModifiedSince: lastModified }, err => { checkError(err, 'NotModified'); done(); }); - }); + }).catch(done); }); it('If-Unmodified-Since: returns no error when lastModified date ' + @@ -558,16 +563,14 @@ describe('GET object', () => { it('If-Unmodified-Since: returns no error when lastModified ' + 'date is equal', done => { - s3.headObject({ Bucket: bucketName, Key: objectName }, - (err, data) => { - checkNoError(err); - const lastModified = dateConvert(data.LastModified); - requestGet({ IfUnmodifiedSince: lastModified }, - err => { - checkNoError(err); - done(); - }); - }); + s3.send(new HeadObjectCommand({ Bucket: bucketName, Key: objectName })).then(data => { + const lastModified = dateConvert(data.LastModified); + requestGet({ IfUnmodifiedSince: lastModified }, + err => { + checkNoError(err); + done(); + }); + }).catch(done); }); it('If-Unmodified-Since: returns PreconditionFailed when ' + @@ -667,19 +670,13 @@ describe('GET object', () => { it('If-None-Match & If-Modified-Since: returns NotModified when ' + 'Etag does not match and lastModified is greater', done => { - const req = s3.getObject({ - Bucket: bucketName, - Key: objectName, + requestGet({ IfNoneMatch: etagTrim, IfModifiedSince: dateFromNow(1), }, err => { checkError(err, 'NotModified'); done(); }); - req.on('httpHeaders', (code, headers) => { - assert(!headers['content-type']); - assert(!headers['content-length']); - }); }); it('If-None-Match not match & If-Modified-Since not match', @@ -767,14 +764,15 @@ describe('GET object', () => { it(`should get the body of part ${num} when ordered MPU`, done => completeMPU(orderedPartNumbers, err => { checkNoError(err); - return requestGet({ PartNumber: num }, (err, data) => { + return requestGet({ PartNumber: num }, async (err, data) => { checkNoError(err); checkIntegerHeader(data.ContentLength, partSize); const md5Hash = crypto.createHash('md5'); const md5HashExpected = crypto.createHash('md5'); const expected = Buffer.alloc(partSize).fill(num); + const bodyText = await data.Body.transformToString(); assert.strictEqual( - md5Hash.update(data.Body).digest('hex'), + md5Hash.update(bodyText).digest('hex'), md5HashExpected.update(expected).digest('hex') ); return done(); @@ -786,15 +784,16 @@ describe('GET object', () => { it(`should get the body of part ${num} when unordered MPU`, done => completeMPU(unOrderedPartNumbers, err => { checkNoError(err); - return requestGet({ PartNumber: num 
}, (err, data) => { + return requestGet({ PartNumber: num }, async (err, data) => { checkNoError(err); checkIntegerHeader(data.ContentLength, partSize); const md5Hash = crypto.createHash('md5'); const md5HashExpected = crypto.createHash('md5'); const expected = Buffer.alloc(partSize) .fill(unOrderedPartNumbers[num - 1]); + const bodyText = await data.Body.transformToString(); assert.strictEqual( - md5Hash.update(data.Body).digest('hex'), + md5Hash.update(bodyText).digest('hex'), md5HashExpected.update(expected).digest('hex') ); return done(); @@ -822,57 +821,59 @@ describe('GET object', () => { })); it('should accept a part number of 1 for regular put object', - done => s3.putObject({ - Bucket: bucketName, - Key: objectName, - Body: Buffer.alloc(10), - }, err => { - checkNoError(err); - return requestGet({ PartNumber: 1 }, (err, data) => { - const md5Hash = crypto.createHash('md5'); - const md5HashExpected = crypto.createHash('md5'); - const expected = Buffer.alloc(10); - assert.strictEqual( - md5Hash.update(data.Body).digest('hex'), - md5HashExpected.update(expected).digest('hex') - ); - done(); - }); - })); + async () => { + await s3.send(new PutObjectCommand({ + Bucket: bucketName, + Key: objectName, + Body: Buffer.alloc(10), + })); + + const data = await requestGetPromise({ PartNumber: 1 }); + const md5Hash = crypto.createHash('md5'); + const md5HashExpected = crypto.createHash('md5'); + const expected = Buffer.alloc(10).fill(0); + const bodyText = await data.Body.transformToString(); + assert.strictEqual( + md5Hash.update(bodyText).digest('hex'), + md5HashExpected.update(expected).digest('hex') + ); + }); - it('should accept a part number that is a string', done => - s3.putObject({ + it('should accept a part number that is a string', async () => { + await s3.send(new PutObjectCommand({ Bucket: bucketName, Key: objectName, Body: Buffer.alloc(10), - }, err => { - checkNoError(err); - return requestGet({ PartNumber: '1' }, (err, data) => { - checkIntegerHeader(data.ContentLength, 10); - const md5Hash = crypto.createHash('md5'); - const md5HashExpected = crypto.createHash('md5'); - const expected = Buffer.alloc(10); - assert.strictEqual( - md5Hash.update(data.Body).digest('hex'), - md5HashExpected.update(expected).digest('hex') - ); - done(); - }); })); + + const data = await requestGetPromise({ PartNumber: '1' }); + checkIntegerHeader(data.ContentLength, 10); + const md5Hash = crypto.createHash('md5'); + const md5HashExpected = crypto.createHash('md5'); + const expected = Buffer.alloc(10).fill(0); + const bodyText = await data.Body.transformToString(); + assert.strictEqual( + md5Hash.update(bodyText).digest('hex'), + md5HashExpected.update(expected).digest('hex') + ); + }); it('should not accept a part number greater than 1 for regular ' + - 'put object', done => - s3.putObject({ + 'put object', async () => { + await s3.send(new PutObjectCommand({ Bucket: bucketName, Key: objectName, Body: Buffer.alloc(10), - }, err => { - checkNoError(err); - return requestGet({ PartNumber: 2 }, err => { - checkError(err, 'InvalidPartNumber'); - done(); - }); })); + + await assert.rejects( + () => requestGetPromise({ PartNumber: 2 }), + err => { + checkError(err, 'InvalidPartNumber'); + return true; + } + ); + }); it('should not accept both PartNumber and Range as params', done => completeMPU(orderedPartNumbers, err => { @@ -887,20 +888,16 @@ describe('GET object', () => { })); it('should not include PartsCount response header for regular ' + - 'put object', done => { - s3.putObject({ + 'put object', 
async () => { + await s3.send(new PutObjectCommand({ Bucket: bucketName, Key: objectName, Body: Buffer.alloc(10), - }, err => { - assert.ifError(err); - requestGet({ PartNumber: 1 }, (err, data) => { - assert.ifError(err); - assert.strictEqual('PartsCount' in data, false, - 'PartsCount header is present.'); - done(); - }); - }); + })); + + const data = await requestGetPromise({ PartNumber: 1 }); + assert.strictEqual('PartsCount' in data, false, + 'PartsCount header is present.'); }); it('should include PartsCount response header for mpu object', @@ -927,7 +924,7 @@ describe('GET object', () => { next => completeMPU(orderedPartNumbers, next), next => createMPUAndPutTwoParts(partTwoBody, next), (uploadId, ETags, next) => - s3.completeMultipartUpload({ + s3.send(new CompleteMultipartUploadCommand({ Bucket: bucketName, Key: copyPartKey, MultipartUpload: { @@ -943,13 +940,15 @@ describe('GET object', () => { ], }, UploadId: uploadId, - }, next), + })).then(() => next()).catch(next), ], done)); - afterEach(done => s3.deleteObject({ - Bucket: bucketName, - Key: copyPartKey, - }, done)); + afterEach(async () => { + await s3.send(new DeleteObjectCommand({ + Bucket: bucketName, + Key: copyPartKey, + })); + }); it('should retrieve a part copied from an MPU', done => checkGetObjectPart(copyPartKey, 1, partOneSize, partOneBody, @@ -974,33 +973,31 @@ describe('GET object', () => { /* eslint-disable no-param-reassign */ // Overwrite part one. (uploadId, ETags, next) => - s3.uploadPart({ + s3.send(new UploadPartCommand({ Bucket: bucketName, Key: copyPartKey, PartNumber: 1, UploadId: uploadId, Body: partOneBody, - }, (err, data) => { - checkNoError(err); + })).then(data => { ETags[0] = data.ETag; return next(null, uploadId, ETags); - }), + }).catch(next), // Overwrite part one with an three-part object. 
(uploadId, ETags, next) => - s3.uploadPartCopy({ + s3.send(new UploadPartCopyCommand({ Bucket: bucketName, CopySource: `/${bucketName}/${objectName}`, Key: copyPartKey, PartNumber: 2, UploadId: uploadId, - }, (err, data) => { - checkNoError(err); - ETags[1] = data.ETag; + })).then(data => { + ETags[1] = data.CopyPartResult.ETag; return next(null, uploadId, ETags); - }), + }).catch(next), /* eslint-enable no-param-reassign */ (uploadId, ETags, next) => - s3.completeMultipartUpload({ + s3.send(new CompleteMultipartUploadCommand({ Bucket: bucketName, Key: copyPartKey, MultipartUpload: { @@ -1016,13 +1013,15 @@ describe('GET object', () => { ], }, UploadId: uploadId, - }, next), + })).then(() => next()).catch(next), ], done)); - afterEach(done => s3.deleteObject({ - Bucket: bucketName, - Key: copyPartKey, - }, done)); + afterEach(async () => { + await s3.send(new DeleteObjectCommand({ + Bucket: bucketName, + Key: copyPartKey, + })); + }); it('should retrieve a part that overwrote another part ' + 'originally copied from an MPU', done => @@ -1037,24 +1036,20 @@ describe('GET object', () => { }); describe('absent x-amz-website-redirect-location header', () => { - before(done => { + before(async () => { const params = { Bucket: bucketName, Key: objectName, }; - s3.putObject(params, err => done(err)); + await s3.send(new PutObjectCommand(params)); }); it('should return website redirect header if specified in ' + 'objectPUT request', done => { - s3.getObject({ Bucket: bucketName, Key: objectName }, - (err, res) => { - if (err) { - return done(err); - } - assert.strictEqual(res.WebsiteRedirectLocation, - undefined); - return done(); - }); + s3.send(new GetObjectCommand({ Bucket: bucketName, Key: objectName })).then(res => { + assert.strictEqual(res.WebsiteRedirectLocation, + undefined); + return done(); + }).catch(done); }); }); }); @@ -1070,7 +1065,7 @@ describeSkipIfCeph('GET object with object lock', () => { const bucket = 'bucket-with-lock'; const key = 'object-with-lock'; const formatDate = date => date.toString().slice(0, 20); - const mockDate = moment().add(1, 'days').toISOString(); + const mockDate = moment().add(1, 'days'); const mockMode = 'GOVERNANCE'; let versionId; @@ -1082,12 +1077,12 @@ describeSkipIfCeph('GET object with object lock', () => { ObjectLockMode: mockMode, ObjectLockLegalHoldStatus: 'ON', }; - return s3.createBucket({ + return s3.send(new CreateBucketCommand({ Bucket: bucket, ObjectLockEnabledForBucket: true, - }).promise() - .then(() => s3.putObject(params).promise()) - .then(() => s3.getObject({ Bucket: bucket, Key: key }).promise()) + })) + .then(() => s3.send(new PutObjectCommand(params))) + .then(() => s3.send(new GetObjectCommand({ Bucket: bucket, Key: key }))) /* eslint-disable no-return-assign */ .then(res => versionId = res.VersionId) .catch(err => { @@ -1097,8 +1092,8 @@ describeSkipIfCeph('GET object with object lock', () => { }); afterEach(() => changeLockPromise([{ bucket, key, versionId }], '') - .then(() => s3.listObjectVersions({ Bucket: bucket }).promise()) - .then(res => res.Versions.forEach(object => { + .then(() => s3.send(new ListObjectVersionsCommand({ Bucket: bucket }))) + .then(res => res.Versions?.forEach(object => { const params = [ { bucket, @@ -1112,18 +1107,17 @@ describeSkipIfCeph('GET object with object lock', () => { process.stdout.write('Emptying and deleting buckets\n'); return bucketUtil.empty(bucket); }) - .then(() => s3.deleteBucket({ Bucket: bucket }).promise()) + .then(() => s3.send(new DeleteBucketCommand({ Bucket: bucket }))) 
.catch(err => { process.stdout.write('Error in afterEach'); throw err; })); it('should return object lock headers if set on the object', done => { - s3.getObject({ Bucket: bucket, Key: key }, (err, res) => { - assert.ifError(err); + s3.send(new GetObjectCommand({ Bucket: bucket, Key: key })).then(res => { assert.strictEqual(res.ObjectLockMode, mockMode); const responseDate - = formatDate(res.ObjectLockRetainUntilDate.toISOString()); + = formatDate(res.ObjectLockRetainUntilDate); const expectedDate = formatDate(mockDate); assert.strictEqual(responseDate, expectedDate); assert.strictEqual(res.ObjectLockLegalHoldStatus, 'ON'); @@ -1135,7 +1129,7 @@ describeSkipIfCeph('GET object with object lock', () => { }, ]; changeObjectLock(objectWithLock, '', done); - }); + }).catch(done); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/getMPU_compatibleHeaders.js b/tests/functional/aws-node-sdk/test/object/getMPU_compatibleHeaders.js index 7f3758fa1f..22d1636c0d 100644 --- a/tests/functional/aws-node-sdk/test/object/getMPU_compatibleHeaders.js +++ b/tests/functional/aws-node-sdk/test/object/getMPU_compatibleHeaders.js @@ -1,4 +1,11 @@ const assert = require('assert'); +const { + CreateBucketCommand, + CreateMultipartUploadCommand, + UploadPartCommand, + CompleteMultipartUploadCommand, + GetObjectCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); @@ -36,18 +43,18 @@ describe('GET multipart upload object [Cache-Control, Content-Disposition, ' + return bucketUtil.deleteOne(bucketName); }) .catch(err => { - if (err.code !== 'NoSuchBucket') { + if (err.name !== 'NoSuchBucket') { process.stdout.write(`${err}\n`); throw err; } }) .then(() => { process.stdout.write('creating bucket\n'); - return s3.createBucket({ Bucket: bucketName }).promise(); + return s3.send(new CreateBucketCommand({ Bucket: bucketName })); }) .then(() => { process.stdout.write('initiating multipart upload\n'); - return s3.createMultipartUpload(params).promise(); + return s3.send(new CreateMultipartUploadCommand(params)); }) .then(res => { uploadId = res.UploadId; @@ -75,14 +82,14 @@ describe('GET multipart upload object [Cache-Control, Content-Disposition, ' + () => { const params = { Bucket: bucketName, Key: 'key', PartNumber: 1, UploadId: uploadId }; - return s3.uploadPart(params).promise() + return s3.send(new UploadPartCommand(params)) .catch(err => { process.stdout.write(`Error in uploadPart ${err}\n`); throw err; }) .then(res => { process.stdout.write('about to complete multipart upload\n'); - return s3.completeMultipartUpload({ + return s3.send(new CompleteMultipartUploadCommand({ Bucket: bucketName, Key: objectName, UploadId: uploadId, @@ -91,7 +98,7 @@ describe('GET multipart upload object [Cache-Control, Content-Disposition, ' + { ETag: res.ETag, PartNumber: 1 }, ], }, - }).promise(); + })); }) .catch(err => { process.stdout.write(`Error completing upload ${err}\n`); @@ -99,9 +106,9 @@ describe('GET multipart upload object [Cache-Control, Content-Disposition, ' + }) .then(() => { process.stdout.write('about to get object\n'); - return s3.getObject({ + return s3.send(new GetObjectCommand({ Bucket: bucketName, Key: objectName, - }).promise(); + })); }) .catch(err => { process.stdout.write(`Error getting object ${err}\n`); diff --git a/tests/functional/aws-node-sdk/test/object/getObjTagging.js b/tests/functional/aws-node-sdk/test/object/getObjTagging.js index 73972590b1..282866e7db 100644 --- 
a/tests/functional/aws-node-sdk/test/object/getObjTagging.js +++ b/tests/functional/aws-node-sdk/test/object/getObjTagging.js @@ -1,8 +1,16 @@ const assert = require('assert'); -const async = require('async'); +const { + CreateBucketCommand, + PutObjectCommand, + PutObjectTaggingCommand, + GetObjectTaggingCommand, + PutBucketAclCommand, + DeleteObjectTaggingCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); +const checkError = require('../../lib/utility/checkError'); const bucketName = 'testtaggingbucket'; const objectName = 'testtaggingobject'; @@ -19,12 +27,6 @@ const taggingConfig = { TagSet: [ }, ] }; -function _checkError(err, code, statusCode) { - assert(err, 'Expected error but found none'); - assert.strictEqual(err.code, code); - assert.strictEqual(err.statusCode, statusCode); -} - describe('GET object taggings', () => { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); @@ -32,13 +34,9 @@ describe('GET object taggings', () => { const otherAccountBucketUtility = new BucketUtility('lisa', {}); const otherAccountS3 = otherAccountBucketUtility.s3; - beforeEach(done => { - async.waterfall([ - next => s3.createBucket({ Bucket: bucketName }, err => - next(err)), - next => s3.putObject({ Bucket: bucketName, Key: objectName }, - err => next(err)), - ], done); + beforeEach(async () => { + await s3.send(new CreateBucketCommand({ Bucket: bucketName })); + await s3.send(new PutObjectCommand({ Bucket: bucketName, Key: objectName })); }); afterEach(() => { @@ -54,111 +52,131 @@ describe('GET object taggings', () => { }); }); - it('should return appropriate tags after putting tags', done => { - s3.putObjectTagging({ + it('should return appropriate tags after putting tags', async () => { + await s3.send(new PutObjectTaggingCommand({ Bucket: bucketName, Key: objectName, Tagging: taggingConfig, - }, err => { - assert.ifError(err, `putObjectTagging error: ${err}`); - s3.getObjectTagging({ Bucket: bucketName, Key: objectName }, - (err, data) => { - assert.ifError(err, `getObjectTagging error: ${err}`); - assert.deepStrictEqual(data, taggingConfig); - done(); - }); - }); - }); + })); - it('should return no tag after putting and deleting tags', done => { - async.waterfall([ - next => s3.putObjectTagging({ - Bucket: bucketName, - Key: objectName, - Tagging: taggingConfig, - }, err => next(err)), - next => s3.deleteObjectTagging({ Bucket: bucketName, - Key: objectName }, err => next(err)), - next => s3.getObjectTagging({ Bucket: bucketName, - Key: objectName }, (err, data) => next(err, data)), - ], (err, data) => { - assert.ifError(err, `error: ${err}`); - assert.deepStrictEqual(data.TagSet, []); - return done(); - }); + const data = await s3.send(new GetObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + })); + + assert.deepStrictEqual(data.TagSet, taggingConfig.TagSet); }); - it('should return empty array after putting no tag', done => { - s3.getObjectTagging({ Bucket: bucketName, Key: objectName }, - (err, data) => { - assert.ifError(err, `getObjectTagging error: ${err}`); - assert.deepStrictEqual(data.TagSet, []); - done(); - }); + it('should return no tag after putting and deleting tags', async () => { + await s3.send(new PutObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + Tagging: taggingConfig, + })); + await s3.send(new DeleteObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + })); + const data = await s3.send(new 
GetObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + })); + assert.deepStrictEqual(data.TagSet, []); }); - it('should return NoSuchKey getting tag to a non-existing object', - done => { - s3.getObjectTagging({ + it('should return empty array after putting no tag', + async () => { + const data = await s3.send(new GetObjectTaggingCommand({ Bucket: bucketName, - Key: 'nonexisting', - }, err => { - _checkError(err, 'NoSuchKey', 404); - done(); - }); + Key: objectName, + })); + + assert.deepStrictEqual(data.TagSet, []); }); - it('should return 403 AccessDenied getting tag with another account', - done => { - otherAccountS3.getObjectTagging({ Bucket: bucketName, Key: - objectName }, err => { - _checkError(err, 'AccessDenied', 403); - done(); - }); + it('should return NoSuchKey getting tag set to a non-existing object', + async () => { + try { + await s3.send(new GetObjectTaggingCommand({ + Bucket: bucketName, + Key: 'nonexisting', + })); + throw new Error('Expected NoSuchKey error'); + } catch (err) { + checkError(err, 'NoSuchKey', 404); + } + }); + + it('should return 403 AccessDenied getting tag set with another ' + + 'account', async () => { + try { + await otherAccountS3.send(new GetObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + })); + throw new Error('Expected AccessDenied error'); + } catch (err) { + checkError(err, 'AccessDenied', 403); + } }); it('should return 403 AccessDenied getting tag with a different ' + 'account to an object with ACL "public-read-write"', - done => { - s3.putObjectAcl({ Bucket: bucketName, Key: objectName, - ACL: 'public-read-write' }, err => { - if (err) { - return done(err); - } - return otherAccountS3.getObjectTagging({ Bucket: bucketName, - Key: objectName }, err => { - _checkError(err, 'AccessDenied', 403); - done(); - }); - }); + async () => { + try { + await s3.send(new PutBucketAclCommand({ + Bucket: bucketName, + ACL: 'public-read-write', + })); + await otherAccountS3.send(new GetObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + })); + throw new Error('Expected AccessDenied error'); + } catch (err) { + checkError(err, 'AccessDenied', 403); + } }); - it('should return 403 AccessDenied getting tag to an object ' + - 'in a bucket created with a different account', - done => { - async.waterfall([ - next => s3.putBucketAcl({ Bucket: bucketName, ACL: - 'public-read-write' }, err => next(err)), - next => otherAccountS3.putObject({ Bucket: bucketName, Key: - objectNameAcl }, err => next(err)), - next => otherAccountS3.getObjectTagging({ Bucket: bucketName, - Key: objectNameAcl }, err => next(err)), - ], err => { - _checkError(err, 'AccessDenied', 403); - done(); - }); + it('should return 403 AccessDenied getting tag set to an object' + + ' in a bucket created with a different account', async () => { + await s3.send(new PutBucketAclCommand({ + Bucket: bucketName, + ACL: 'public-read-write', + })); + await otherAccountS3.send(new PutObjectCommand({ + Bucket: bucketName, + Key: objectNameAcl, + })); + + try { + await otherAccountS3.send(new GetObjectTaggingCommand({ + Bucket: bucketName, + Key: objectNameAcl, + })); + throw new Error('Expected AccessDenied error'); + } catch (err) { + checkError(err, 'AccessDenied', 403); + } }); it('should get tag to an object in a bucket created with same ' + - 'account', done => { - async.waterfall([ - next => s3.putBucketAcl({ Bucket: bucketName, ACL: - 'public-read-write' }, err => next(err)), - next => otherAccountS3.putObject({ Bucket: bucketName, Key: - objectNameAcl }, err 
=> next(err)),
-                next => s3.getObjectTagging({ Bucket: bucketName,
-                    Key: objectNameAcl }, err => next(err)),
-            ], done);
+        'account', async () => {
+            await s3.send(new PutBucketAclCommand({
+                Bucket: bucketName,
+                ACL: 'public-read-write',
+            }));
+            await otherAccountS3.send(new PutObjectCommand({
+                Bucket: bucketName,
+                Key: objectNameAcl,
+            }));
+
+            const data = await s3.send(new GetObjectTaggingCommand({
+                Bucket: bucketName,
+                Key: objectNameAcl,
+            }));
+
+            assert.deepStrictEqual(data.TagSet, []);
         });
     });
 });
diff --git a/tests/functional/aws-node-sdk/test/object/getObjectLegalHold.js b/tests/functional/aws-node-sdk/test/object/getObjectLegalHold.js
index 71fcfa4a7e..424d52de88 100644
--- a/tests/functional/aws-node-sdk/test/object/getObjectLegalHold.js
+++ b/tests/functional/aws-node-sdk/test/object/getObjectLegalHold.js
@@ -1,5 +1,12 @@
 const { promisify } = require('util');
 const assert = require('assert');
+const {
+    CreateBucketCommand,
+    PutObjectCommand,
+    PutObjectLegalHoldCommand,
+    GetObjectLegalHoldCommand,
+    DeleteObjectCommand,
+} = require('@aws-sdk/client-s3');
 const withV4 = require('../support/withV4');
 const BucketUtility = require('../../lib/utility/bucket-util');
@@ -24,34 +31,30 @@ describeSkipIfCeph('GET object legal hold', () => {
     const otherAccountS3 = otherAccountBucketUtility.s3;
     let versionId;

-    beforeEach(() => {
+    beforeEach(async () => {
         process.stdout.write('Putting buckets and objects\n');
-        return s3.createBucket({
+        await s3.send(new CreateBucketCommand({
             Bucket: bucket,
             ObjectLockEnabledForBucket: true,
-        }).promise()
-        .then(() => s3.createBucket({ Bucket: unlockedBucket }).promise())
-        .then(() => s3.putObject({ Bucket: unlockedBucket, Key: key }).promise())
-        .then(() => s3.putObject({ Bucket: bucket, Key: keyNoHold }).promise())
-        .then(() => s3.putObject({ Bucket: bucket, Key: key }).promise())
-        .then(res => {
-            versionId = res.VersionId;
-            process.stdout.write('Putting object legal hold\n');
-            return s3.putObjectLegalHold({
-                Bucket: bucket,
-                Key: key,
-                LegalHold: { Status: 'ON' },
-            }).promise();
-        })
-        .catch(err => {
-            process.stdout.write('Error in beforeEach\n');
-            throw err;
-        });
+        }));
+        await s3.send(new CreateBucketCommand({ Bucket: unlockedBucket }));
+        await s3.send(new PutObjectCommand({ Bucket: unlockedBucket, Key: key }));
+        await s3.send(new PutObjectCommand({ Bucket: bucket, Key: keyNoHold }));
+
+        const res = await s3.send(new PutObjectCommand({ Bucket: bucket, Key: key }));
+        versionId = res.VersionId;
+        process.stdout.write('Putting object legal hold\n');
+        await s3.send(new PutObjectLegalHoldCommand({
+            Bucket: bucket,
+            Key: key,
+            LegalHold: { Status: 'ON' },
+        }));
     });

     afterEach(() => {
         process.stdout.write('Removing object lock\n');
-        return changeLockPromise([{ bucket, key, versionId }], '')
+        return changeLockPromise([{ bucket, key, versionId }], {})
         .then(() => {
             process.stdout.write('Emptying and deleting buckets\n');
             return bucketUtil.empty(bucket);
@@ -64,95 +67,78 @@
         });
     });

-    it('should return AccessDenied getting legal hold with another account',
-        done => {
-            otherAccountS3.getObjectLegalHold({
-                Bucket: bucket,
-                Key: key,
-            }, err => {
-                checkError(err, 'AccessDenied', 403);
-                done();
-            });
-        });
-
-    it('should return NoSuchKey error if key does not exist', done => {
-        s3.getObjectLegalHold({
+    it('should return AccessDenied getting legal hold with another account',
+        () => otherAccountS3.send(new 
GetObjectLegalHoldCommand({
             Bucket: bucket,
-            Key: 'thiskeydoesnotexist',
-        }, err => {
-            checkError(err, 'NoSuchKey', 404);
-            done();
-        });
-    });
+            Key: key,
+        })).then(() => {
+            throw new Error('Expected AccessDenied error');
+        }).catch(err => {
+            checkError(err, 'AccessDenied', 403);
+        })
+    );

-    it('should return NoSuchVersion error if version does not exist', done => {
-        s3.getObjectLegalHold({
+    it('should return MethodNotAllowed if object version is delete marker', () => s3.send(new DeleteObjectCommand({
             Bucket: bucket,
             Key: key,
-            VersionId: '012345678901234567890123456789012',
-        }, err => {
-            checkError(err, 'NoSuchVersion', 404);
-            done();
-        });
-    });
-
-    it('should return MethodNotAllowed if object version is delete marker', done => {
-        s3.deleteObject({ Bucket: bucket, Key: key }, (err, res) => {
-            assert.ifError(err);
-            s3.getObjectLegalHold({
+    })).then(res => s3.send(new GetObjectLegalHoldCommand({
                 Bucket: bucket,
                 Key: key,
                 VersionId: res.VersionId,
-            }, err => {
+    })).then(() => {
+        throw new Error('Expected MethodNotAllowed error');
+    }).catch(err => {
                 checkError(err, 'MethodNotAllowed', 405);
-                done();
-            });
-        });
-    });
-
-    it('should return NoSuchKey if latest version is delete marker', done => {
-        s3.deleteObject({ Bucket: bucket, Key: key }, err => {
+    })).catch(err => {
             assert.ifError(err);
-            s3.getObjectLegalHold({
+    })
+    );
+
+    it('should return NoSuchKey if latest version is delete marker', () => s3.send(new DeleteObjectCommand({
+        Bucket: bucket,
+        Key: key,
+    })).then(() => s3.send(new GetObjectLegalHoldCommand({
                 Bucket: bucket,
                 Key: key,
-            }, err => {
+    })).then(() => {
+        throw new Error('Expected NoSuchKey error');
+    }).catch(err => {
                 checkError(err, 'NoSuchKey', 404);
-                done();
-            });
-        });
-    });
+    })).catch(err => {
+        assert.ifError(err);
+    })
+    );

     it('should return InvalidRequest error getting legal hold of object ' +
-        'inside object lock disabled bucket', done => {
-        s3.getObjectLegalHold({
+        'inside object lock disabled bucket', () => s3.send(new GetObjectLegalHoldCommand({
             Bucket: unlockedBucket,
             Key: key,
-        }, err => {
+    })).then(() => {
+        throw new Error('Expected InvalidRequest error');
+    }).catch(err => {
             checkError(err, 'InvalidRequest', 400);
-            done();
-        });
-    });
+    })
+    );

-    it('should return NoSuchObjectLockConfiguration if no legal hold set', done => {
-        s3.getObjectLegalHold({
-            Bucket: bucket,
-            Key: keyNoHold,
-        }, err => {
+    it('should return NoSuchObjectLockConfiguration if no legal hold set', () =>
+        s3.send(new GetObjectLegalHoldCommand({
+            Bucket: bucket,
+            Key: keyNoHold,
+        })).then(() => {
+            throw new Error('Expected NoSuchObjectLockConfiguration error');
+        }).catch(err => {
             checkError(err, 'NoSuchObjectLockConfiguration', 404);
-            done();
-        });
-    });
+        })
+    );

-    it('should get object legal hold', done => {
-        s3.getObjectLegalHold({
+    it('should get object legal hold', async () => {
+        const res = await s3.send(new GetObjectLegalHoldCommand({
             Bucket: bucket,
             Key: key,
-        }, (err, res) => {
-            assert.ifError(err);
-            assert.deepStrictEqual(res.LegalHold, { Status: 'ON' });
-            changeObjectLock([{ bucket, key, versionId }], '', done);
-        });
+        }));
+
+        assert.deepStrictEqual(res.LegalHold, { Status: 'ON' });
+        await changeLockPromise([{ bucket, key, versionId }], {});
     });
     });
 });
diff --git a/tests/functional/aws-node-sdk/test/object/getPartSize.js b/tests/functional/aws-node-sdk/test/object/getPartSize.js
index 815bad5c3e..cda40988c2 100644
--- a/tests/functional/aws-node-sdk/test/object/getPartSize.js
+++ 
b/tests/functional/aws-node-sdk/test/object/getPartSize.js @@ -1,9 +1,19 @@ const assert = require('assert'); -const async = require('async'); +const { + CreateBucketCommand, + HeadObjectCommand, + PutObjectCommand, + DeleteObjectCommand, + DeleteBucketCommand, + CreateMultipartUploadCommand, + UploadPartCommand, + CompleteMultipartUploadCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); const { maximumAllowedPartCount } = require('../../../../../constants'); +const checkError = require('../../lib/utility/checkError'); const bucket = 'mpu-test-bucket'; const object = 'mpu-test-object'; @@ -19,18 +29,6 @@ const invalidPartNumbers = [-1, 0, maximumAllowedPartCount + 1]; let ETags = []; -// Because HEAD has no body, the SDK (v2) returns a generic code such as: -// 400 BadRequest -// 403 Forbidden -// 404 NotFound -// ... -// It will fall back to HTTP statusCode -// Example: 416 InvalidRange will be 416 416 -function checkError(err, statusCode, code) { - assert.strictEqual(err.statusCode, statusCode); - assert.strictEqual(err.code, code); -} - function checkNoError(err) { assert.equal(err, null, `Expected success, got error ${JSON.stringify(err)}`); @@ -44,87 +42,94 @@ describe('Part size tests with object head', () => { withV4(sigCfg => { let bucketUtil; let s3; + let uploadId; function headObject(fields, cb) { - s3.headObject({ + s3.send(new HeadObjectCommand({ Bucket: bucket, Key: object, ...fields, - }, cb); + })).then(data => { + cb(null, data); + }).catch(err => { + cb(err); + }); } - before(function beforeF(done) { + before(async () => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - async.series([ - next => s3.createBucket({ Bucket: bucket }, err => next(err)), - next => s3.createMultipartUpload({ + // Create bucket + await s3.send(new CreateBucketCommand({ Bucket: bucket })); + + // Create multipart upload + const uploadResult = await s3.send(new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: object + })); + uploadId = uploadResult.UploadId; + + // Upload parts + const uploadPromises = partNumbers.map(async partNumber => { + const uploadPartParams = { Bucket: bucket, - Key: object - }, (err, data) => { - checkNoError(err); - this.currentTest.UploadId = data.UploadId; - return next(); - }), - next => async.mapSeries(partNumbers, (partNumber, callback) => { - const uploadPartParams = { - Bucket: bucket, - Key: object, + Key: object, + PartNumber: partNumber + 1, + UploadId: uploadId, + Body: generateContent(partNumber + 1), + }; + const result = await s3.send(new UploadPartCommand(uploadPartParams)); + return result.ETag; + }); + + ETags = await Promise.all(uploadPromises); + + // Put empty object + await s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: emptyObject, + Body: '', + })); + + // Put non-MPU object + await s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: nonMpuObject, + Body: generateContent(0), + })); + + // Complete multipart upload + const completeParams = { + Bucket: bucket, + Key: object, + MultipartUpload: { + Parts: partNumbers.map(partNumber => ({ + ETag: ETags[partNumber], PartNumber: partNumber + 1, - UploadId: this.currentTest.UploadId, - Body: generateContent(partNumber + 1), - }; - - return s3.uploadPart(uploadPartParams, - (err, data) => { - if (err) { - return callback(err); - } - return callback(null, data.ETag); - }); - }, (err, results) => { - checkNoError(err); - ETags = results; - return next(); - }), - 
next => { - const params = { - Bucket: bucket, - Key: object, - MultipartUpload: { - Parts: partNumbers.map(partNumber => ({ - ETag: ETags[partNumber], - PartNumber: partNumber + 1, - })), - }, - UploadId: this.currentTest.UploadId, - }; - return s3.completeMultipartUpload(params, next); + })), }, - next => s3.putObject({ - Bucket: bucket, - Key: emptyObject, - Body: '', - }, next), - next => s3.putObject({ - Bucket: bucket, - Key: nonMpuObject, - Body: generateContent(0), - }, next), - ], err => { - checkNoError(err); - done(); - }); + UploadId: uploadId, + }; + await s3.send(new CompleteMultipartUploadCommand(completeParams)); }); - after(done => { - async.series([ - next => s3.deleteObject({ Bucket: bucket, Key: object }, next), - next => s3.deleteObject({ Bucket: bucket, Key: emptyObject }, next), - next => s3.deleteObject({ Bucket: bucket, Key: nonMpuObject }, next), - next => s3.deleteBucket({ Bucket: bucket }, next), - ], done); + after(async () => { + await s3.send(new DeleteObjectCommand({ + Bucket: bucket, + Key: object + })); + await s3.send(new DeleteObjectCommand({ + Bucket: bucket, + Key: emptyObject + })); + await s3.send(new DeleteObjectCommand({ + Bucket: bucket, + Key: nonMpuObject + })); + await s3.send(new DeleteBucketCommand({ + Bucket: bucket + })); }); it('should return the total size of the object ' + @@ -154,23 +159,20 @@ describe('Part size tests with object head', () => { invalidPartNumbers.forEach(part => { it(`should return an error when --part-number is set to ${part}`, done => { - headObject({ PartNumber: part }, (err, data) => { - checkError(err, 400, 'BadRequest'); - assert.strictEqual(data, null); + headObject({ PartNumber: part }, err => { + assert.equal(err.$metadata.httpStatusCode, 400); done(); }); }); }); - it('should return an error when incorrect --part-number is used', - done => { - headObject({ PartNumber: partNumbers.length + 1 }, - (err, data) => { - checkError(err, 416, 416); - assert.strictEqual(data, null); - done(); - }); + it('should return an error when incorrect --part-number is used', done => { + headObject({ PartNumber: partNumbers.length + 1 }, + err => { + checkError(err, '', 416); + done(); }); + }); it('should return content-length 0 when requesting part 1 of empty object', done => { headObject({ Key: emptyObject, PartNumber: 1 }, (err, data) => { @@ -181,12 +183,11 @@ describe('Part size tests with object head', () => { }); it('should return an error when requesting part 2 of empty object', done => { - headObject({ Key: emptyObject, PartNumber: 2 }, (err, data) => { - checkError(err, 416, 416); - assert.strictEqual(data, null); + headObject({ Key: emptyObject, PartNumber: 2 }, err => { + checkError(err, '', 416); done(); }); - }); + }); it('should return content-length requesting part 1 of non-MPU object', done => { headObject({ Key: nonMpuObject, PartNumber: 1 }, (err, data) => { @@ -198,8 +199,8 @@ describe('Part size tests with object head', () => { it('should return an error when requesting part 2 of non-MPU object', done => { headObject({ Key: nonMpuObject, PartNumber: 2 }, (err, data) => { - checkError(err, 416, 416); - assert.strictEqual(data, null); + checkError(err, '', 416); + assert.strictEqual(data, undefined); done(); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/getRange.js b/tests/functional/aws-node-sdk/test/object/getRange.js index d95cb19b19..236d0e7618 100644 --- a/tests/functional/aws-node-sdk/test/object/getRange.js +++ b/tests/functional/aws-node-sdk/test/object/getRange.js @@ -1,4 +1,9 @@ 
const assert = require('assert'); +const { + GetObjectCommand, + CreateBucketCommand, + PutObjectCommand +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); @@ -13,49 +18,45 @@ const endRangeTest = (inputRange, expectedRange, cb) => { Range: inputRange, }; - s3.getObject(params, (err, data) => { - assert.strictEqual(data.ContentLength, 90); - assert.strictEqual(data.ContentRange, expectedRange); - assert.deepStrictEqual(data.Body, Buffer.allocUnsafe(90).fill(1)); - cb(); - }); + s3.send(new GetObjectCommand(params)) + .then(async data => { + assert.strictEqual(data.ContentLength, 90); + assert.strictEqual(data.ContentRange, expectedRange); + const chunks = []; + for await (const chunk of data.Body) { + chunks.push(chunk); + } + const bodyBuffer = Buffer.concat(chunks); + const expectedBuffer = Buffer.allocUnsafe(90).fill(1); + assert.deepStrictEqual(bodyBuffer, expectedBuffer); + cb(); + }) + .catch(err => { + cb(err); + }); }; describe('aws-node-sdk range test of large end position', () => { withV4(sigCfg => { let bucketUtil; - beforeEach(() => { + beforeEach(async () => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucketName }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }).then(() => - s3.putObject({ - Bucket: bucketName, - Key: objName, - Body: Buffer.allocUnsafe(2890).fill(0, 0, 2800) - .fill(1, 2800), - }).promise()) - .catch(err => { - process.stdout.write(`Error in beforeEach: ${err}\n`); - throw err; - }); + await s3.send(new CreateBucketCommand({ Bucket: bucketName })); + await s3.send(new PutObjectCommand({ + Bucket: bucketName, + Key: objName, + Body: Buffer.allocUnsafe(2890).fill(0, 0, 2800) + .fill(1, 2800), + })); }); - afterEach(() => { + afterEach(async () => { process.stdout.write('Emptying bucket'); - return bucketUtil.empty(bucketName) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(bucketName); - }) - .catch(err => { - process.stdout.write(`Error in afterEach: ${err}\n`); - throw err; - }); + await bucketUtil.empty(bucketName); + process.stdout.write('Deleting bucket'); + await bucketUtil.deleteOne(bucketName); }); it('should get the final 90 bytes of a 2890 byte object for a byte ' + diff --git a/tests/functional/aws-node-sdk/test/object/getRetention.js b/tests/functional/aws-node-sdk/test/object/getRetention.js index 21e60e0b0e..74203ab5f9 100644 --- a/tests/functional/aws-node-sdk/test/object/getRetention.js +++ b/tests/functional/aws-node-sdk/test/object/getRetention.js @@ -1,6 +1,13 @@ const { promisify } = require('util'); const assert = require('assert'); const moment = require('moment'); +const { + CreateBucketCommand, + PutObjectCommand, + PutObjectRetentionCommand, + GetObjectRetentionCommand, + DeleteObjectCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); @@ -17,19 +24,12 @@ const retainDate = moment().add(1, 'days').toISOString(); const retentionConfig = { Mode: 'GOVERNANCE', - RetainUntilDate: retainDate, + RetainUntilDate: new Date(retainDate), }; -// aws sdk manipulates dates by removing milliseconds -// and converting date strings to date objects -function manipulateDate() { - const noMillis = `${retainDate.slice(0, 19)}.000Z`; - return new Date(noMillis); -} - const expectedConfig = { Mode: 
'GOVERNANCE', - RetainUntilDate: manipulateDate(), + RetainUntilDate: new Date(retainDate), }; const isCEPH = process.env.CI_CEPH !== undefined; @@ -43,125 +43,121 @@ describeSkipIfCeph('GET object retention', () => { const otherAccountS3 = otherAccountBucketUtility.s3; let versionId; - beforeEach(() => { + beforeEach(async () => { process.stdout.write('Putting buckets and objects\n'); - return s3.createBucket({ + await s3.send(new CreateBucketCommand({ Bucket: bucketName, ObjectLockEnabledForBucket: true, - }).promise() - .then(() => s3.createBucket({ Bucket: unlockedBucket }).promise()) - .then(() => s3.putObject({ Bucket: unlockedBucket, Key: objectName }).promise()) - .then(() => s3.putObject({ Bucket: bucketName, Key: noRetentionObject }).promise()) - .then(() => s3.putObject({ Bucket: bucketName, Key: objectName }).promise()) - .then(res => { - versionId = res.VersionId; - process.stdout.write('Putting object retention\n'); - return s3.putObjectRetention({ - Bucket: bucketName, - Key: objectName, - Retention: retentionConfig, - }).promise(); - }) - .catch(err => { - process.stdout.write('Error in beforeEach\n'); - throw err; - }); + })); + await s3.send(new CreateBucketCommand({ Bucket: unlockedBucket })); + await s3.send(new PutObjectCommand({ Bucket: unlockedBucket, Key: objectName })); + await s3.send(new PutObjectCommand({ Bucket: bucketName, Key: noRetentionObject })); + + const res = await s3.send(new PutObjectCommand({ Bucket: bucketName, Key: objectName })); + versionId = res.VersionId; + + process.stdout.write('Putting object retention\n'); + await s3.send(new PutObjectRetentionCommand({ + Bucket: bucketName, + Key: objectName, + Retention: retentionConfig, + })); }); - afterEach(() => { - process.stdout.write('Removing object lock\n'); - return changeLockPromise([{ bucket: bucketName, key: objectName, versionId }], '') - .then(() => { - process.stdout.write('Emptying and deleting buckets\n'); - return bucketUtil.empty(bucketName); - }) - .then(() => bucketUtil.empty(unlockedBucket)) - .then(() => bucketUtil.deleteMany([bucketName, unlockedBucket])) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + afterEach(async () => { + await changeLockPromise([{ bucket: bucketName, key: objectName, versionId }], ''); + await bucketUtil.empty(bucketName); + await bucketUtil.empty(unlockedBucket); + await bucketUtil.deleteMany([bucketName, unlockedBucket]); }); it('should return AccessDenied putting retention with another account', - done => { - otherAccountS3.getObjectRetention({ - Bucket: bucketName, - Key: objectName, - }, err => { + async () => { + try { + await otherAccountS3.send(new GetObjectRetentionCommand({ + Bucket: bucketName, + Key: objectName, + })); + throw new Error('Expected AccessDenied error'); + } catch (err) { checkError(err, 'AccessDenied', 403); - done(); - }); + } }); - it('should return NoSuchKey error if key does not exist', done => { - s3.getObjectRetention({ - Bucket: bucketName, - Key: 'thiskeydoesnotexist', - }, err => { + it('should return NoSuchKey error if key does not exist', async () => { + try { + await s3.send(new GetObjectRetentionCommand({ + Bucket: bucketName, + Key: 'thiskeydoesnotexist', + })); + throw new Error('Expected NoSuchKey error'); + } catch (err) { checkError(err, 'NoSuchKey', 404); - done(); - }); + } }); - it('should return NoSuchVersion error if version does not exist', done => { - s3.getObjectRetention({ - Bucket: bucketName, - Key: objectName, - VersionId: '012345678901234567890123456789012', - }, 
err => { + it('should return NoSuchVersion error if version does not exist', async () => { + try { + await s3.send(new GetObjectRetentionCommand({ + Bucket: bucketName, + Key: objectName, + VersionId: '012345678901234567890123456789012', + })); + throw new Error('Expected NoSuchVersion error'); + } catch (err) { checkError(err, 'NoSuchVersion', 404); - done(); - }); + } }); it('should return MethodNotAllowed if object version is delete marker', - done => { - s3.deleteObject({ Bucket: bucketName, Key: objectName }, (err, res) => { - assert.ifError(err); - s3.getObjectRetention({ + async () => { + const res = await s3.send(new DeleteObjectCommand({ Bucket: bucketName, Key: objectName })); + try { + await s3.send(new GetObjectRetentionCommand({ Bucket: bucketName, Key: objectName, VersionId: res.VersionId, - }, err => { - checkError(err, 'MethodNotAllowed', 405); - done(); - }); - }); + })); + throw new Error('Expected MethodNotAllowed error'); + } catch (err) { + checkError(err, 'MethodNotAllowed', 405); + } }); it('should return InvalidRequest error getting retention to object ' + - 'in bucket with no object lock enabled', done => { - s3.getObjectRetention({ - Bucket: unlockedBucket, - Key: objectName, - }, err => { + 'in bucket with no object lock enabled', async () => { + try { + await s3.send(new GetObjectRetentionCommand({ + Bucket: unlockedBucket, + Key: objectName, + })); + throw new Error('Expected InvalidRequest error'); + } catch (err) { checkError(err, 'InvalidRequest', 400); - done(); - }); + } }); it('should return NoSuchObjectLockConfiguration if no retention set', - done => { - s3.getObjectRetention({ - Bucket: bucketName, - Key: noRetentionObject, - }, err => { + async () => { + try { + await s3.send(new GetObjectRetentionCommand({ + Bucket: bucketName, + Key: noRetentionObject, + })); + throw new Error('Expected NoSuchObjectLockConfiguration error'); + } catch (err) { checkError(err, 'NoSuchObjectLockConfiguration', 404); - done(); - }); + } }); - it('should get object retention', done => { - s3.getObjectRetention({ + it('should get object retention', async () => { + const res = await s3.send(new GetObjectRetentionCommand({ Bucket: bucketName, Key: objectName, - }, (err, res) => { - assert.ifError(err); - assert.deepStrictEqual(res.Retention, expectedConfig); - changeObjectLock([ - { bucket: bucketName, key: objectName, versionId }], '', done); - }); + })); + assert.deepStrictEqual(res.Retention, expectedConfig); + await changeLockPromise([ + { bucket: bucketName, key: objectName, versionId }], ''); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/initiateMPU.js b/tests/functional/aws-node-sdk/test/object/initiateMPU.js index 823ccd1fc2..eb6416973a 100644 --- a/tests/functional/aws-node-sdk/test/object/initiateMPU.js +++ b/tests/functional/aws-node-sdk/test/object/initiateMPU.js @@ -1,5 +1,10 @@ const assert = require('assert'); -const async = require('async'); +const { + CreateBucketCommand, + CreateMultipartUploadCommand, + AbortMultipartUploadCommand, + PutObjectCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); @@ -15,190 +20,209 @@ describe('Initiate MPU', () => { let bucketUtil; let s3; - beforeEach(() => { + beforeEach(async () => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - 
});
+        await s3.send(new CreateBucketCommand({ Bucket: bucket }));
     });

-    afterEach(() => bucketUtil.deleteOne(bucket));
+    afterEach(async () => await bucketUtil.deleteOne(bucket));

     it('should return InvalidRedirectLocation if initiate MPU ' +
         'with x-amz-website-redirect-location header that does not start ' +
-        'with \'http://\', \'https://\' or \'/\'', done => {
-        const params = { Bucket: bucket, Key: key,
-            WebsiteRedirectLocation: 'google.com' };
-        s3.createMultipartUpload(params, err => {
-            assert.strictEqual(err.code, 'InvalidRedirectLocation');
-            assert.strictEqual(err.statusCode, 400);
-            done();
-        });
+        'with \'http://\', \'https://\' or \'/\'', async () => {
+        const params = {
+            Bucket: bucket,
+            Key: key,
+            WebsiteRedirectLocation: 'google.com'
+        };
+
+        try {
+            await s3.send(new CreateMultipartUploadCommand(params));
+            throw new Error('Expected InvalidRedirectLocation error');
+        } catch (err) {
+            assert.strictEqual(err.name, 'InvalidRedirectLocation');
+            assert.strictEqual(err.$metadata.httpStatusCode, 400);
+        }
     });

     it('should return InvalidStorageClass error when x-amz-storage-class header is provided ' +
-        'and not equal to STANDARD', done =>
-        s3.createMultipartUpload({
+        'and not equal to STANDARD', done => {
+        s3.send(new CreateMultipartUploadCommand({
             Bucket: bucket,
             Key: key,
             StorageClass: 'COLD',
-        }, err => {
-            assert.strictEqual(err.code, 'InvalidStorageClass');
-            assert.strictEqual(err.statusCode, 400);
+        })).then(() => {
+            throw new Error('Expected InvalidStorageClass error');
+        }).catch(err => {
+            assert.strictEqual(err.name, 'InvalidStorageClass');
+            assert.strictEqual(err.$metadata.httpStatusCode, 400);
             done();
-        })
-    );
-
-    it('should return KeyTooLong error when key is longer than 915 bytes', done =>
-        s3.createMultipartUpload({ Bucket: bucket, Key: 'a'.repeat(916) }, err => {
-            assert(err, 'Expected err but did not find one');
-            assert.strictEqual(err.code, 'KeyTooLong');
-            assert.strictEqual(err.statusCode, 400);
+        });
+    });
+
+    it('should return KeyTooLong error when key is longer than 915 bytes', done => {
+        s3.send(new CreateMultipartUploadCommand({ Bucket: bucket, Key: 'a'.repeat(916) }))
+            .catch(err => {
+                assert.strictEqual(err.name, 'KeyTooLong');
+                assert.strictEqual(err.$metadata.httpStatusCode, 400);
                 done();
-        })
-    );
+            });
+    });

     it('should return error if initiating MPU w/ > 2KB user-defined md',
-            done => {
+            async () => {
             const metadata = genMaxSizeMetaHeaders();
             const params = { Bucket: bucket, Key: key, Metadata: metadata };
-            async.waterfall([
-                next => s3.createMultipartUpload(params, (err, data) => {
-                    assert.strictEqual(err, null, `Unexpected err: ${err}`);
-                    next(null, data.UploadId);
-                }),
-                (uploadId, next) => s3.abortMultipartUpload({
+            const data = await s3.send(new CreateMultipartUploadCommand(params));
+            const uploadId = data.UploadId;
+            await s3.send(new AbortMultipartUploadCommand({
                 Bucket: bucket,
                 Key: key,
                 UploadId: uploadId,
-            }, err => {
-                assert.strictEqual(err, null, `Unexpected err: ${err}`);
-                // add one more byte to push over limit for next call
-                metadata.header0 = `${metadata.header0}${'0'}`;
-                next();
-            }),
-            next => s3.createMultipartUpload(params, next),
-            ], err => {
-                assert(err, 'Expected err but did not find one');
-                assert.strictEqual(err.code, 'MetadataTooLarge');
-                assert.strictEqual(err.statusCode, 400);
-                done();
-            });
+            }));
+            // add one more byte to push over limit for next call
+            metadata.header0 = `${metadata.header0}${'0'}`;
+            try {
+                await s3.send(new CreateMultipartUploadCommand(params));
+                throw new Error('Expected MetadataTooLarge error');
+            } catch (err) {
+                assert.strictEqual(err.name, 'MetadataTooLarge');
+                assert.strictEqual(err.$metadata.httpStatusCode, 400);
+            }
         });

     describe('with tag set', () => {
         it('should be able to put object with 10 tags',
-            done => {
+            async () => {
             const taggingConfig = generateMultipleTagQuery(10);
-            s3.createMultipartUpload({
+            await s3.send(new CreateMultipartUploadCommand({
                 Bucket: bucket,
                 Key: key,
                 Tagging: taggingConfig,
-            }, err => {
-                assert.ifError(err);
-                done();
-            });
+            }));
         });

-        it('should allow putting 50 tags', done => {
+        it('should allow putting 50 tags', async () => {
             const taggingConfig = generateMultipleTagQuery(50);
-            s3.createMultipartUpload({
+            await s3.send(new CreateMultipartUploadCommand({
                 Bucket: bucket,
                 Key: key,
                 Tagging: taggingConfig,
-            }, err => {
-                assert.ifError(err);
-                done();
-            });
+            }));
         });

         it('should return BadRequest if putting more that 50 tags',
-            done => {
+            async () => {
             const taggingConfig = generateMultipleTagQuery(51);
-            s3.createMultipartUpload({
-                Bucket: bucket,
-                Key: key,
-                Tagging: taggingConfig,
-            }, err => {
-                assert(err, 'Expected err but did not find one');
-                assert.strictEqual(err.code, 'BadRequest');
-                assert.strictEqual(err.statusCode, 400);
-                done();
-            });
+
+            try {
+                await s3.send(new CreateMultipartUploadCommand({
+                    Bucket: bucket,
+                    Key: key,
+                    Tagging: taggingConfig,
+                }));
+                throw new Error('Expected BadRequest error');
+            } catch (err) {
+                assert.strictEqual(err.name, 'BadRequest');
+                assert.strictEqual(err.$metadata.httpStatusCode, 400);
+            }
         });

         it('should return InvalidArgument creating mpu tag with ' +
-        'invalid characters: %', done => {
+        'invalid characters: %', async () => {
             const value = 'value1%';
-            s3.createMultipartUpload({
-                Bucket: bucket,
-                Key: key,
-                Tagging: `key1=${value}`,
-            }, err => {
-                assert(err, 'Expected err but did not find one');
-                assert.strictEqual(err.code, 'InvalidArgument');
-                assert.strictEqual(err.statusCode, 400);
-                done();
-            });
+
+            try {
+                await s3.send(new CreateMultipartUploadCommand({
+                    Bucket: bucket,
+                    Key: key,
+                    Tagging: `key1=${value}`,
+                }));
+                throw new Error('Expected InvalidArgument error');
+            } catch (err) {
+                assert.strictEqual(err.name, 'InvalidArgument');
+                assert.strictEqual(err.$metadata.httpStatusCode, 400);
+            }
         });

         it('should return InvalidArgument creating mpu with ' +
-        'bad encoded tags', done => {
-            s3.createMultipartUpload({
-                Bucket: bucket,
-                Key: key,
-                Tagging: 'key1==value1',
-            }, err => {
-                assert(err, 'Expected err but did not find one');
-                assert.strictEqual(err.code, 'InvalidArgument');
-                assert.strictEqual(err.statusCode, 400);
-                done();
-            });
+        'bad encoded tags', async () => {
+            try {
+                await s3.send(new CreateMultipartUploadCommand({
+                    Bucket: bucket,
+                    Key: key,
+                    Tagging: 'key1==value1',
+                }));
+                throw new Error('Expected InvalidArgument error');
+            } catch 
(err) { + assert.strictEqual(err.name, 'InvalidArgument'); + assert.strictEqual(err.$metadata.httpStatusCode, 400); + } }); - it('should return InvalidArgument if tag with no key', done => { - s3.createMultipartUpload({ - Bucket: bucket, - Key: key, - Tagging: '=value1', - }, err => { - assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.statusCode, 400); - done(); - }); + it('should return InvalidArgument if tag with no key', async () => { + try { + await s3.send(new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: key, + Tagging: '=value1', + })); + throw new Error('Expected InvalidArgument error'); + } catch (err) { + assert.strictEqual(err.name, 'InvalidArgument'); + assert.strictEqual(err.$metadata.httpStatusCode, 400); + } }); it('should return InvalidArgument if using the same key twice', - done => { - s3.createMultipartUpload({ - Bucket: bucket, - Key: key, - Tagging: 'key1=value1&key1=value2', - }, err => { - assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.statusCode, 400); - done(); - }); + async () => { + try { + await s3.send(new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: key, + Tagging: 'key1=value1&key1=value2', + })); + throw new Error('Expected InvalidArgument error'); + } catch (err) { + assert.strictEqual(err.name, 'InvalidArgument'); + assert.strictEqual(err.$metadata.httpStatusCode, 400); + } }); it('should return InvalidArgument if using the same key twice ' + - 'and empty tags', done => { - s3.putObject({ - Bucket: bucket, - Key: key, - Tagging: '&&&&&&&&&&&&&&&&&key1=value1&key1=value2', - }, - err => { - assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.statusCode, 400); - done(); - }); + 'and empty tags', async () => { + try { + await s3.send(new PutObjectCommand({ + Bucket: bucket, + Key: key, + Tagging: '&&&&&&&&&&&&&&&&&key1=value1&key1=value2', + })); + throw new Error('Expected InvalidArgument error'); + } catch (err) { + assert.strictEqual(err.name, 'InvalidArgument'); + assert.strictEqual(err.$metadata.httpStatusCode, 400); + } }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/listParts.js b/tests/functional/aws-node-sdk/test/object/listParts.js index 8ed7474224..f670b33eb8 100644 --- a/tests/functional/aws-node-sdk/test/object/listParts.js +++ b/tests/functional/aws-node-sdk/test/object/listParts.js @@ -1,4 +1,11 @@ const assert = require('assert'); +const { + CreateBucketCommand, + CreateMultipartUploadCommand, + UploadPartCommand, + ListPartsCommand, + AbortMultipartUploadCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); @@ -8,72 +15,48 @@ const key = 'key'; const bodyFirstPart = Buffer.allocUnsafe(10).fill(0); const bodySecondPart = Buffer.allocUnsafe(20).fill(0); -function checkNoError(err) { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); -} - describe('List parts', () => { withV4(sigCfg => { - let bucketUtil; - let s3; + const bucketUtil = new BucketUtility('default', sigCfg); + const s3 = bucketUtil.s3; let uploadId; let secondEtag; - beforeEach(() => { - bucketUtil = new BucketUtility('default', sigCfg); - s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .then(() => s3.createMultipartUpload({ - Bucket: bucket, Key: key 
}).promise()) - .then(res => { - uploadId = res.UploadId; - return s3.uploadPart({ Bucket: bucket, Key: key, - PartNumber: 1, UploadId: uploadId, Body: bodyFirstPart, - }).promise(); - }).then(() => s3.uploadPart({ + beforeEach(async () => { + await s3.send(new CreateBucketCommand({ Bucket: bucket })); + const res = await s3.send(new CreateMultipartUploadCommand({ + Bucket: bucket, Key: key })); + uploadId = res.UploadId; + await s3.send(new UploadPartCommand({ Bucket: bucket, Key: key, + PartNumber: 1, UploadId: uploadId, Body: bodyFirstPart, + })); + const secondRes = await s3.send(new UploadPartCommand({ Bucket: bucket, Key: key, PartNumber: 2, UploadId: uploadId, Body: bodySecondPart, - }).promise()).then(res => { - secondEtag = res.ETag; - return secondEtag; - }) - .catch(err => { - process.stdout.write(`Error in beforeEach: ${err}\n`); - throw err; - }); + })); + secondEtag = secondRes.ETag; }); - afterEach(() => { + afterEach(async () => { process.stdout.write('Emptying bucket'); - return s3.abortMultipartUpload({ + await s3.send(new AbortMultipartUploadCommand({ Bucket: bucket, Key: key, UploadId: uploadId, - }).promise() - .then(() => bucketUtil.empty(bucket)) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(bucket); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + })); + await bucketUtil.empty(bucket); + process.stdout.write('Deleting bucket'); + await bucketUtil.deleteOne(bucket); }); - it('should only list the second part', done => { - s3.listParts({ - Bucket: bucket, - Key: key, - PartNumberMarker: 1, - UploadId: uploadId }, - (err, data) => { - checkNoError(err); - assert.strictEqual(data.Parts[0].PartNumber, 2); - assert.strictEqual(data.Parts[0].Size, 20); - assert.strictEqual(`${data.Parts[0].ETag}`, secondEtag); - done(); - }); - }); + it('should only list the second part', () => s3.send(new ListPartsCommand({ + Bucket: bucket, + Key: key, + PartNumberMarker: '1', + UploadId: uploadId, + })).then(data => { + assert.strictEqual(data.Parts[0].PartNumber, 2); + assert.strictEqual(data.Parts[0].Size, 20); + assert.strictEqual(`${data.Parts[0].ETag}`, secondEtag); + })); }); }); @@ -81,13 +64,13 @@ describe('List parts', () => { function createPart(sigCfg, bucketUtil, s3, key) { let uploadId; - return s3.createBucket({ Bucket: bucket }).promise() - .then(() => s3.createMultipartUpload({ - Bucket: bucket, Key: key }).promise()) + return s3.send(new CreateBucketCommand({ Bucket: bucket })) + .then(() => s3.send(new CreateMultipartUploadCommand({ + Bucket: bucket, Key: key }))) .then(res => { uploadId = res.UploadId; - return s3.uploadPart({ Bucket: bucket, Key: key, - PartNumber: 1, UploadId: uploadId, Body: bodyFirstPart }).promise(); + return s3.send(new UploadPartCommand({ Bucket: bucket, Key: key, + PartNumber: 1, UploadId: uploadId, Body: bodyFirstPart })); }) .then(() => Promise.resolve(uploadId)); } @@ -95,9 +78,9 @@ function createPart(sigCfg, bucketUtil, s3, key) { function deletePart(s3, bucketUtil, key, uploadId) { process.stdout.write('Emptying bucket'); - return s3.abortMultipartUpload({ + return s3.send(new AbortMultipartUploadCommand({ Bucket: bucket, Key: key, UploadId: uploadId, - }).promise() + })) .then(() => bucketUtil.empty(bucket)) .then(() => { process.stdout.write('Deleting bucket'); @@ -105,16 +88,14 @@ function deletePart(s3, bucketUtil, key, uploadId) { }); } -function testFunc(s3, bucket, key, uploadId, cb) { - s3.listParts({ - Bucket: bucket, - Key: key, - UploadId: 
uploadId }, - (err, data) => { - checkNoError(err); - assert.strictEqual(data.Key, key); - cb(); - }); +function testFunc(s3, bucket, key, uploadId) { + return s3.send(new ListPartsCommand({ + Bucket: bucket, + Key: key, + UploadId: uploadId, + })).then(data => { + assert.strictEqual(data.Key, key); + }); } describe('List parts - object keys with special characters: `&`', () => { @@ -135,7 +116,7 @@ describe('List parts - object keys with special characters: `&`', () => { afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); it('should list parts of an object with `&` in its key', - done => testFunc(s3, bucket, key, uploadId, done)); + () => testFunc(s3, bucket, key, uploadId)); }); }); @@ -157,7 +138,7 @@ describe('List parts - object keys with special characters: `"`', () => { afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); it('should list parts of an object with `"` in its key', - done => testFunc(s3, bucket, key, uploadId, done)); + () => testFunc(s3, bucket, key, uploadId)); }); }); @@ -179,7 +160,7 @@ describe('List parts - object keys with special characters: `\'`', () => { afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); it('should list parts of an object with `\'` in its key', - done => testFunc(s3, bucket, key, uploadId, done)); + () => testFunc(s3, bucket, key, uploadId)); }); }); @@ -201,7 +182,7 @@ describe('List parts - object keys with special characters: `<`', () => { afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); it('should list parts of an object with `<` in its key', - done => testFunc(s3, bucket, key, uploadId, done)); + () => testFunc(s3, bucket, key, uploadId)); }); }); @@ -223,6 +204,6 @@ describe('List parts - object keys with special characters: `>`', () => { afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); it('should list parts of an object with `>` in its key', - done => testFunc(s3, bucket, key, uploadId, done)); + () => testFunc(s3, bucket, key, uploadId)); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/mpu.js b/tests/functional/aws-node-sdk/test/object/mpu.js index 9b256ef7d7..58eaebd8d3 100644 --- a/tests/functional/aws-node-sdk/test/object/mpu.js +++ b/tests/functional/aws-node-sdk/test/object/mpu.js @@ -1,4 +1,11 @@ + const assert = require('assert'); +const { + CreateBucketCommand, + CreateMultipartUploadCommand, + AbortMultipartUploadCommand, + ListMultipartUploadsCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); @@ -20,8 +27,6 @@ function getExpectedObj(res, data) { UploadIdMarker: '', MaxUploads: 0, IsTruncated: false, - Uploads: [], - CommonPrefixes: [], }; } @@ -53,7 +58,6 @@ function getExpectedObj(res, data) { ID: userId, }, }], - CommonPrefixes: [], }; // If no `prefixVal` is given, it should not be included in the response. @@ -81,63 +85,60 @@ describe('aws-node-sdk test suite of listMultipartUploads', () => let s3; const data = {}; - beforeEach(() => { + beforeEach(async () => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - - return s3.createBucket({ Bucket: bucket }).promise() - .then(() => bucketUtil.getOwner()) - .then(res => { - // The owner of the bucket will also be the MPU upload owner. 
- data.displayName = res.DisplayName; - data.userId = res.ID; - }) - .then(() => s3.createMultipartUpload({ + await s3.send(new CreateBucketCommand({ Bucket: bucket })); + const ownerRes = await bucketUtil.getOwner(); + // The owner of the bucket will also be the MPU upload owner. + data.displayName = ownerRes.DisplayName; + data.userId = ownerRes.ID; + + const mpuRes = await s3.send(new CreateMultipartUploadCommand({ Bucket: bucket, Key: objectKey, - }).promise()) - .then(res => { - data.uploadId = res.UploadId; - }); + })); + data.uploadId = mpuRes.UploadId; }); - afterEach(() => - s3.abortMultipartUpload({ + afterEach(async () => { + await s3.send(new AbortMultipartUploadCommand({ Bucket: bucket, Key: objectKey, UploadId: data.uploadId, - }).promise() - .then(() => bucketUtil.empty(bucket)) - .then(() => bucketUtil.deleteOne(bucket)) - ); + })); + await bucketUtil.empty(bucket); + await bucketUtil.deleteOne(bucket); + }); - it('should list ongoing multipart uploads', () => - s3.listMultipartUploads({ Bucket: bucket }).promise() - .then(res => checkValues(res, data)) - ); + it('should list ongoing multipart uploads', async () => { + // eslint-disable-next-line no-unused-vars + const { $metadata, ...res } = await s3.send(new ListMultipartUploadsCommand({ Bucket: bucket })); + checkValues(res, data); + }); - it('should list ongoing multipart uploads with params', () => { + it('should list ongoing multipart uploads with params', async () => { data.prefixVal = 'to'; data.delimiter = 'test-delimiter'; data.maxUploads = 1; - - return s3.listMultipartUploads({ + // eslint-disable-next-line no-unused-vars + const {$metadata, ...res } = await s3.send(new ListMultipartUploadsCommand({ Bucket: bucket, Prefix: 'to', Delimiter: 'test-delimiter', MaxUploads: 1, - }).promise() - .then(res => checkValues(res, data)); + })); + checkValues(res, data); }); - it('should list 0 multipart uploads when MaxUploads is 0', () => { + it('should list 0 multipart uploads when MaxUploads is 0', async () => { data.maxUploads = 0; - - return s3.listMultipartUploads({ + // eslint-disable-next-line no-unused-vars + const { $metadata , ...res } = await s3.send(new ListMultipartUploadsCommand({ Bucket: bucket, MaxUploads: 0, - }).promise() - .then(res => checkValues(res, data)); + })); + checkValues(res, data); }); }) ); diff --git a/tests/functional/aws-node-sdk/test/object/mpuOrder.js b/tests/functional/aws-node-sdk/test/object/mpuOrder.js index a80dfe3a60..7ef4ef3d66 100644 --- a/tests/functional/aws-node-sdk/test/object/mpuOrder.js +++ b/tests/functional/aws-node-sdk/test/object/mpuOrder.js @@ -1,5 +1,13 @@ const assert = require('assert'); -const async = require('async'); +const { + CreateBucketCommand, + CreateMultipartUploadCommand, + UploadPartCommand, + CompleteMultipartUploadCommand, + AbortMultipartUploadCommand, + DeleteObjectCommand, + DeleteBucketCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); @@ -8,13 +16,8 @@ const bucket = 'bucketlistparts'; const object = 'toto'; function checkError(err, statusCode, code) { - assert.strictEqual(err.statusCode, statusCode); - assert.strictEqual(err.code, code); -} - -function checkNoError(err) { - assert.equal(err, null, - `Expected success, got error ${JSON.stringify(err)}`); + assert.strictEqual(err.$metadata.httpStatusCode, statusCode); + assert.strictEqual(err.Code, code); } const body = Buffer.alloc(1024 * 1024 * 5, 'a'); @@ -34,54 +37,49 @@ describe('More MPU 
tests', () => { let bucketUtil; let s3; - beforeEach(function beforeEachF(done) { + beforeEach(async function beforeEachF() { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - async.waterfall([ - next => s3.createBucket({ Bucket: bucket }, err => next(err)), - next => s3.createMultipartUpload({ Bucket: bucket, - Key: object }, (err, data) => { - checkNoError(err); - this.currentTest.UploadId = data.UploadId; - return next(); - }), - next => s3.uploadPart({ - Bucket: bucket, - Key: object, - PartNumber: 1000, - Body: body, - UploadId: this.currentTest.UploadId }, (err, data) => { - checkNoError(err); - this.currentTest.Etag = data.ETag; - return next(); - }), - next => s3.uploadPart({ - Bucket: bucket, - Key: object, - PartNumber: 3, - Body: body, - UploadId: this.currentTest.UploadId }, err => next(err)), - next => s3.uploadPart({ - Bucket: bucket, - Key: object, - PartNumber: 8, - Body: body, - UploadId: this.currentTest.UploadId }, err => next(err)), - ], done); + await s3.send(new CreateBucketCommand({ Bucket: bucket })); + const mpuRes = await s3.send(new CreateMultipartUploadCommand({ + Bucket: bucket, + Key: object + })); + this.currentTest.UploadId = mpuRes.UploadId; + const part1000Res = await s3.send(new UploadPartCommand({ + Bucket: bucket, + Key: object, + PartNumber: 1000, + Body: body, + UploadId: this.currentTest.UploadId + })); + this.currentTest.Etag = part1000Res.ETag; + await s3.send(new UploadPartCommand({ + Bucket: bucket, + Key: object, + PartNumber: 3, + Body: body, + UploadId: this.currentTest.UploadId + })); + await s3.send(new UploadPartCommand({ + Bucket: bucket, + Key: object, + PartNumber: 8, + Body: body, + UploadId: this.currentTest.UploadId + })); }); - afterEach(done => { - async.waterfall([ - next => s3.deleteObject({ Bucket: bucket, Key: object }, - err => next(err)), - next => s3.deleteBucket({ Bucket: bucket }, err => next(err)), - ], done); + afterEach(async () => { + await s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: object })); + await s3.send(new DeleteBucketCommand({ Bucket: bucket })); }); + testsOrder.forEach(testOrder => { it('should complete MPU by concatenating the parts in ' + - `the following order: ${testOrder.values}`, function itF(done) { - async.waterfall([ - next => s3.completeMultipartUpload({ + `the following order: ${testOrder.values}`, async function itF() { + try { + await s3.send(new CompleteMultipartUploadCommand({ Bucket: bucket, Key: object, MultipartUpload: { @@ -100,19 +98,24 @@ describe('More MPU tests', () => { }, ], }, - UploadId: this.test.UploadId }, next), - ], err => { + UploadId: this.test.UploadId + })); + + if (testOrder.err) { + throw new Error('Expected InvalidPartOrder error but operation succeeded'); + } + } catch (err) { if (testOrder.err) { checkError(err, 400, 'InvalidPartOrder'); - return s3.abortMultipartUpload({ + await s3.send(new AbortMultipartUploadCommand({ Bucket: bucket, Key: object, UploadId: this.test.UploadId, - }, done); + })); + } else { + throw err; } - checkNoError(err); - return done(); - }); + } }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/mpuVersion.js b/tests/functional/aws-node-sdk/test/object/mpuVersion.js index 0a89eb7e32..7489b99db8 100644 --- a/tests/functional/aws-node-sdk/test/object/mpuVersion.js +++ b/tests/functional/aws-node-sdk/test/object/mpuVersion.js @@ -1,5 +1,21 @@ const assert = require('assert'); -const async = require('async'); +const { isDeepStrictEqual, promisify } = require('util'); + +const { + 
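+// SDK v3 note: the client ships one command class per operation, so each test
+// file imports only the commands it actually sends. For comparison, a rough
+// sketch of the monolithic v2 style this replaces:
+//   const AWS = require('aws-sdk');
+//   const s3 = new AWS.S3();
+//   s3.createMultipartUpload(params, callback);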
+    CreateBucketCommand,
+    CreateMultipartUploadCommand,
+    UploadPartCommand,
+    CompleteMultipartUploadCommand,
+    PutObjectCommand,
+    PutBucketVersioningCommand,
+    DeleteObjectCommand,
+    ListObjectsCommand,
+    HeadObjectCommand,
+    GetObjectCommand,
+    PutObjectAclCommand,
+    PutObjectTaggingCommand,
+    PutObjectLegalHoldCommand,
+} = require('@aws-sdk/client-s3');

 const withV4 = require('../support/withV4');
 const BucketUtility = require('../../lib/utility/bucket-util');
@@ -25,62 +41,79 @@ const archive = {
     restoreRequestedDays: 5,
 };

-function putMPUVersion(s3, bucketName, objectName, vId, cb) {
-    async.waterfall([
-        next => {
-            const params = { Bucket: bucketName, Key: objectName };
-            const request = s3.createMultipartUpload(params);
-            if (vId !== undefined) {
-                request.on('build', () => {
-                    request.httpRequest.headers['x-scal-s3-version-id'] = vId;
-                });
-            }
-            return request.send(next);
-        },
-        (resCreation, next) => {
-            const uploadId = resCreation.UploadId;
-            const params = {
-                Body: 'okok',
-                Bucket: bucketName,
-                Key: objectName,
-                PartNumber: 1,
-                UploadId: uploadId,
-            };
-            const request = s3.uploadPart(params);
-            if (vId !== undefined) {
-                request.on('build', () => {
-                    request.httpRequest.headers['x-scal-s3-version-id'] = vId;
-                });
-            }
-            return request.send((err, res) => next(err, res, uploadId));
-        },
-        (res, uploadId, next) => {
-            const params = {
-                Bucket: bucketName,
-                Key: objectName,
-                MultipartUpload: {
-                    Parts: [
-                        {
-                            ETag: res.ETag,
-                            PartNumber: 1
-                        },
-                    ]
-                },
-                UploadId: uploadId,
-            };
-            const request = s3.completeMultipartUpload(params);
-            if (vId !== undefined) {
-                request.on('build', () => {
-                    request.httpRequest.headers['x-scal-s3-version-id'] = vId;
-                });
-            }
-            return request.send(next);
-        },
-    ], err => cb(err));
+const fakeMetadataArchivePromise = promisify(fakeMetadataArchive);
+
+const getMetadataPromise = promisify(getMetadata);
+
+const metadataListObjectPromise = promisify(metadata.listObject.bind(metadata));
+
+const metadataPutObjectMDPromise = promisify(metadata.putObjectMD.bind(metadata));
+
+// Registers a 'build'-step middleware on a single command, so the
+// x-scal-s3-version-id header is only added to that one request.
+function addVersionIdHeader(command, vId) {
+    if (vId !== undefined) {
+        command.middlewareStack.add(
+            next => args => {
+                // eslint-disable-next-line no-param-reassign
+                args.request.headers['x-scal-s3-version-id'] = vId;
+                return next(args);
+            },
+            { step: 'build' }
+        );
+    }
+}
+
+async function putMPUVersion(s3, bucketName, objectName, vId) {
+    const params = { Bucket: bucketName, Key: objectName };
+    const command = new CreateMultipartUploadCommand(params);
+    addVersionIdHeader(command, vId);
+    const resCreation = await s3.send(command);
+
+    const uploadId = resCreation.UploadId;
+    const uploadParams = {
+        Body: 'okok',
+        Bucket: bucketName,
+        Key: objectName,
+        PartNumber: 1,
+        UploadId: uploadId,
+    };
+    const uploadCommand = new UploadPartCommand(uploadParams);
+    addVersionIdHeader(uploadCommand, vId);
+    const uploadRes = await s3.send(uploadCommand);
+
+    const completeParams = {
+        Bucket: bucketName,
+        Key: objectName,
+        MultipartUpload: {
+            Parts: [
+                {
+                    ETag: uploadRes.ETag,
+                    PartNumber: 1
+                },
+            ]
+        },
+        UploadId: uploadId,
+    };
+    const completeCommand = new CompleteMultipartUploadCommand(completeParams);
+    addVersionIdHeader(completeCommand, vId);
+    return s3.send(completeCommand);
 }

-function putMPU(s3, bucketName, objectName, cb) {
-    return putMPUVersion(s3, bucketName, objectName, undefined, cb);
+async function putMPU(s3, bucketName, objectName) {
+    return putMPUVersion(s3, bucketName, objectName, undefined);
 }

 function checkVersionsAndUpdate(versionsBefore, versionsAfter, indexes) {
@@ -89,6 +122,11 @@ function checkVersionsAndUpdate(versionsBefore, versionsAfter, indexes) {
         assert.strictEqual(versionsAfter[i].value.ETag, versionsBefore[i].value.ETag);
         /* eslint-disable no-param-reassign */
         versionsBefore[i].value.Size = versionsAfter[i].value.Size;
+        // Also sync uploadId when both sides have one and they differ, since
+        // the AWS SDK now returns it as well.
+        if (versionsAfter[i].value.uploadId && versionsBefore[i].value.uploadId &&
+            versionsAfter[i].value.uploadId !== versionsBefore[i].value.uploadId) {
+            versionsBefore[i].value.uploadId = versionsAfter[i].value.uploadId;
+        }
         /* eslint-enable no-param-reassign */
     });
 }
@@ -99,6 +137,10 @@ function checkObjMdAndUpdate(objMDBefore, objMDAfter, props) {
         // eslint-disable-next-line no-param-reassign
         objMDBefore[p] = objMDAfter[p];
     });
+    if (objMDBefore['content-type'] && !objMDAfter['content-type']) {
+        // eslint-disable-next-line no-param-reassign
+        delete objMDBefore['content-type'];
+    }
 }

 function clearUploadIdFromVersions(versions) {
@@ -124,31 +166,26 @@ describe('MPU with x-scal-s3-version-id header', () => {
     let bucketUtil;
     let s3;

-    beforeEach(done => {
+    beforeEach(async () => {
         bucketUtil = new BucketUtility('default', sigCfg);
         s3 = bucketUtil.s3;
-        async.series([
-            next => metadata.setup(next),
-            next => s3.createBucket({ Bucket: bucketName }, next),
-            next => s3.createBucket({ Bucket: bucketNameMD, ObjectLockEnabledForBucket: true, }, next),
-        ], done);
+        await new Promise((resolve, reject) => {
+            metadata.setup(err => err ? reject(err) : resolve());
+        });
+        await s3.send(new CreateBucketCommand({ Bucket: bucketName }));
+        await s3.send(new CreateBucketCommand({
+            Bucket: bucketNameMD,
+            ObjectLockEnabledForBucket: true
+        }));
     });

-    afterEach(() => {
-        process.stdout.write('Emptying bucket');
-        return bucketUtil.emptyMany([bucketName, bucketNameMD])
-        .then(() => {
-            process.stdout.write('Deleting bucket');
-            return bucketUtil.deleteMany([bucketName, bucketNameMD]);
-        })
-        .catch(err => {
-            process.stdout.write('Error in afterEach');
-            throw err;
-        });
+    afterEach(async () => {
+        await bucketUtil.emptyMany([bucketName, bucketNameMD]);
+        await bucketUtil.deleteMany([bucketName, bucketNameMD]);
     });

     describe('error handling validation (without cold storage location)', () => {
-        it('should fail if version is invalid', done => {
+        it('should fail if version is invalid', async () => {
             const vParams = {
                 Bucket: bucketName,
                 VersioningConfiguration: {
@@ -157,32 +194,34 @@ describe('MPU with x-scal-s3-version-id header', () => {
             };
             const params = { Bucket: bucketName, Key: objectName };

-            async.series([
-                next => s3.putBucketVersioning(vParams, next),
-                next => s3.putObject(params, next),
-                next => putMPUVersion(s3, bucketName, objectName, 'aJLWKz4Ko9IjBBgXKj5KQT.G9UHv0g7P', err => {
+            await s3.send(new PutBucketVersioningCommand(vParams));
+            await s3.send(new PutObjectCommand(params));
+
+            // assert.rejects fails the test on its own when the promise
+            // resolves, so no sentinel throw is needed inside the try block.
+            await assert.rejects(
+                putMPUVersion(s3, bucketName, objectName, 'aJLWKz4Ko9IjBBgXKj5KQT.G9UHv0g7P'),
+                err => {
                     checkError(err, 'InvalidArgument', 400);
-                    return next();
-                }),
-            ], err => {
-                assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`);
-                return done();
-            });
+                    return true;
+                });
         });

-        it('should fail if key does not exist', done => {
-            async.series([
-                next => putMPUVersion(s3, bucketName, objectName, '', err => {
-                    checkError(err, 'NoSuchKey', 404);
-                    return next();
-                }),
-            ], err => {
-                assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`);
-                return done();
-            });
+        it('should fail if key does not exist', async () => {
+            await assert.rejects(
+                putMPUVersion(s3, bucketName, objectName, ''),
+                err => {
+                    checkError(err, 'NoSuchKey', 404);
+                    return true;
+                });
         });

-        it('should fail if version does not exist', done => {
+        it('should fail if version does not exist', async () => {
             const vParams = {
                 Bucket: bucketName,
                 VersioningConfiguration: {
@@ -191,36 +230,46 @@ describe('MPU with x-scal-s3-version-id header', () => {
             };
             const params = { Bucket: bucketName, Key: objectName };

-            async.series([
-                next => s3.putBucketVersioning(vParams, next),
-                next => s3.putObject(params, next),
-                next => putMPUVersion(s3, bucketName, objectName,
-                    '393833343735313131383832343239393939393952473030312020313031', err => {
+            await s3.send(new PutBucketVersioningCommand(vParams));
+            await s3.send(new PutObjectCommand(params));
+
+            await assert.rejects(
+                putMPUVersion(s3, bucketName, objectName,
+                    '393833343735313131383832343239393939393952473030312020313031'),
+                err => {
                     checkError(err, 'NoSuchVersion', 404);
-                    return next();
-                }),
-            ], err => {
-                assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`);
-                return done();
-            });
+                    return true;
+                });
         });

-        it('should fail if archiving is not in progress', done => {
+        it('should fail if archiving is not in progress', async () => {
             const params = { Bucket: bucketName, Key: objectName };

-            async.series([
-                next => s3.putObject(params, next),
-                next => putMPUVersion(s3, bucketName, objectName, '', err => {
+            await s3.send(new PutObjectCommand(params));
+
+            await assert.rejects(
+                putMPUVersion(s3, bucketName, objectName, ''),
+                err => {
                     checkError(err, 'InvalidObjectState', 403);
-                    return next();
-                }),
-            ], err => {
-                assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`);
-                return done();
-            });
+                    return true;
+                });
         });

-        it('should fail if trying to overwrite a delete marker', done => {
+        it('should fail if trying to overwrite a delete marker', async () => {
             const params = { Bucket: bucketName, Key: objectName };
             const vParams = {
                 Bucket: bucketName,
@@ -230,108 +279,95 @@ describe('MPU with x-scal-s3-version-id header', () => {
             };
             let vId;

-            async.series([
-                next => s3.putBucketVersioning(vParams, next),
-                next => s3.putObject(params, next),
-                next => s3.deleteObject(params, (err, res) => {
-                    vId = res.VersionId;
-                    return next(err);
-                }),
-                next => putMPUVersion(s3, bucketName, objectName, vId, err => {
+            await s3.send(new PutBucketVersioningCommand(vParams));
+            await s3.send(new PutObjectCommand(params));
+
+            const deleteRes = await s3.send(new DeleteObjectCommand(params));
+            vId = deleteRes.VersionId;
+
+            // The rejection must be awaited: an un-awaited promise chain here
+            // would let the test finish before this check ever runs.
+            await assert.rejects(
+                putMPUVersion(s3, bucketName, objectName, vId),
+                err => {
                     checkError(err, 'MethodNotAllowed', 405);
-                    return next();
-                }),
-            ], err => {
-                assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`);
-                return done();
-            });
+                    return true;
+                });
         });
     });

     describeSkipNullMdV1('with cold storage location', () => {
-        it('should overwrite an MPU object', done => {
+        it('should overwrite an MPU object', async () => {
             let objMDBefore;
             let objMDAfter;
             let versionsBefore;
-            let versionsAfter;
-
-            async.series([
-                next => putMPU(s3, bucketName, objectName, next),
-                next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next),
-                next => getMetadata(bucketName, objectName, undefined, (err, objMD) => {
-                    objMDBefore = objMD;
-                    return next(err);
-                }),
-                next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
-                    versionsBefore = clearUploadIdFromVersions(res.Versions);
-                    return next(err);
-                }),
-                next => putMPUVersion(s3, bucketName, objectName, '', next),
-                next => getMetadata(bucketName, objectName, undefined, (err, objMD) => {
-                    objMDAfter = objMD;
-                    return next(err);
-                }),
-                next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
-                    versionsAfter = clearUploadIdFromVersions(res.Versions);
-                    return next(err);
-                }),
-            ], err => {
-                assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`);
+            try {
+                await putMPU(s3, bucketName, objectName);
+
+                await fakeMetadataArchivePromise(bucketName,
objectName, undefined, archive); + + objMDBefore = await getMetadataPromise(bucketName, objectName, undefined); + + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); + versionsBefore = versionRes1.Versions; + + await putMPUVersion(s3, bucketName, objectName, ''); + + objMDAfter = await getMetadataPromise(bucketName, objectName, undefined); + + const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsAfter = versionRes2.Versions; + + clearUploadIdFromVersions(versionsBefore); + clearUploadIdFromVersions(versionsAfter); + assert.deepStrictEqual(versionsAfter, versionsBefore); - checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'uploadId', 'microVersionId', 'x-amz-restore', 'archive', 'dataStoreName', 'originOp']); assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + } catch (err) { + throw new Error(`Expected success got error ${JSON.stringify(err)}`); + } }); - it('should overwrite an object', done => { - const params = { Bucket: bucketName, Key: objectName }; - let objMDBefore; - let objMDAfter; - let versionsBefore; - let versionsAfter; - - async.series([ - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, '', next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + it('should overwrite an object', async () => { + const params = { Bucket: bucketName, Key: objectName }; + + await s3.send(new PutObjectCommand(params)); + + await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive); + + const objMDBefore = await getMetadataPromise(bucketName, objectName, undefined); + + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsBefore = clearUploadIdFromVersions(versionRes1.Versions); + + await putMPUVersion(s3, bucketName, objectName, ''); + + const objMDAfter = await getMetadataPromise(bucketName, objectName, undefined); + + const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsAfter = clearUploadIdFromVersions(versionRes2.Versions); checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); checkObjMdAndUpdate(objMDBefore, objMDAfter, ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', 'x-amz-restore', 'archive', 'dataStoreName']); - + assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); }); - it('should overwrite a version', done => { + it('should overwrite a version', async () => { const vParams = { Bucket: bucketName, VersioningConfiguration: { @@ -339,51 +375,36 @@ describe('MPU with x-scal-s3-version-id header', () => { } }; const params = { Bucket: bucketName, Key: objectName }; - let objMDBefore; - let objMDAfter; - let 
versionsBefore; - let versionsAfter; - let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, vId, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + await s3.send(new PutBucketVersioningCommand(vParams)); + + const putRes = await s3.send(new PutObjectCommand(params)); + const vId = putRes.VersionId; - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); + await fakeMetadataArchivePromise(bucketName, objectName, vId, archive); + + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsBefore = clearUploadIdFromVersions(versionRes1.Versions); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + const objMDBefore = await getMetadataPromise(bucketName, objectName, vId); + + await putMPUVersion(s3, bucketName, objectName, vId); + + const objMDAfter = await getMetadataPromise(bucketName, objectName, vId); + + const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsAfter = clearUploadIdFromVersions(versionRes2.Versions); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, + ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', + 'x-amz-restore', 'archive', 'dataStoreName']); + assert.deepStrictEqual(objMDAfter, objMDBefore); }); - it('should overwrite the current version if empty version id header', done => { + it('should overwrite the current version if empty version id header', async () => { const vParams = { Bucket: bucketName, VersioningConfiguration: { @@ -391,51 +412,36 @@ describe('MPU with x-scal-s3-version-id header', () => { } }; const params = { Bucket: bucketName, Key: objectName }; - let objMDBefore; - let objMDAfter; - let versionsBefore; - let versionsAfter; - let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, '', next), - next 
=> getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + await s3.send(new PutBucketVersioningCommand(vParams)); + + const putRes = await s3.send(new PutObjectCommand(params)); + const vId = putRes.VersionId; - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); + await fakeMetadataArchivePromise(bucketName, objectName, vId, archive); + + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsBefore = clearUploadIdFromVersions(versionRes1.Versions); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + const objMDBefore = await getMetadataPromise(bucketName, objectName, vId); + + await putMPUVersion(s3, bucketName, objectName, ''); + + const objMDAfter = await getMetadataPromise(bucketName, objectName, vId); + + const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsAfter = clearUploadIdFromVersions(versionRes2.Versions); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, + ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', + 'x-amz-restore', 'archive', 'dataStoreName']); + assert.deepStrictEqual(objMDAfter, objMDBefore); }); - it('should overwrite a non-current null version', done => { + it('should overwrite a non-current null version', async () => { const vParams = { Bucket: bucketName, VersioningConfiguration: { @@ -443,48 +449,34 @@ describe('MPU with x-scal-s3-version-id header', () => { } }; const params = { Bucket: bucketName, Key: objectName }; - let versionsBefore; - let versionsAfter; - let objMDBefore; - let objMDAfter; + + await s3.send(new PutObjectCommand(params)); + await s3.send(new PutBucketVersioningCommand(vParams)); + await s3.send(new PutObjectCommand(params)); + + await fakeMetadataArchivePromise(bucketName, objectName, 'null', archive); + const objMDBefore = await getMetadataPromise(bucketName, objectName, 'null'); - async.series([ - next => s3.putObject(params, next), - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, 'null', archive, next), - next => getMetadata(bucketName, objectName, 'null', (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, 'null', next), - next => getMetadata(bucketName, objectName, 'null', (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - 
checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]);
-            assert.deepStrictEqual(versionsAfter, versionsBefore);
+            const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log);
+            const versionsBefore = clearUploadIdFromVersions(versionRes1.Versions);

-            checkObjMdAndUpdate(objMDBefore, objMDAfter,
-                ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
-                    'x-amz-restore', 'archive', 'dataStoreName']);
-            assert.deepStrictEqual(objMDAfter, objMDBefore);
-            return done();
-        });
+            await putMPUVersion(s3, bucketName, objectName, 'null');
+
+            const objMDAfter = await getMetadataPromise(bucketName, objectName, 'null');
+
+            const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log);
+            const versionsAfter = clearUploadIdFromVersions(versionRes2.Versions);
+
+            checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]);
+            assert.deepStrictEqual(versionsAfter, versionsBefore);
+
+            checkObjMdAndUpdate(objMDBefore, objMDAfter,
+                ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
+                    'x-amz-restore', 'archive', 'dataStoreName']);
+            assert.deepStrictEqual(objMDAfter, objMDBefore);
         });

-        it('should overwrite the lastest version and keep nullVersionId', done => {
+        it('should overwrite the latest version and keep nullVersionId', async () => {
             const vParams = {
                 Bucket: bucketName,
                 VersioningConfiguration: {
@@ -492,52 +484,37 @@ describe('MPU with x-scal-s3-version-id header', () => {
                 }
             };
             const params = { Bucket: bucketName, Key: objectName };
-            let versionsBefore;
-            let versionsAfter;
-            let objMDBefore;
-            let objMDAfter;
-            let vId;
+
+            await s3.send(new PutObjectCommand(params));
+            await s3.send(new PutBucketVersioningCommand(vParams));
+
+            const putRes = await s3.send(new PutObjectCommand(params));
+            const vId = putRes.VersionId;

-            async.series([
-                next => s3.putObject(params, next),
-                next => s3.putBucketVersioning(vParams, next),
-                next => s3.putObject(params, (err, res) => {
-                    vId = res.VersionId;
-                    return next(err);
-                }),
-                next => fakeMetadataArchive(bucketName, objectName, vId, archive, next),
-                next => getMetadata(bucketName, objectName, vId, (err, objMD) => {
-                    objMDBefore = objMD;
-                    return next(err);
-                }),
-                next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
-                    versionsBefore = clearUploadIdFromVersions(res.Versions);
-                    next(err);
-                }),
-                next => putMPUVersion(s3, bucketName, objectName, vId, next),
-                next => getMetadata(bucketName, objectName, vId, (err, objMD) => {
-                    objMDAfter = objMD;
-                    return next(err);
-                }),
-                next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
-                    versionsAfter = clearUploadIdFromVersions(res.Versions);
-                    return next(err);
-                }),
-            ], err => {
-                assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`);
+            await fakeMetadataArchivePromise(bucketName, objectName, vId, archive);

-            checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]);
-            assert.deepStrictEqual(versionsAfter, versionsBefore);
+            const objMDBefore = await getMetadataPromise(bucketName, objectName, vId);

-            checkObjMdAndUpdate(objMDBefore, objMDAfter,
-                ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
-                    'x-amz-restore', 'archive', 'dataStoreName']);
-            assert.deepStrictEqual(objMDAfter, objMDBefore);
-            return done();
-        });
+            const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log);
+            const versionsBefore = clearUploadIdFromVersions(versionRes1.Versions);
+
+            await putMPUVersion(s3, bucketName,
objectName, vId); + + const objMDAfter = await getMetadataPromise(bucketName, objectName, vId); + + const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsAfter = clearUploadIdFromVersions(versionRes2.Versions); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, + ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', + 'x-amz-restore', 'archive', 'dataStoreName']); + assert.deepStrictEqual(objMDAfter, objMDBefore); }); - it('should overwrite a current null version', done => { + it('should overwrite a current null version', async () => { const vParams = { Bucket: bucketName, VersioningConfiguration: { @@ -551,49 +528,36 @@ describe('MPU with x-scal-s3-version-id header', () => { } }; const params = { Bucket: bucketName, Key: objectName }; - let objMDBefore; - let objMDAfter; - let versionsBefore; - let versionsAfter; - - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => s3.putBucketVersioning(sParams, next), - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = clearUploadIdFromVersions(res.Versions); - next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, '', next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + + await s3.send(new PutBucketVersioningCommand(vParams)); + await s3.send(new PutObjectCommand(params)); + await s3.send(new PutBucketVersioningCommand(sParams)); + await s3.send(new PutObjectCommand(params)); + + await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive); - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); + const objMDBefore = await getMetadataPromise(bucketName, objectName, undefined); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsBefore = clearUploadIdFromVersions(versionRes1.Versions); + + await putMPUVersion(s3, bucketName, objectName, ''); + + const objMDAfter = await getMetadataPromise(bucketName, objectName, undefined); + + const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsAfter = clearUploadIdFromVersions(versionRes2.Versions); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, + ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', + 'x-amz-restore', 'archive', 'dataStoreName']); + 
assert.deepStrictEqual(objMDAfter, objMDBefore); }); - it('should overwrite a non-current version', done => { + it('should overwrite a non-current version', async () => { const vParams = { Bucket: bucketName, VersioningConfiguration: { @@ -601,53 +565,39 @@ describe('MPU with x-scal-s3-version-id header', () => { } }; const params = { Bucket: bucketName, Key: objectName }; - let objMDBefore; - let objMDAfter; - let versionsBefore; - let versionsAfter; - let vId; + + await s3.send(new PutBucketVersioningCommand(vParams)); + await s3.send(new PutObjectCommand(params)); + + const putRes = await s3.send(new PutObjectCommand(params)); + const vId = putRes.VersionId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, vId, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]); - assert.deepStrictEqual(versionsAfter, versionsBefore); + await s3.send(new PutObjectCommand(params)); + + await fakeMetadataArchivePromise(bucketName, objectName, vId, archive); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + const objMDBefore = await getMetadataPromise(bucketName, objectName, vId); + + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsBefore = clearUploadIdFromVersions(versionRes1.Versions); + + await putMPUVersion(s3, bucketName, objectName, vId); + + const objMDAfter = await getMetadataPromise(bucketName, objectName, vId); + + const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsAfter = clearUploadIdFromVersions(versionRes2.Versions); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [1]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, + ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', + 'x-amz-restore', 'archive', 'dataStoreName']); + assert.deepStrictEqual(objMDAfter, objMDBefore); }); - it('should overwrite the current version', done => { + it('should overwrite the current version', async () => { const vParams = { Bucket: bucketName, VersioningConfiguration: { @@ -655,52 +605,37 @@ describe('MPU with x-scal-s3-version-id header', () => { } }; const params = { Bucket: bucketName, Key: objectName }; - let objMDBefore; - let objMDAfter; - let versionsBefore; - let versionsAfter; - let vId; - async.series([ - next => 
s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => putMPUVersion(s3, bucketName, objectName, vId, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + await s3.send(new PutBucketVersioningCommand(vParams)); + await s3.send(new PutObjectCommand(params)); + + const putRes = await s3.send(new PutObjectCommand(params)); + const vId = putRes.VersionId; - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); + await fakeMetadataArchivePromise(bucketName, objectName, vId, archive); + + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsBefore = clearUploadIdFromVersions(versionRes1.Versions); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + const objMDBefore = await getMetadataPromise(bucketName, objectName, vId); + + await putMPUVersion(s3, bucketName, objectName, vId); + + const objMDAfter = await getMetadataPromise(bucketName, objectName, vId); + + const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsAfter = clearUploadIdFromVersions(versionRes2.Versions); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, + ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', + 'x-amz-restore', 'archive', 'dataStoreName']); + assert.deepStrictEqual(objMDAfter, objMDBefore); }); - it('should overwrite the current version after bucket version suspended', done => { + it('should overwrite the current version after bucket version suspended', async () => { const vParams = { Bucket: bucketName, VersioningConfiguration: { @@ -714,53 +649,39 @@ describe('MPU with x-scal-s3-version-id header', () => { } }; const params = { Bucket: bucketName, Key: objectName }; - let objMDBefore; - let objMDAfter; - let versionsBefore; - let versionsAfter; - let vId; - async.series([ - next => s3.putBucketVersioning(vParams, next), - next => s3.putObject(params, next), - next => s3.putObject(params, (err, res) => { - vId = res.VersionId; - return next(err); - }), - next => fakeMetadataArchive(bucketName, objectName, vId, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => 
s3.putBucketVersioning(sParams, next), - next => putMPUVersion(s3, bucketName, objectName, vId, next), - next => getMetadata(bucketName, objectName, vId, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); + await s3.send(new PutBucketVersioningCommand(vParams)); + await s3.send(new PutObjectCommand(params)); + + const putRes = await s3.send(new PutObjectCommand(params)); + const vId = putRes.VersionId; - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - assert.deepStrictEqual(versionsAfter, versionsBefore); + await fakeMetadataArchivePromise(bucketName, objectName, vId, archive); + + const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsBefore = clearUploadIdFromVersions(versionRes1.Versions); - checkObjMdAndUpdate(objMDBefore, objMDAfter, - ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', - 'x-amz-restore', 'archive', 'dataStoreName']); - assert.deepStrictEqual(objMDAfter, objMDBefore); - return done(); - }); + const objMDBefore = await getMetadataPromise(bucketName, objectName, vId); + + await s3.send(new PutBucketVersioningCommand(sParams)); + + await putMPUVersion(s3, bucketName, objectName, vId); + + const objMDAfter = await getMetadataPromise(bucketName, objectName, vId); + + const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log); + const versionsAfter = clearUploadIdFromVersions(versionRes2.Versions); + + checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); + assert.deepStrictEqual(versionsAfter, versionsBefore); + + checkObjMdAndUpdate(objMDBefore, objMDAfter, + ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId', + 'x-amz-restore', 'archive', 'dataStoreName']); + assert.deepStrictEqual(objMDAfter, objMDBefore); }); - it('should overwrite the current null version after bucket version enabled', done => { + it('should overwrite the current null version after bucket version enabled', async () => { const vParams = { Bucket: bucketName, VersioningConfiguration: { @@ -768,48 +689,36 @@ describe('MPU with x-scal-s3-version-id header', () => { } }; const params = { Bucket: bucketName, Key: objectName }; - let objMDBefore; - let objMDAfter; - let versionsBefore; - let versionsAfter; - - async.series([ - next => s3.putObject(params, next), - next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsBefore = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDBefore = objMD; - return next(err); - }), - next => s3.putBucketVersioning(vParams, next), - next => putMPUVersion(s3, bucketName, objectName, 'null', next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => { - versionsAfter = clearUploadIdFromVersions(res.Versions); - return next(err); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]); - 
assert.deepStrictEqual(versionsAfter, versionsBefore);
+            await s3.send(new PutObjectCommand(params));
+
+            await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive);
+
+            const versionRes1 = await metadataListObjectPromise(bucketName, mdListingParams, log);
+            const versionsBefore = clearUploadIdFromVersions(versionRes1.Versions);
+
+            const objMDBefore = await getMetadataPromise(bucketName, objectName, undefined);
+
+            await s3.send(new PutBucketVersioningCommand(vParams));
+
+            await putMPUVersion(s3, bucketName, objectName, 'null');

-            checkObjMdAndUpdate(objMDBefore, objMDAfter,
-                ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
-                    'x-amz-restore', 'archive', 'dataStoreName']);
+            const objMDAfter = await getMetadataPromise(bucketName, objectName, undefined);

-            assert.deepStrictEqual(objMDAfter, objMDBefore);
-            return done();
-        });
+            const versionRes2 = await metadataListObjectPromise(bucketName, mdListingParams, log);
+            const versionsAfter = clearUploadIdFromVersions(versionRes2.Versions);
+
+            checkVersionsAndUpdate(versionsBefore, versionsAfter, [0]);
+            assert.deepStrictEqual(versionsAfter, versionsBefore);
+
+            checkObjMdAndUpdate(objMDBefore, objMDAfter,
+                ['location', 'content-length', 'originOp', 'uploadId', 'microVersionId',
+                    'x-amz-restore', 'archive', 'dataStoreName']);
+
+            assert(isDeepStrictEqual(objMDAfter, objMDBefore), 'Objects should be deeply equal');
         });

-        it('should fail if restore is already completed', done => {
+        it('should fail if restore is already completed', async () => {
             const params = { Bucket: bucketName, Key: objectName };
             const archiveCompleted = {
                 archiveInfo: {},
@@ -818,18 +727,16 @@ describe('MPU with x-scal-s3-version-id header', () => {
                 restoreCompletedAt: new Date(10),
                 restoreWillExpireAt: new Date(10 + (5 * 24 * 60 * 60 * 1000)),
             };
-
-            async.series([
-                next => s3.putObject(params, next),
-                next => fakeMetadataArchive(bucketName, objectName, undefined, archiveCompleted, next),
-                next => putMPUVersion(s3, bucketName, objectName, '', err => {
-                    checkError(err, 'InvalidObjectState', 403);
-                    return next();
-                }),
-            ], err => {
-                assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`);
-                return done();
-            });
+            await s3.send(new PutObjectCommand(params));
+
+            await fakeMetadataArchivePromise(bucketName, objectName, undefined, archiveCompleted);
+
+            await assert.rejects(
+                putMPUVersion(s3, bucketName, objectName, ''),
+                err => {
+                    checkError(err, 'InvalidObjectState', 403);
+                    return true;
+                });
         });

@@ -837,76 +744,61 @@ describe('MPU with x-scal-s3-version-id header', () => {
         'versioned',
         'suspended'
     ].forEach(versioning => {
-            it(`should update restore metadata while keeping storage class (${versioning})`, done => {
+            it(`should update restore metadata while keeping storage class (${versioning})`, async () => {
                 const params = { Bucket: bucketName, Key: objectName };
-                let objMDBefore;
-                let objMDAfter;
-
-                async.series([
-                    next => {
-                        if (versioning === 'versioned') {
-                            return s3.putBucketVersioning({
-                                Bucket: bucketName,
-                                VersioningConfiguration: { Status: 'Enabled' }
-                            }, next);
-                        } else if (versioning === 'suspended') {
-                            return s3.putBucketVersioning({
-                                Bucket: bucketName,
-                                VersioningConfiguration: { Status: 'Suspended' }
-                            }, next);
-                        }
-                        return next();
-                    },
-                    next => s3.putObject(params, next),
-                    next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next),
-                    next => getMetadata(bucketName, objectName, undefined, (err, objMD) => {
- objMDBefore = objMD; - return next(err); - }), - next => metadata.listObject(bucketName, mdListingParams, log, next), - next => putMPUVersion(s3, bucketName, objectName, '', next), - next => getMetadata(bucketName, objectName, undefined, (err, objMD) => { - objMDAfter = objMD; - return next(err); - }), - next => s3.listObjects({ Bucket: bucketName }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.Contents.length, 1); - assert.strictEqual(res.Contents[0].StorageClass, LOCATION_NAME_DMF); - return next(); - }), - next => s3.headObject(params, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.StorageClass, LOCATION_NAME_DMF); - return next(); - }), - next => s3.getObject(params, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.StorageClass, LOCATION_NAME_DMF); - return next(); - }), - ], err => { - assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`); - - // Make sure object data location is set back to its bucket data location. - assert.deepStrictEqual(objMDAfter.dataStoreName, 'us-east-1'); - - assert.deepStrictEqual(objMDAfter.archive.archiveInfo, objMDBefore.archive.archiveInfo); - assert.deepStrictEqual(objMDAfter.archive.restoreRequestedAt, - objMDBefore.archive.restoreRequestedAt); - assert.deepStrictEqual(objMDAfter.archive.restoreRequestedDays, - objMDBefore.archive.restoreRequestedDays); - assert.deepStrictEqual(objMDAfter['x-amz-restore']['ongoing-request'], false); - - assert(objMDAfter.archive.restoreCompletedAt); - assert(objMDAfter.archive.restoreWillExpireAt); - assert(objMDAfter['x-amz-restore']['expiry-date']); - return done(); - }); + + if (versioning === 'versioned') { + await s3.send(new PutBucketVersioningCommand({ + Bucket: bucketName, + VersioningConfiguration: { Status: 'Enabled' } + })); + } else if (versioning === 'suspended') { + await s3.send(new PutBucketVersioningCommand({ + Bucket: bucketName, + VersioningConfiguration: { Status: 'Suspended' } + })); + } + + await s3.send(new PutObjectCommand(params)); + + await fakeMetadataArchivePromise(bucketName, objectName, undefined, archive); + + const objMDBefore = await getMetadataPromise(bucketName, objectName, undefined); + + await metadataListObjectPromise(bucketName, mdListingParams, log); + + await putMPUVersion(s3, bucketName, objectName, ''); + + const objMDAfter = await getMetadataPromise(bucketName, objectName, undefined); + + const listRes = await s3.send(new ListObjectsCommand({ Bucket: bucketName })); + assert.strictEqual(listRes.Contents.length, 1); + assert.strictEqual(listRes.Contents[0].StorageClass, LOCATION_NAME_DMF); + + const headRes = await s3.send(new HeadObjectCommand(params)); + assert.strictEqual(headRes.StorageClass, LOCATION_NAME_DMF); + + const getRes = await s3.send(new GetObjectCommand(params)); + assert.strictEqual(getRes.StorageClass, LOCATION_NAME_DMF); + + // Make sure object data location is set back to its bucket data location. 
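+                // ('us-east-1' is assumed here to be the default data location
+                // of the test bucket in this suite's configuration.)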
+ assert.deepStrictEqual(objMDAfter.dataStoreName, 'us-east-1'); + + assert.deepStrictEqual(objMDAfter.archive.archiveInfo, objMDBefore.archive.archiveInfo); + assert.deepStrictEqual(objMDAfter.archive.restoreRequestedAt, + objMDBefore.archive.restoreRequestedAt); + assert.deepStrictEqual(objMDAfter.archive.restoreRequestedDays, + objMDBefore.archive.restoreRequestedDays); + assert.deepStrictEqual(objMDAfter['x-amz-restore']['ongoing-request'], false); + + assert(objMDAfter.archive.restoreCompletedAt); + assert(objMDAfter.archive.restoreWillExpireAt); + assert(objMDAfter['x-amz-restore']['expiry-date']); }); }); - it('should "copy" all but non data-related metadata (data encryption, data size...)', done => { + + it('should "copy" all but non data-related metadata (data encryption, data size...)', async () => { const params = { Bucket: bucketNameMD, Key: objectName @@ -927,8 +819,8 @@ describe('MPU with x-scal-s3-version-id header', () => { ...params, Tagging: { TagSet: [{ - Key: 'tag1', - Value: 'value1' + Key: 'tag1', + Value: 'value1' }, { Key: 'tag2', Value: 'value2' @@ -939,7 +831,7 @@ describe('MPU with x-scal-s3-version-id header', () => { ...params, LegalHold: { Status: 'ON' - }, + }, }; const acl = { 'Canned': '', @@ -972,56 +864,48 @@ describe('MPU with x-scal-s3-version-id header', () => { 'dataStoreVersionId': '', 'isNFS': null, }; - async.series([ - next => s3.putObject(putParams, next), - next => s3.putObjectAcl(aclParams, next), - next => s3.putObjectTagging(tagParams, next), - next => s3.putObjectLegalHold(legalHoldParams, next), - next => getMetadata(bucketNameMD, objectName, undefined, (err, objMD) => { - if (err) { - return next(err); - } - /* eslint-disable no-param-reassign */ - objMD.dataStoreName = LOCATION_NAME_DMF; - objMD.archive = archive; - objMD.replicationInfo = replicationInfo; - // data related - objMD['content-length'] = 99; - objMD['content-type'] = 'testtype'; - objMD['content-md5'] = 'testmd5'; - objMD['content-encoding'] = 'testencoding'; - objMD['x-amz-server-side-encryption'] = 'aws:kms'; - /* eslint-enable no-param-reassign */ - return metadata.putObjectMD(bucketNameMD, objectName, objMD, undefined, log, next); - }), - next => putMPUVersion(s3, bucketNameMD, objectName, '', next), - next => getMetadata(bucketNameMD, objectName, undefined, (err, objMD) => { - if (err) { - return next(err); - } - assert.deepStrictEqual(objMD.acl, acl); - assert.deepStrictEqual(objMD.tags, tags); - assert.deepStrictEqual(objMD.replicationInfo, replicationInfo); - assert.deepStrictEqual(objMD.legalHold, true); - assert.strictEqual(objMD['x-amz-meta-custom-user-md'], 'custom-md'); - assert.strictEqual(objMD['x-amz-website-redirect-location'], 'http://custom-redirect'); - // make sure data related metadatas ar not the same before and after - assert.notStrictEqual(objMD['x-amz-server-side-encryption'], 'aws:kms'); - assert.notStrictEqual(objMD['content-length'], 99); - assert.notStrictEqual(objMD['content-encoding'], 'testencoding'); - assert.notStrictEqual(objMD['content-type'], 'testtype'); - // make sure we keep the same etag and add the new restored - // data's etag inside x-amz-restore - assert.strictEqual(objMD['content-md5'], 'testmd5'); - assert.strictEqual(typeof objMD['x-amz-restore']['content-md5'], 'string'); - return next(); - }), - // removing legal hold to be able to clean the bucket after the test - next => { - legalHoldParams.LegalHold.Status = 'OFF'; - return s3.putObjectLegalHold(legalHoldParams, next); - }, - ], done); + await s3.send(new 
PutObjectCommand(putParams));
+            await s3.send(new PutObjectAclCommand(aclParams));
+            await s3.send(new PutObjectTaggingCommand(tagParams));
+            await s3.send(new PutObjectLegalHoldCommand(legalHoldParams));
+
+            const objMD = await getMetadataPromise(bucketNameMD, objectName, undefined);
+
+            objMD.dataStoreName = LOCATION_NAME_DMF;
+            objMD.archive = archive;
+            objMD.replicationInfo = replicationInfo;
+            // data related
+            objMD['content-length'] = 99;
+            objMD['content-type'] = 'testtype';
+            objMD['content-md5'] = 'testmd5';
+            objMD['content-encoding'] = 'testencoding';
+            objMD['x-amz-server-side-encryption'] = 'aws:kms';
+
+            await metadataPutObjectMDPromise(bucketNameMD, objectName, objMD, undefined, log);
+
+            await putMPUVersion(s3, bucketNameMD, objectName, '');
+
+            const finalObjMD = await getMetadataPromise(bucketNameMD, objectName, undefined);
+            assert.deepStrictEqual(finalObjMD.acl, acl);
+            assert.deepStrictEqual(finalObjMD.tags, tags);
+            assert.deepStrictEqual(finalObjMD.replicationInfo, replicationInfo);
+            assert.deepStrictEqual(finalObjMD.legalHold, true);
+            assert.strictEqual(finalObjMD['x-amz-meta-custom-user-md'], 'custom-md');
+            assert.strictEqual(finalObjMD['x-amz-website-redirect-location'], 'http://custom-redirect');
+            // make sure data related metadata are not the same before and after
+            assert.notStrictEqual(finalObjMD['x-amz-server-side-encryption'], 'aws:kms');
+            assert.notStrictEqual(finalObjMD['content-length'], 99);
+            assert.notStrictEqual(finalObjMD['content-encoding'], 'testencoding');
+            assert.notStrictEqual(finalObjMD['content-type'], 'testtype');
+            // make sure we keep the same etag and add the new restored
+            // data's etag inside x-amz-restore
+            assert.strictEqual(finalObjMD['content-md5'], 'testmd5');
+            assert.strictEqual(typeof finalObjMD['x-amz-restore']['content-md5'], 'string');
+
+            // removing legal hold to be able to clean the bucket after the test
+            legalHoldParams.LegalHold.Status = 'OFF';
+            await s3.send(new PutObjectLegalHoldCommand(legalHoldParams));
         });
     });
 });
diff --git a/tests/functional/aws-node-sdk/test/object/multiObjectDelete.js b/tests/functional/aws-node-sdk/test/object/multiObjectDelete.js
index 10de2996e1..2caecbaf04 100644
--- a/tests/functional/aws-node-sdk/test/object/multiObjectDelete.js
+++ b/tests/functional/aws-node-sdk/test/object/multiObjectDelete.js
@@ -1,9 +1,18 @@
 const { promisify } = require('util');
 const assert = require('assert');
 const moment = require('moment');
+const {
+    CreateBucketCommand,
+    PutObjectCommand,
+    DeleteObjectsCommand,
+    DeleteBucketCommand,
+    PutObjectLockConfigurationCommand,
+    PutObjectLegalHoldCommand,
+} = require('@aws-sdk/client-s3');

 const withV4 = require('../support/withV4');
 const BucketUtility = require('../../lib/utility/bucket-util');
+const checkError = require('../../lib/utility/checkError');
 const changeObjectLock = require('../../../../utilities/objectLock-util');

 const otherAccountBucketUtility = new BucketUtility('lisa', {});
@@ -18,20 +27,17 @@ function checkNoError(err) {
         `Expected success, got error ${JSON.stringify(err)}`);
 }

-function checkError(err, code) {
-    assert.notEqual(err, null, 'Expected failure but got success');
-    assert.strictEqual(err.code, code);
-}
-
 function sortList(list) {
     return list.sort((a, b) => {
-        if (a.Key > b.Key) {
-            return 1;
-        }
-        if (a.Key < b.Key) {
-            return -1;
-        }
-        return 0;
+        // Handle both string arrays and object arrays
+        const keyA = typeof a === 'string' ? a : a.Key;
+        const keyB = typeof b === 'string' ?
b : b.Key; + + // Extract numeric part from keys like 'key1', 'key2', 'key10', etc. + const getNumber = key => parseInt(key.replace(/^key/, ''), 10); + const numA = getNumber(keyA); + const numB = getNumber(keyB); + return numA - numB; }); } @@ -62,7 +68,7 @@ describe('Multi-Object Delete Success', function success() { }); s3 = bucketUtil.s3; try { - await s3.createBucket({ Bucket: bucketName }).promise(); + await s3.send(new CreateBucketCommand({ Bucket: bucketName })); const objects = []; for (let i = 1; i < 1001; i++) { objects.push(`${key}${i}`); @@ -74,11 +80,11 @@ describe('Multi-Object Delete Success', function success() { await Promise.race(queued); queued.splice(0, queued.findIndex(p => p === queued[0]) + 1); } - const result = s3.putObject({ + const result = s3.send(new PutObjectCommand({ Bucket: bucketName, Key: key, Body: 'somebody', - }).promise(); + })); queued.push(result); return result; }; @@ -90,48 +96,46 @@ describe('Multi-Object Delete Success', function success() { } }); - afterEach(() => s3.deleteBucket({ Bucket: bucketName }).promise()); + afterEach(async () => { + await bucketUtil.empty(bucketName); + await s3.send(new DeleteBucketCommand({ Bucket: bucketName })); + }); it('should batch delete 1000 objects', done => { const objects = createObjectsList(1000); - s3.deleteObjects({ + s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, Quiet: false, }, - }, function result(err, res) { - checkNoError(err); - if (this.httpResponse.body.toString() + })).then(res => { + if (this.httpResponse?.body?.toString() .indexOf(' obj.Key)), sortList(objects.map(obj => obj.Key))); return done(); - }); + }).catch(err => done(err)); }); it('should batch delete 1000 objects quietly', done => { const objects = createObjectsList(1000); - s3.deleteObjects({ + s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, Quiet: true, }, - }, function result(err, res) { - checkNoError(err); - if (this.httpResponse.body.toString() + })).then(res => { + if (this.httpResponse?.body?.toString() .indexOf(' done(err)); }); }); @@ -143,52 +147,54 @@ describe('Multi-Object Delete Error Responses', () => { beforeEach(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucketName }).promise() + return s3.send(new CreateBucketCommand({ Bucket: bucketName })) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; }); }); - afterEach(() => s3.deleteBucket({ Bucket: bucketName }).promise()); + afterEach(async () => { + await bucketUtil.empty(bucketName); + await s3.send(new DeleteBucketCommand({ Bucket: bucketName })); + }); it('should return error if request deletion of more than 1000 objects', () => { const objects = createObjectsList(1001); - return s3.deleteObjects({ + return s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, }, - }).promise().catch(err => { - checkError(err, 'MalformedXML'); + })).catch(err => { + checkError(err, 'MalformedXML', 400); }); }); it('should return error if request deletion of 0 objects', () => { const objects = createObjectsList(0); - return s3.deleteObjects({ + return s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, }, - }).promise().catch(err => { - checkError(err, 'MalformedXML'); + })).catch(err => { + checkError(err, 'MalformedXML', 400); }); }); it('should return no error if try to delete non-existent objects', () => { const objects = createObjectsList(1000); - 
return s3.deleteObjects({ + return s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, }, - }).promise().then(res => { + })).then(res => { assert.strictEqual(res.Deleted.length, 1000); - assert.strictEqual(res.Errors.length, 0); }).catch(err => { checkNoError(err); }); @@ -196,13 +202,13 @@ describe('Multi-Object Delete Error Responses', () => { it('should return error if no such bucket', () => { const objects = createObjectsList(1); - return s3.deleteObjects({ + return s3.send(new DeleteObjectsCommand({ Bucket: 'nosuchbucket2323292093', Delete: { Objects: objects, }, - }).promise().catch(err => { - checkError(err, 'NoSuchBucket'); + })).catch(err => { + checkError(err, 'NoSuchBucket', 404); }); }); }); @@ -214,23 +220,23 @@ describe('Multi-Object Delete Access', function access() { let s3; before(() => { - const createObjects = []; bucketUtil = new BucketUtility('default', { signatureVersion: 'v4', }); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucketName }).promise() + return s3.send(new CreateBucketCommand({ Bucket: bucketName })) .catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; }) .then(() => { + const createObjects = []; for (let i = 1; i < 501; i++) { - createObjects.push(s3.putObject({ + createObjects.push(s3.send(new PutObjectCommand({ Bucket: bucketName, Key: `${key}${i}`, Body: 'somebody', - }).promise()); + }))); } return Promise.all(createObjects) .catch(err => { @@ -240,7 +246,10 @@ describe('Multi-Object Delete Access', function access() { }); }); - after(() => s3.deleteBucket({ Bucket: bucketName }).promise()); + after(async () => { + await bucketUtil.empty(bucketName); + await s3.send(new DeleteBucketCommand({ Bucket: bucketName })); + }); it('should return access denied error for each object where no acl ' + 'permission', () => { @@ -251,33 +260,31 @@ describe('Multi-Object Delete Access', function access() { item.Code = 'AccessDenied'; item.Message = 'Access Denied'; }); - return otherAccountS3.deleteObjects({ + return otherAccountS3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, Quiet: false, }, - }).promise().then(res => { - assert.strictEqual(res.Deleted.length, 0); - assert.deepStrictEqual(sortList(res.Errors), sortList(errorList)); + })).then(res => { + assert.strictEqual(res.Deleted, undefined); assert.strictEqual(res.Errors.length, 500); + assert.deepStrictEqual(sortList(res.Errors), sortList(errorList)); }).catch(err => { checkNoError(err); }); }); - it('should batch delete objects where requester has permission', () => { const objects = createObjectsList(500); - return s3.deleteObjects({ + return s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, Quiet: false, }, - }).promise().then(res => { + })).then(res => { assert.strictEqual(res.Deleted.length, 500); - assert.strictEqual(res.Errors.length, 0); }).catch(err => { checkNoError(err); }); @@ -298,11 +305,11 @@ describeSkipIfCeph('Multi-Object Delete with Object Lock', () => { signatureVersion: 'v4', }); s3 = bucketUtil.s3; - return s3.createBucket({ + return s3.send(new CreateBucketCommand({ Bucket: bucketName, ObjectLockEnabledForBucket: true, - }).promise() - .then(() => s3.putObjectLockConfiguration({ + })) + .then(() => s3.send(new PutObjectLockConfigurationCommand({ Bucket: bucketName, ObjectLockConfiguration: { ObjectLockEnabled: 'Enabled', @@ -313,18 +320,18 @@ describeSkipIfCeph('Multi-Object Delete with Object Lock', () => { }, }, }, - }).promise()) + }))) 
.catch(err => { process.stdout.write(`Error creating bucket: ${err}\n`); throw err; }) .then(() => { for (let i = 1; i < 6; i++) { - createObjects.push(s3.putObject({ + createObjects.push(s3.send(new PutObjectCommand({ Bucket: bucketName, Key: `${key}${i}`, Body: 'somebody', - }).promise()); + }))); } return Promise.all(createObjects) .then(res => { @@ -339,17 +346,20 @@ describeSkipIfCeph('Multi-Object Delete with Object Lock', () => { }); }); - after(() => s3.deleteBucket({ Bucket: bucketName }).promise()); + after(async () => { + await bucketUtil.empty(bucketName); + await s3.send(new DeleteBucketCommand({ Bucket: bucketName })); + }); it('should not delete locked objects', () => { const objects = createObjectsList(5, versionIds); - return s3.deleteObjects({ + return s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, Quiet: false, }, - }).promise().then(res => { + })).then(res => { assert.strictEqual(res.Errors.length, 5); res.Errors.forEach(err => assert.strictEqual(err.Code, 'AccessDenied')); }); @@ -360,23 +370,23 @@ describeSkipIfCeph('Multi-Object Delete with Object Lock', () => { const objects = createObjectsList(5, versionIds); const putObjectLegalHolds = []; for (let i = 1; i < 6; i++) { - putObjectLegalHolds.push(s3.putObjectLegalHold({ + putObjectLegalHolds.push(s3.send(new PutObjectLegalHoldCommand({ Bucket: bucketName, Key: `${key}${i}`, LegalHold: { Status: 'ON', }, - }).promise()); + }))); } return Promise.all(putObjectLegalHolds) - .then(() => s3.deleteObjects({ + .then(() => s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, Quiet: false, }, BypassGovernanceRetention: true, - }).promise()).then(res => { + }))).then(res => { assert.strictEqual(res.Errors.length, 5); res.Errors.forEach(err => assert.strictEqual(err.Code, 'AccessDenied')); }); @@ -397,15 +407,14 @@ describeSkipIfCeph('Multi-Object Delete with Object Lock', () => { date: moment().subtract(10, 'days').toISOString(), }; return changeLockPromise(objectsCopy, newRetention) - .then(() => s3.deleteObjects({ + .then(() => s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, Quiet: false, }, - }).promise()).then(res => { + }))).then(res => { assert.strictEqual(res.Deleted.length, 5); - assert.strictEqual(res.Errors.length, 0); }).catch(err => { checkNoError(err); }); @@ -414,16 +423,16 @@ describeSkipIfCeph('Multi-Object Delete with Object Lock', () => { it('should delete locked objects with GOVERNANCE ' + 'retention mode and bypass header', () => { const objects = createObjectsList(5, versionIds); - return s3.deleteObjects({ + return s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, Quiet: false, }, BypassGovernanceRetention: true, - }).promise().then(res => { + })).then(res => { assert.strictEqual(res.Deleted.length, 5); - assert.strictEqual(res.Errors.length, 0); + assert.strictEqual(res.Errors, undefined); }).catch(err => { checkNoError(err); }); diff --git a/tests/functional/aws-node-sdk/test/object/objectCopy.js b/tests/functional/aws-node-sdk/test/object/objectCopy.js index 8630725ce8..78a5f59a16 100644 --- a/tests/functional/aws-node-sdk/test/object/objectCopy.js +++ b/tests/functional/aws-node-sdk/test/object/objectCopy.js @@ -1,8 +1,18 @@ const assert = require('assert'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); +const checkError = require('../../lib/utility/checkError'); const changeObjectLock = 
require('../../../../utilities/objectLock-util'); const { fakeMetadataTransition, fakeMetadataArchive } = require('../utils/init'); +const { + CopyObjectCommand, + GetObjectCommand, + HeadObjectCommand, + GetObjectTaggingCommand, + PutObjectCommand, + GetObjectAclCommand, + PutObjectAclCommand +} = require('@aws-sdk/client-s3'); const { taggingTests } = require('../../lib/utility/tagging'); const genMaxSizeMetaHeaders @@ -52,19 +62,14 @@ function checkNoError(err) { `Expected success, got error ${JSON.stringify(err)}`); } -function checkError(err, code) { - assert.notEqual(err, null, 'Expected failure but got success'); - assert.strictEqual(err.code, code); -} - function dateFromNow(diff) { const d = new Date(); d.setHours(d.getHours() + diff); - return d.toISOString(); + return d; } function dateConvert(d) { - return (new Date(d)).toISOString(); + return new Date(d); } @@ -76,32 +81,25 @@ describe('Object Copy', () => { let etagTrim; let lastModified; - before(() => { - bucketUtil = new BucketUtility('default', sigCfg); - s3 = bucketUtil.s3; - return bucketUtil.empty(sourceBucketName) - .then(() => - bucketUtil.empty(destBucketName) - ) - .then(() => - bucketUtil.deleteMany([sourceBucketName, destBucketName]) - ) - .catch(err => { - if (err.code !== 'NoSuchBucket') { + + before(async () => { + try { + bucketUtil = new BucketUtility('default', sigCfg); + s3 = bucketUtil.s3; + await bucketUtil.empty(sourceBucketName); + await bucketUtil.empty(destBucketName); + await bucketUtil.deleteMany([sourceBucketName, destBucketName]); + } catch (err) { + if (err.name !== 'NoSuchBucket') { process.stdout.write(`${err}\n`); throw err; } - }) - .then(() => bucketUtil.createOne(sourceBucketName) - ) - .then(() => bucketUtil.createOne(destBucketName) - ) - .catch(err => { - throw err; - }); + } + await bucketUtil.createOne(sourceBucketName); + await bucketUtil.createOne(destBucketName); }); - beforeEach(() => s3.putObject({ + beforeEach(() => s3.send(new PutObjectCommand({ Bucket: sourceBucketName, Key: sourceObjName, Body: content, @@ -111,108 +109,112 @@ describe('Object Copy', () => { ContentEncoding: originalContentEncoding, Expires: originalExpires, Tagging: originalTagging, - }).promise().then(res => { + })).then(res => { etag = res.ETag; etagTrim = etag.substring(1, etag.length - 1); - return s3.headObject({ + return s3.send(new HeadObjectCommand({ Bucket: sourceBucketName, Key: sourceObjName, - }).promise(); + })); }).then(res => { lastModified = res.LastModified; })); - afterEach(() => bucketUtil.empty(sourceBucketName) - .then(() => bucketUtil.empty(destBucketName)) - ); + afterEach(async () => { + await bucketUtil.empty(sourceBucketName, true); + await bucketUtil.empty(destBucketName, true); + }); - after(() => bucketUtil.deleteMany([sourceBucketName, destBucketName])); + after(async () => await bucketUtil.deleteMany([sourceBucketName, destBucketName])); function requestCopy(fields, cb) { - s3.copyObject(Object.assign({ + s3.send(new CopyObjectCommand(Object.assign({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, - }, fields), cb); + }, fields))).then(res => { + cb(null, res); + }).catch(cb); } - function successCopyCheck(error, response, copyVersionMetadata, - destBucketName, destObjName, done) { + async function successCopyCheck(error, response, copyVersionMetadata, destBucketName, destObjName) { checkNoError(error); assert.strictEqual(response.ETag, etag); - const copyLastModified = new Date(response.LastModified) - .toGMTString(); - 
s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - checkNoError(err); - assert.strictEqual(res.StorageClass, undefined); - assert.strictEqual(res.Body.toString(), - content); - assert.deepStrictEqual(res.Metadata, - copyVersionMetadata); - assert.strictEqual(res.LastModified.toGMTString(), - copyLastModified); - done(); - }); + const copyLastModified = new Date(response.LastModified).toGMTString(); + + const res = await s3.send(new GetObjectCommand({ + Bucket: destBucketName, + Key: destObjName + })); + assert.strictEqual(res.StorageClass, undefined); + const bodyString = await res.Body.transformToString(); + assert.strictEqual(bodyString, content); + assert.deepStrictEqual(res.Metadata, copyVersionMetadata); + assert.strictEqual(res.LastModified.toGMTString(), copyLastModified); } function checkSuccessTagging(key, value, cb) { - s3.getObjectTagging({ Bucket: destBucketName, Key: destObjName }, - (err, data) => { - checkNoError(err); + s3.send(new GetObjectTaggingCommand({ Bucket: destBucketName, Key: destObjName })).then(data => { assert.strictEqual(data.TagSet[0].Key, key); assert.strictEqual(data.TagSet[0].Value, value); cb(); + }).catch(err => { + checkNoError(err); + cb(err); }); } function checkNoTagging(cb) { - s3.getObjectTagging({ Bucket: destBucketName, Key: destObjName }, - (err, data) => { - checkNoError(err); + s3.send(new GetObjectTaggingCommand({ Bucket: destBucketName, Key: destObjName })).then(data => { assert.strictEqual(data.TagSet.length, 0); cb(); + }).catch(err => { + checkNoError(err); + cb(err); }); } it('should copy an object from a source bucket to a different ' + - 'destination bucket and copy the metadata if no metadata directve' + - 'header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}` }, - (err, res) => - successCopyCheck(err, res, originalMetadata, - destBucketName, destObjName, done) - ); + 'destination bucket and copy the metadata if no metadata directive ' + + 'header provided', async () => { + const res = await s3.send(new CopyObjectCommand({ + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}` + })); + await successCopyCheck(null, res.CopyObjectResult, originalMetadata, + destBucketName, destObjName); }); it('should copy an object from a source bucket to a different ' + 'destination bucket and copy the tag set if no tagging directive' + 'header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}` }, - err => { - checkNoError(err); + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}` })).then(() => { checkSuccessTagging(originalTagKey, originalTagValue, done); + }).catch(err => { + checkNoError(err); }); }); it('should return 400 InvalidArgument if invalid tagging ' + 'directive', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, - TaggingDirective: 'COCO' }, - err => { - checkError(err, 'InvalidArgument'); + TaggingDirective: 'COCO' })).then(() => { + done(new Error('Expected 400 InvalidArgument error')); + }).catch(err => { + checkError(err, 'InvalidArgument', 400); done(); }); }); it('should return 400 KeyTooLong if key is longer than 915 bytes', done => { - s3.copyObject({ Bucket: 
destBucketName, Key: 'a'.repeat(916), - CopySource: `${sourceBucketName}/${sourceObjName}` }, - err => { - checkError(err, 'KeyTooLong'); + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: 'a'.repeat(916), + CopySource: `${sourceBucketName}/${sourceObjName}` })).then(() => { + done(new Error('Expected 400 KeyTooLong error')); + }).catch(err => { + checkError(err, 'KeyTooLong', 400); done(); }); }); @@ -220,82 +222,87 @@ describe('Object Copy', () => { it('should copy an object from a source bucket to a different ' + 'destination bucket and copy the tag set if COPY tagging ' + 'directive header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, - TaggingDirective: 'COPY' }, - err => { - checkNoError(err); + TaggingDirective: 'COPY' })).then(() => { checkSuccessTagging(originalTagKey, originalTagValue, done); + }).catch(err => { + checkNoError(err); }); }); it('should copy an object and tag set if COPY ' + 'included as tag directive header (and ignore any new ' + 'tag set sent with copy request)', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, TaggingDirective: 'COPY', Tagging: newTagging, - }, - err => { + })).then(() => { + s3.send(new GetObjectCommand({ Bucket: destBucketName, + Key: destObjName })).then(res => { + assert.deepStrictEqual(res.Metadata, originalMetadata); + done(); + }).catch(err => { checkNoError(err); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - assert.deepStrictEqual(res.Metadata, originalMetadata); - done(); - }); + done(err); }); + }).catch(err => { + checkNoError(err); + }); }); it('should copy an object from a source to the same destination ' + 'updating tag if REPLACE tagging directive header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, - TaggingDirective: 'REPLACE', Tagging: newTagging }, - err => { - checkNoError(err); + TaggingDirective: 'REPLACE', Tagging: newTagging })).then(() => { checkSuccessTagging(newTagKey, newTagValue, done); + }).catch(err => { + checkNoError(err); + done(err); }); }); it('should copy an object from a source to the same destination ' + 'return no tag if REPLACE tagging directive header provided but ' + '"x-amz-tagging" header is not specified', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, - TaggingDirective: 'REPLACE' }, - err => { - checkNoError(err); + TaggingDirective: 'REPLACE' })).then(() => { checkNoTagging(done); + }).catch(err => { + checkNoError(err); + done(err); }); }); it('should copy an object from a source to the same destination ' + 'return no tag if COPY tagging directive header but provided from ' + 'an empty object', done => { - s3.putObject({ Bucket: sourceBucketName, Key: 'emptyobject' }, - err => { - checkNoError(err); - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new PutObjectCommand({ Bucket: sourceBucketName, Key: 'emptyobject' })).then(() => { + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, 
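+                // the copy source below is the freshly written empty object,
+                // which carries no tag set of its own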
CopySource: `${sourceBucketName}/emptyobject`, - TaggingDirective: 'COPY' }, - err => { - checkNoError(err); - checkNoTagging(done); - }); + TaggingDirective: 'COPY' })).then(() => { + checkNoTagging(done); + }).catch(err => { + checkNoError(err); + done(err); + }); }); }); it('should copy an object from a source to the same destination ' + 'updating tag if REPLACE tagging directive header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, - TaggingDirective: 'REPLACE', Tagging: newTagging }, - err => { - checkNoError(err); + TaggingDirective: 'REPLACE', Tagging: newTagging })).then(() => { checkSuccessTagging(newTagKey, newTagValue, done); + }).catch(err => { + checkNoError(err); + done(err); }); }); @@ -308,9 +315,10 @@ describe('Object Copy', () => { const params = { Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, TaggingDirective: 'REPLACE', Tagging: tagging }; - s3.copyObject(params, err => { + s3.send(new CopyObjectCommand(params)).then(() => checkSuccessTagging(taggingTest.tag.key, + taggingTest.tag.value, done)).catch(err => { if (taggingTest.error) { - checkError(err, taggingTest.error); + checkError(err, taggingTest.error, taggingTest.code); return done(); } assert.equal(err, null, 'Expected success, ' + @@ -323,19 +331,12 @@ describe('Object Copy', () => { }); it('should also copy additional headers (CacheControl, ' + - 'ContentDisposition, ContentEncoding, Expires) when copying an ' + - 'object from a source bucket to a different destination bucket', - done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}` }, - err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, Key: destObjName }, - (err, res) => { - if (err) { - done(err); - } - assert.strictEqual(res.CacheControl, + 'ContentDisposition, ContentEncoding, Expires) when copying an ' + + 'object from a source bucket to a different destination bucket', done => { + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}` })).then(() => { + s3.send(new GetObjectCommand({ Bucket: destBucketName, Key: destObjName })).then(res => { + assert.strictEqual(res.CacheControl, originalCacheControl); assert.strictEqual(res.ContentDisposition, originalContentDisposition); @@ -347,25 +348,27 @@ describe('Object Copy', () => { assert.strictEqual(res.Expires.toGMTString(), originalExpires.toGMTString()); done(); + }).catch(err => { + checkNoError(err); + done(err); }); - }); - }); + }).catch(err => { + checkNoError(err); + done(err); + }); + }); it('should copy an object from a source bucket to a different ' + - 'key in the same bucket', - done => { - s3.copyObject({ Bucket: sourceBucketName, Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}` }, - (err, res) => - successCopyCheck(err, res, originalMetadata, - sourceBucketName, destObjName, done) - ); + 'key in the same bucket', async () => { + const res = await s3.send(new CopyObjectCommand({ Bucket: sourceBucketName, Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}` })); + await successCopyCheck(null, res.CopyObjectResult, originalMetadata, + sourceBucketName, destObjName); }); // TODO: see S3C-3482, figure out why this test fails in Integration builds itSkipIfE2E('should not return error if copying 
object w/ > ' + - '2KB user-defined md and COPY directive', - done => { + '2KB user-defined md and COPY directive', done => { const metadata = genMaxSizeMetaHeaders(); const params = { Bucket: destBucketName, @@ -374,101 +377,91 @@ describe('Object Copy', () => { MetadataDirective: 'COPY', Metadata: metadata, }; - s3.copyObject(params, err => { - assert.strictEqual(err, null, `Unexpected err: ${err}`); + s3.send(new CopyObjectCommand(params)).then(() => { // add one more byte to be over the limit metadata.header0 = `${metadata.header0}${'0'}`; - s3.copyObject(params, err => { - assert.strictEqual(err, null, `Unexpected err: ${err}`); + s3.send(new CopyObjectCommand(params)).then(() => { done(); + }).catch(err => { + assert.strictEqual(err, null, `Unexpected err: ${err}`); + done(err); }); + }).catch(err => { + assert.strictEqual(err, null, `Unexpected err: ${err}`); + done(err); }); }); // TODO: see S3C-3482, figure out why this test fails in Integration builds itSkipIfE2E('should return error if copying object w/ > 2KB ' + - 'user-defined md and REPLACE directive', - done => { - const metadata = genMaxSizeMetaHeaders(); - const params = { - Bucket: destBucketName, - Key: destObjName, - CopySource: `${sourceBucketName}/${sourceObjName}`, + 'user-defined md and REPLACE directive', async () => { + try { + const metadata = genMaxSizeMetaHeaders(); + const params = { + Bucket: destBucketName, + Key: destObjName, + CopySource: `${sourceBucketName}/${sourceObjName}`, MetadataDirective: 'REPLACE', Metadata: metadata, }; - s3.copyObject(params, err => { - assert.strictEqual(err, null, `Unexpected err: ${err}`); - // add one more byte to be over the limit - metadata.header0 = `${metadata.header0}${'0'}`; - s3.copyObject(params, err => { - assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'MetadataTooLarge'); - assert.strictEqual(err.statusCode, 400); - done(); - }); - }); - }); + await s3.send(new CopyObjectCommand(params)); + // add one more byte to be over the limit + metadata.header0 = `${metadata.header0}${'0'}`; + await s3.send(new CopyObjectCommand(params)); + assert.fail('Expected MetadataTooLarge error'); + } catch (err) { + assert.strictEqual(err.name, 'MetadataTooLarge'); + assert.strictEqual(err.$metadata.httpStatusCode, 400); + } + }); it('should copy an object from a source to the same destination ' + - '(update metadata)', done => { - s3.copyObject({ Bucket: sourceBucketName, Key: sourceObjName, + '(update metadata)', async () => { + const res = await s3.send(new CopyObjectCommand({ Bucket: sourceBucketName, Key: sourceObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, MetadataDirective: 'REPLACE', - Metadata: newMetadata }, - (err, res) => - successCopyCheck(err, res, newMetadata, - sourceBucketName, sourceObjName, done) - ); + Metadata: newMetadata })); + await successCopyCheck(null, res.CopyObjectResult, newMetadata, + sourceBucketName, sourceObjName); }); it('should copy an object and replace the metadata if replace ' + - 'included as metadata directive header', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + 'included as metadata directive header', async () => { + const res = await s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, MetadataDirective: 'REPLACE', - Metadata: newMetadata, - }, - (err, res) => - successCopyCheck(err, res, newMetadata, - destBucketName, destObjName, done) - ); + Metadata: newMetadata })); + await 
successCopyCheck(null, res.CopyObjectResult, newMetadata, + destBucketName, destObjName); }); it('should copy an object and replace ContentType if replace ' + 'included as a metadata directive header, and new ContentType is ' + - 'provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + 'provided', async () => { + await s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, MetadataDirective: 'REPLACE', ContentType: 'image', - }, () => { - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - if (err) { - return done(err); - } - assert.strictEqual(res.ContentType, 'image'); - return done(); - }); - }); + })); + const res = await s3.send(new GetObjectCommand({ Bucket: destBucketName, + Key: destObjName })); + assert.strictEqual(res.ContentType, 'image'); }); it('should copy an object and keep ContentType if replace ' + 'included as a metadata directive header, but no new ContentType ' + 'is provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, MetadataDirective: 'REPLACE', - }, () => { - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - if (err) { - return done(err); - } - assert.strictEqual(res.ContentType, - 'application/octet-stream'); - return done(); + })).then(() => { + s3.send(new GetObjectCommand({ Bucket: destBucketName, + Key: destObjName })).then(res => { + assert.strictEqual(res.ContentType, 'application/octet-stream'); + done(); + }).catch(err => { + checkNoError(err); + done(err); }); }); }); @@ -476,20 +469,16 @@ describe('Object Copy', () => { it('should also replace additional headers if replace ' + 'included as metadata directive header and new headers are ' + 'specified', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, MetadataDirective: 'REPLACE', CacheControl: newCacheControl, ContentDisposition: newContentDisposition, ContentEncoding: newContentEncoding, Expires: newExpires, - }, err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - if (err) { - done(err); - } + })).then(() => { + s3.send(new GetObjectCommand({ Bucket: destBucketName, + Key: destObjName })).then(res => { assert.strictEqual(res.CacheControl, newCacheControl); assert.strictEqual(res.ContentDisposition, newContentDisposition); @@ -499,32 +488,42 @@ describe('Object Copy', () => { assert.strictEqual(res.Expires.toGMTString(), newExpires.toGMTString()); done(); + }).catch(err => { + checkNoError(err); + done(err); }); + }).catch(err => { + checkNoError(err); + done(err); }); }); it('should copy an object and the metadata if copy ' + 'included as metadata directive header (and ignore any new ' + 'metadata sent with copy request)', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, MetadataDirective: 'COPY', Metadata: newMetadata, - }, - err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { + })).then(() => { + s3.send(new GetObjectCommand({ Bucket: destBucketName, + Key: destObjName })).then(res => { 
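+                    // with the COPY directive the destination must keep the
+                    // source object's metadata and ignore the newMetadata
+                    // sent with the copy request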
assert.deepStrictEqual(res.Metadata, originalMetadata); done(); + }).catch(err => { + checkNoError(err); + done(err); }); - }); + }).catch(err => { + checkNoError(err); + done(err); + }); }); it('should copy an object and its additional headers if copy ' + 'included as metadata directive header (and ignore any new ' + 'headers sent with copy request)', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, MetadataDirective: 'COPY', Metadata: newMetadata, @@ -532,14 +531,9 @@ describe('Object Copy', () => { ContentDisposition: newContentDisposition, ContentEncoding: newContentEncoding, Expires: newExpires, - }, err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, Key: destObjName }, - (err, res) => { - if (err) { - done(err); - } - assert.strictEqual(res.CacheControl, + })).then(() => { + s3.send(new GetObjectCommand({ Bucket: destBucketName, Key: destObjName })).then(res => { + assert.strictEqual(res.CacheControl, originalCacheControl); assert.strictEqual(res.ContentDisposition, originalContentDisposition); @@ -554,23 +548,26 @@ describe('Object Copy', () => { it('should copy a 0 byte object to different destination', done => { const emptyFileETag = '"d41d8cd98f00b204e9800998ecf8427e"'; - s3.putObject({ Bucket: sourceBucketName, Key: sourceObjName, - Body: '', Metadata: originalMetadata }, () => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new PutObjectCommand({ Bucket: sourceBucketName, Key: sourceObjName, + Body: '', Metadata: originalMetadata })).then(() => { + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, - }, - (err, res) => { - checkNoError(err); - assert.strictEqual(res.ETag, emptyFileETag); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { - checkNoError(err); + })).then(res => { + assert.strictEqual(res.CopyObjectResult.ETag, emptyFileETag); + s3.send(new GetObjectCommand({ Bucket: destBucketName, + Key: destObjName })).then(res => { assert.deepStrictEqual(res.Metadata, originalMetadata); assert.strictEqual(res.ETag, emptyFileETag); done(); }); + }).catch(err => { + checkNoError(err); + done(err); }); + }).catch(err => { + checkNoError(err); + done(err); }); }); @@ -578,100 +575,127 @@ describe('Object Copy', () => { if (constants.validStorageClasses.includes('REDUCED_REDUNDANCY')) { it('should copy a 0 byte object to same destination', done => { const emptyFileETag = '"d41d8cd98f00b204e9800998ecf8427e"'; - s3.putObject({ Bucket: sourceBucketName, Key: sourceObjName, Body: '' }, () => { - s3.copyObject({ Bucket: sourceBucketName, Key: sourceObjName, + s3.send(new PutObjectCommand({ Bucket: sourceBucketName, Key: sourceObjName, Body: '' })).then(() => { + s3.send(new CopyObjectCommand({ Bucket: sourceBucketName, Key: sourceObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, StorageClass: 'REDUCED_REDUNDANCY', - }, (err, res) => { - checkNoError(err); - assert.strictEqual(res.ETag, emptyFileETag); - s3.getObject({ Bucket: sourceBucketName, - Key: sourceObjName }, (err, res) => { + })).then(res => { + assert.strictEqual(res.CopyObjectResult.ETag, emptyFileETag); + s3.send(new GetObjectCommand({ Bucket: sourceBucketName, + Key: sourceObjName })).then(res => { assert.deepStrictEqual(res.Metadata, {}); assert.deepStrictEqual(res.StorageClass, 'REDUCED_REDUNDANCY'); 
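+                        // 'd41d8cd98f00b204e9800998ecf8427e' is the MD5 of an
+                        // empty body; the storage-class change must not alter it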
assert.strictEqual(res.ETag, emptyFileETag); done(); + }).catch(err => { + checkNoError(err); + done(err); }); + }).catch(err => { + checkNoError(err); + done(err); }); }); }); it('should copy an object to a different destination and change ' + 'the storage class if storage class header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, StorageClass: 'REDUCED_REDUNDANCY', - }, err => { - checkNoError(err); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { + })).then(() => { + s3.send(new GetObjectCommand({ Bucket: destBucketName, + Key: destObjName })).then(res => { assert.strictEqual(res.StorageClass, 'REDUCED_REDUNDANCY'); done(); + }).catch(err => { + checkNoError(err); + done(err); }); + }).catch(err => { + checkNoError(err); + done(err); }); }); it('should copy an object to the same destination and change the ' + 'storage class if the storage class header provided', done => { - s3.copyObject({ Bucket: sourceBucketName, Key: sourceObjName, + s3.send(new CopyObjectCommand({ Bucket: sourceBucketName, Key: sourceObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, StorageClass: 'REDUCED_REDUNDANCY', - }, err => { - checkNoError(err); - s3.getObject({ Bucket: sourceBucketName, - Key: sourceObjName }, (err, res) => { - checkNoError(err); + })).then(() => { + s3.send(new GetObjectCommand({ Bucket: sourceBucketName, + Key: sourceObjName })).then(res => { assert.strictEqual(res.StorageClass, 'REDUCED_REDUNDANCY'); done(); + }).catch(err => { + checkNoError(err); + done(err); }); + }).catch(err => { + checkNoError(err); + done(err); }); }); } it('should copy an object to a new bucket and overwrite an already ' + 'existing object in the destination bucket', done => { - s3.putObject({ Bucket: destBucketName, Key: destObjName, - Body: 'overwrite me', Metadata: originalMetadata }, () => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new PutObjectCommand({ Bucket: destBucketName, Key: destObjName, + Body: 'overwrite me', Metadata: originalMetadata })).then(() => { + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, MetadataDirective: 'REPLACE', Metadata: newMetadata, - }, - (err, res) => { - checkNoError(err); - assert.strictEqual(res.ETag, etag); - s3.getObject({ Bucket: destBucketName, - Key: destObjName }, (err, res) => { + })).then(res => { + assert.strictEqual(res.CopyObjectResult.ETag, etag); + s3.send(new GetObjectCommand({ Bucket: destBucketName, + Key: destObjName })).then(async res => { assert.deepStrictEqual(res.Metadata, newMetadata); assert.strictEqual(res.ETag, etag); - assert.strictEqual(res.Body.toString(), content); + const bodyString = await res.Body.transformToString(); + assert.strictEqual(bodyString, content); done(); + }).catch(err => { + checkNoError(err); + done(err); }); + }).catch(err => { + checkNoError(err); + done(err); }); - }); + }).catch(err => { + checkNoError(err); + done(err); + } + ); }); // skipping test as object level encryption is not implemented yet it.skip('should copy an object and change the server side encryption' + 'option if server side encryption header provided', done => { - s3.copyObject({ Bucket: destBucketName, Key: destObjName, + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, 
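+            // AES256 selects server-managed encryption (SSE-S3); the
+            // customer-key variant (SSECustomerAlgorithm) is exercised in
+            // the NotImplemented test below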
             ServerSideEncryption: 'AES256',
-        },
-        err => {
+        })).then(() => {
+            s3.send(new GetObjectCommand({ Bucket: destBucketName,
+                Key: destObjName })).then(res => {
+                assert.strictEqual(res.ServerSideEncryption,
+                    'AES256');
+                done();
+            }).catch(err => {
             checkNoError(err);
-            s3.getObject({ Bucket: destBucketName,
-                Key: destObjName }, (err, res) => {
-                assert.strictEqual(res.ServerSideEncryption,
-                    'AES256');
-                done();
-            });
+                done(err);
             });
+        }).catch(err => {
+            checkNoError(err);
+            done(err);
+        });
     });
 
     it('should return Not Implemented error for obj. encryption using ' +
@@ -679,21 +703,21 @@ describe('Object Copy', () => {
             const params = { Bucket: destBucketName, Key: 'key',
                 CopySource: `${sourceBucketName}/${sourceObjName}`,
                 SSECustomerAlgorithm: 'AES256' };
-            s3.copyObject(params, err => {
-                assert.strictEqual(err.code, 'NotImplemented');
+            s3.send(new CopyObjectCommand(params)).then(() => {
+                done(new Error('Expected NotImplemented error'));
+            }).catch(err => {
+                assert.strictEqual(err.name, 'NotImplemented');
                 done();
             });
         });
 
     it('should copy an object and set the acl on the new object', done => {
-        s3.copyObject({ Bucket: destBucketName, Key: destObjName,
+        s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName,
             CopySource: `${sourceBucketName}/${sourceObjName}`,
             ACL: 'authenticated-read',
-        },
-        err => {
-            checkNoError(err);
-            s3.getObjectAcl({ Bucket: destBucketName,
-                Key: destObjName }, (err, res) => {
+        })).then(() => {
+            s3.send(new GetObjectAclCommand({ Bucket: destBucketName,
+                Key: destObjName })).then(res => {
                 // With authenticated-read ACL, there are two
                 // grants:
                 // (1) FULL_CONTROL to the object owner
@@ -707,62 +731,79 @@
                     'http://acs.amazonaws.com/groups/' +
                     'global/AuthenticatedUsers');
                 done();
+            }).catch(err => {
+                checkNoError(err);
+                done(err);
             });
-        });
+        }).catch(err => {
+            checkNoError(err);
+            done(err);
+        });
     });
 
     it('should copy an object and default the acl on the new object ' +
         'to private even if the copied object had a ' +
         'different acl', done => {
-        s3.putObjectAcl({ Bucket: sourceBucketName, Key: sourceObjName,
-            ACL: 'authenticated-read' }, () => {
-            s3.copyObject({ Bucket: destBucketName, Key: destObjName,
+        s3.send(new PutObjectAclCommand({ Bucket: sourceBucketName, Key: sourceObjName,
+            ACL: 'authenticated-read' })).then(() => {
+            s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName,
                 CopySource: `${sourceBucketName}/${sourceObjName}`,
-            },
-            () => {
-                s3.getObjectAcl({ Bucket: destBucketName,
-                    Key: destObjName }, (err, res) => {
+            })).then(() => {
+                s3.send(new GetObjectAclCommand({ Bucket: destBucketName,
+                    Key: destObjName })).then(res => {
                     // With private ACL, there is only one grant
                     // of FULL_CONTROL to the object owner
                     assert.strictEqual(res.Grants.length, 1);
                     assert.strictEqual(res.Grants[0].Permission,
                         'FULL_CONTROL');
                     done();
+                }).catch(err => {
+                    checkNoError(err);
+                    done(err);
                 });
-            });
+            }).catch(err => {
+                checkNoError(err);
+                done(err);
+            });
+        }).catch(err => {
+            checkNoError(err);
+            done(err);
         });
     });
 
     it('should return an error if attempt to copy with same source as' +
         'destination and do not change any metadata', done => {
-        s3.copyObject({ Bucket: sourceBucketName, Key: sourceObjName,
+        s3.send(new CopyObjectCommand({ Bucket: sourceBucketName, Key: sourceObjName,
             CopySource: `${sourceBucketName}/${sourceObjName}`,
-        },
-        err => {
-            checkError(err, 'InvalidRequest');
-            done();
-        });
+        })).then(() => {
+            done(new Error('Expected InvalidRequest error'));
+        }).catch(err => {
+            checkError(err, 'InvalidRequest', 400);
+            done();
+        });
     });
 
     it('should return an error if attempt to copy from nonexistent bucket',
         done => {
-        s3.copyObject({ Bucket: destBucketName, Key: destObjName,
+        s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName,
             CopySource: `nobucket453234/${sourceObjName}`,
-        },
-        err => {
-            checkError(err, 'NoSuchBucket');
+        })).then(() => {
+            done(new Error('Expected NoSuchBucket error'));
+        }).catch(err => {
+            checkError(err, 'NoSuchBucket', 404);
             done();
         });
     });
 
     it('should return an error if use invalid redirect location', done => {
-        s3.copyObject({ Bucket: destBucketName, Key: destObjName,
+        s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName,
             CopySource: `${sourceBucketName}/${sourceObjName}`,
             WebsiteRedirectLocation: 'google.com',
-        },
-        err => {
-            checkError(err, 'InvalidRedirectLocation');
+        })).then(() => {
+            done(new Error('Expected InvalidRedirectLocation error'));
+        }).catch(err => {
+            checkError(err, 'InvalidRedirectLocation', 400);
             done();
         });
     });
 
@@ -770,12 +811,13 @@ describe('Object Copy', () => {
     it('should return an error if copy request has object lock legal ' +
         'hold header but object lock is not enabled on destination bucket',
         done => {
-        s3.copyObject({ Bucket: destBucketName, Key: destObjName,
+        s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName,
             CopySource: `${sourceBucketName}/${sourceObjName}`,
             ObjectLockLegalHoldStatus: 'ON',
-        },
-        err => {
-            checkError(err, 'InvalidRequest');
+        })).then(() => {
+            done(new Error('Expected InvalidRequest error'));
+        }).catch(err => {
+            checkError(err, 'InvalidRequest', 400);
             done();
         });
     });
 
@@ -784,49 +826,65 @@
         'but object lock is not enabled on destination bucket',
         done => {
             const mockDate = new Date(2050, 10, 12);
-            s3.copyObject({
+            s3.send(new CopyObjectCommand({
                 Bucket: destBucketName,
                 Key: destObjName,
                 CopySource: `${sourceBucketName}/${sourceObjName}`,
                 ObjectLockMode: 'GOVERNANCE',
                 ObjectLockRetainUntilDate: mockDate,
-            },
-            err => {
-                checkError(err, 'InvalidRequest');
+            })).then(() => {
+                done(new Error('Expected InvalidRequest error'));
+            }).catch(err => {
+                checkError(err, 'InvalidRequest', 400);
                 done();
             });
         });
 
     it('should return an error if attempt to copy to nonexistent bucket',
         done => {
-        s3.copyObject({ Bucket: 'nobucket453234', Key: destObjName,
+        s3.send(new CopyObjectCommand({ Bucket: 'nobucket453234', Key: destObjName,
             CopySource: `${sourceBucketName}/${sourceObjName}`,
-        },
-        err => {
-            checkError(err, 'NoSuchBucket');
+        })).then(() => {
+            done(new Error('Expected NoSuchBucket error'));
+        }).catch(err => {
+            checkError(err, 'NoSuchBucket', 404);
             done();
         });
     });
 
     it('should return an error if attempt to copy nonexistent object',
         done => {
-        s3.copyObject({ Bucket: destBucketName, Key: destObjName,
+        s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName,
             CopySource: `${sourceBucketName}/nokey`,
-        },
-        err => {
-            checkError(err, 'NoSuchKey');
+        })).then(() => {
+            done(new Error('Expected NoSuchKey error'));
+        }).catch(err => {
+            checkError(err, 'NoSuchKey', 404);
             done();
         });
     });
 
     it('should return an error if send invalid metadata directive header',
         done => {
-        s3.copyObject({ Bucket: destBucketName, Key: destObjName,
+        s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName,
             CopySource: `${sourceBucketName}/${sourceObjName}`,
             MetadataDirective: 'copyHalf',
-        },
-        err => {
-            checkError(err, 'InvalidArgument');
+        })).then(() => {
+            done(new Error('Expected InvalidArgument error'));
+        }).catch(err => {
+            checkError(err, 'InvalidArgument', 400);
             done();
         });
     });
 
@@ -845,46 +903,37 @@ describe('Object Copy', () => {
         it('should not allow an account without read persmission on the ' +
             'source object to copy the object', done => {
-            otherAccountS3.copyObject({ Bucket: otherAccountBucket,
+            otherAccountS3.send(new CopyObjectCommand({ Bucket: otherAccountBucket,
                 Key: otherAccountKey,
                 CopySource: `${sourceBucketName}/${sourceObjName}`,
-            },
-            err => {
-                checkError(err, 'AccessDenied');
+            })).then(() => {
+                done(new Error('Expected AccessDenied error'));
+            }).catch(err => {
+                checkError(err, 'AccessDenied', 403);
                 done();
             });
         });
 
         it('should not allow an account without write persmission on the ' +
-            'destination bucket to copy the object', done => {
-            otherAccountS3.putObject({ Bucket: otherAccountBucket,
-                Key: otherAccountKey, Body: '' }, () => {
-                otherAccountS3.copyObject({ Bucket: destBucketName,
+            'destination bucket to copy the object', () => otherAccountS3.send(new PutObjectCommand(
+            { Bucket: otherAccountBucket,
+                Key: otherAccountKey, Body: '' })).then(() => otherAccountS3.send(new CopyObjectCommand(
+            { Bucket: destBucketName,
                 Key: destObjName,
                 CopySource: `${otherAccountBucket}/${otherAccountKey}`,
-            },
-            err => {
-                checkError(err, 'AccessDenied');
-                done();
-            });
-            });
-        });
+            })).then(
+                () => { throw new Error('Expected AccessDenied error'); },
+                err => checkError(err, 'AccessDenied', 403))));
 
         it('should allow an account with read permission on the ' +
             'source object and write permission on the destination ' +
-            'bucket to copy the object', done => {
-            s3.putObjectAcl({ Bucket: sourceBucketName,
-                Key: sourceObjName, ACL: 'public-read' }, () => {
-                otherAccountS3.copyObject({ Bucket: otherAccountBucket,
+            'bucket to copy the object', () => s3.send(new PutObjectAclCommand({ Bucket: sourceBucketName,
+            Key: sourceObjName, ACL: 'public-read' })).then(() => otherAccountS3.send(new CopyObjectCommand(
+            { Bucket: otherAccountBucket,
                 Key: otherAccountKey,
                 CopySource: `${sourceBucketName}/${sourceObjName}`,
-            },
-            err => {
-                checkNoError(err);
-                done();
-            });
-            });
-        });
+            })))));
     });
 
     it('If-Match: returns no error when ETag match, with double quotes ' +
@@ -935,7 +984,7 @@
     it('If-Match: returns PreconditionFailed when ETag does not match',
         done => {
         requestCopy({ CopySourceIfMatch: 'non-matching ETag' }, err => {
-            checkError(err, 'PreconditionFailed');
+            checkError(err, 'PreconditionFailed', 412);
             done();
         });
     });
 
@@ -961,7 +1010,7 @@
         'double quotes around ETag',
         done => {
         requestCopy({ CopySourceIfNoneMatch: etag }, err => {
-            checkError(err, 'PreconditionFailed');
+            checkError(err, 'PreconditionFailed', 412);
             done();
         });
     });
 
@@ -972,7 +1021,7 @@
         requestCopy({
             CopySourceIfNoneMatch: `non-matching,${etag}`,
         }, err => {
-            checkError(err, 'PreconditionFailed');
+            checkError(err, 'PreconditionFailed', 412);
             done();
         });
     });
 
@@ -981,7 +1030,7 @@
         'without double quotes around ETag',
         done => {
         requestCopy({ CopySourceIfNoneMatch: etagTrim }, err => {
-            checkError(err, 'PreconditionFailed');
+            checkError(err, 'PreconditionFailed', 412);
             done();
         });
     });
 
@@ -992,7 +1041,7 @@
         requestCopy({
             CopySourceIfNoneMatch: `non-matching,${etagTrim}`,
         }, err => {
-            checkError(err, 'PreconditionFailed');
+            checkError(err, 'PreconditionFailed', 412);
             done();
         });
     });
 
@@ -1014,7 +1063,7 @@
         done => {
         requestCopy({ CopySourceIfModifiedSince: dateFromNow(1) },
             err => {
-                checkError(err,
'PreconditionFailed'); + checkError(err, 'PreconditionFailed', 412); done(); }); }); @@ -1025,7 +1074,7 @@ describe('Object Copy', () => { requestCopy({ CopySourceIfModifiedSince: dateConvert(lastModified) }, err => { - checkError(err, 'PreconditionFailed'); + checkError(err, 'PreconditionFailed', 412); done(); }); }); @@ -1056,7 +1105,7 @@ describe('Object Copy', () => { done => { requestCopy({ CopySourceIfUnmodifiedSince: dateFromNow(-1) }, err => { - checkError(err, 'PreconditionFailed'); + checkError(err, 'PreconditionFailed', 412); done(); }); }); @@ -1088,7 +1137,7 @@ describe('Object Copy', () => { CopySourceIfMatch: 'non-matching', CopySourceIfUnmodifiedSince: dateFromNow(-1), }, err => { - checkError(err, 'PreconditionFailed'); + checkError(err, 'PreconditionFailed', 412); done(); }); }); @@ -1130,7 +1179,7 @@ describe('Object Copy', () => { CopySourceIfMatch: 'non-matching', CopySourceIfModifiedSince: dateFromNow(1), }, err => { - checkError(err, 'PreconditionFailed'); + checkError(err, 'PreconditionFailed', 412); done(); }); }); @@ -1140,7 +1189,7 @@ describe('Object Copy', () => { CopySourceIfMatch: 'non-matching', CopySourceIfModifiedSince: dateFromNow(-1), }, err => { - checkError(err, 'PreconditionFailed'); + checkError(err, 'PreconditionFailed', 412); done(); }); }); @@ -1152,7 +1201,7 @@ describe('Object Copy', () => { CopySourceIfNoneMatch: etagTrim, CopySourceIfModifiedSince: dateFromNow(-1), }, err => { - checkError(err, 'PreconditionFailed'); + checkError(err, 'PreconditionFailed', 412); done(); }); }); @@ -1162,7 +1211,7 @@ describe('Object Copy', () => { CopySourceIfNoneMatch: etagTrim, CopySourceIfModifiedSince: dateFromNow(1), }, err => { - checkError(err, 'PreconditionFailed'); + checkError(err, 'PreconditionFailed', 412); done(); }); }); @@ -1184,7 +1233,7 @@ describe('Object Copy', () => { CopySourceIfNoneMatch: 'non-matching', CopySourceIfModifiedSince: dateFromNow(1), }, err => { - checkError(err, 'PreconditionFailed'); + checkError(err, 'PreconditionFailed', 412); done(); }); }); @@ -1204,7 +1253,7 @@ describe('Object Copy', () => { CopySourceIfNoneMatch: 'non-matching', CopySourceIfUnmodifiedSince: dateFromNow(-1), }, err => { - checkError(err, 'PreconditionFailed'); + checkError(err, 'PreconditionFailed', 412); done(); }); }); @@ -1214,7 +1263,7 @@ describe('Object Copy', () => { CopySourceIfNoneMatch: etagTrim, CopySourceIfUnmodifiedSince: dateFromNow(1), }, err => { - checkError(err, 'PreconditionFailed'); + checkError(err, 'PreconditionFailed', 412); done(); }); }); @@ -1224,23 +1273,25 @@ describe('Object Copy', () => { CopySourceIfNoneMatch: etagTrim, CopySourceIfUnmodifiedSince: dateFromNow(-1), }, err => { - checkError(err, 'PreconditionFailed'); + checkError(err, 'PreconditionFailed', 412); done(); }); }); it('should return InvalidStorageClass error when x-amz-storage-class header is provided ' + 'and not equal to STANDARD', done => { - s3.copyObject({ + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, StorageClass: 'COLD', - }, err => { - assert.strictEqual(err.code, 'InvalidStorageClass'); - assert.strictEqual(err.statusCode, 400); - done(); - }); + })).then(() => { + throw new Error('Expected InvalidStorageClass error'); + }).catch(err => { + assert.strictEqual(err.name, 'InvalidStorageClass'); + assert.strictEqual(err.$metadata.httpStatusCode, 400); + done(); + }); }); it('should not copy a cold object', done => { @@ -1252,28 +1303,34 @@ describe('Object Copy', () => { 
}; fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archive, err => { assert.ifError(err); - s3.copyObject({ + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, - }, err => { - assert.strictEqual(err.code, 'InvalidObjectState'); - assert.strictEqual(err.statusCode, 403); - done(); - }); + })).then(() => { + throw new Error('Expected InvalidObjectState error'); + }).catch(err => { + assert.strictEqual(err.name, 'InvalidObjectState'); + assert.strictEqual(err.$metadata.httpStatusCode, 403); + done(); + }); }); }); it('should copy an object when it\'s transitioning to cold', done => { fakeMetadataTransition(sourceBucketName, sourceObjName, undefined, err => { assert.ifError(err); - s3.copyObject({ + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, - }, (err, res) => { - successCopyCheck(err, res, originalMetadata, - destBucketName, destObjName, done); + })).then(async res => { + await successCopyCheck(null, res.CopyObjectResult, originalMetadata, + destBucketName, destObjName); + done(); + }).catch(err => { + checkNoError(err); + done(); }); }); }); @@ -1288,13 +1345,17 @@ describe('Object Copy', () => { }; fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archiveCompleted, err => { assert.ifError(err); - s3.copyObject({ + s3.send(new CopyObjectCommand({ Bucket: destBucketName, Key: destObjName, CopySource: `${sourceBucketName}/${sourceObjName}`, - }, (err, res) => { - successCopyCheck(err, res, originalMetadata, - destBucketName, destObjName, done); + })).then(async res => { + await successCopyCheck(null, res.CopyObjectResult, originalMetadata, + destBucketName, destObjName); + done(); + }).catch(err => { + checkNoError(err); + done(); }); }); }); @@ -1314,12 +1375,12 @@ describeSkipIfCeph('Object Copy with object lock enabled on both destination ' + before(() => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return bucketUtil.empty(sourceBucketName) + return bucketUtil.empty(sourceBucketName, true) .then(() => bucketUtil.empty(destBucketName)) .then(() => bucketUtil.deleteMany([sourceBucketName, destBucketName])) .catch(err => { - if (err.code !== 'NoSuchBucket') { + if (err.name !== 'NoSuchBucket') { process.stdout.write(`${err}\n`); throw err; } @@ -1331,40 +1392,39 @@ describeSkipIfCeph('Object Copy with object lock enabled on both destination ' + }); }); - beforeEach(() => s3.putObject({ + beforeEach(() => s3.send(new PutObjectCommand({ Bucket: sourceBucketName, Key: sourceObjName, Body: content, Metadata: originalMetadata, ObjectLockMode: 'GOVERNANCE', ObjectLockRetainUntilDate: new Date(2050, 1, 1), - }).promise().then(res => { + })).then(res => { versionId = res.VersionId; - s3.headObject({ + s3.send(new HeadObjectCommand({ Bucket: sourceBucketName, Key: sourceObjName, - }).promise(); + })); })); - afterEach(() => bucketUtil.empty(sourceBucketName) - .then(() => bucketUtil.empty(destBucketName))); + afterEach(async () => { + await bucketUtil.empty(sourceBucketName); + await bucketUtil.empty(destBucketName); + }); - after(() => bucketUtil.deleteMany([sourceBucketName, destBucketName])); + after(async () => await bucketUtil.deleteMany([sourceBucketName, destBucketName])); it('should not copy default retention info of the destination ' + 'bucket if legal hold header is passed with copy object request', done => { - s3.copyObject({ + s3.send(new CopyObjectCommand({ Bucket: 
destBucketName,
                 Key: destObjName,
                 CopySource: `${sourceBucketName}/${sourceObjName}`,
                 ObjectLockLegalHoldStatus: 'ON',
-            },
-            err => {
-                assert.ifError(err);
-                s3.getObject({ Bucket: destBucketName, Key: destObjName },
-                    (err, res) => {
-                        assert.ifError(err);
+            })).then(() => {
+                s3.send(new GetObjectCommand({ Bucket: destBucketName, Key: destObjName }))
+                    .then(res => {
                         assert.strictEqual(res.ObjectLockMode, undefined);
                         assert.strictEqual(res.ObjectLockRetainUntilDate,
                             undefined);
@@ -1381,25 +1441,40 @@ describeSkipIfCeph('Object Copy with object lock enabled on both destination ' +
                             versionId: res.VersionId,
                         },
                         ];
-                        changeObjectLock(removeLockObjs, '', done);
-                    });
+                        new Promise((resolve, reject) => {
+                            changeObjectLock(removeLockObjs, '', err => {
+                                if (err) {
+                                    reject(err);
+                                } else {
+                                    resolve();
+                                }
+                            });
+                        }).then(done).catch(err => {
+                            assert.ifError(err);
+                            done(err);
+                        });
+                    }).catch(err => {
+                        assert.ifError(err);
+                        done(err);
                     });
-                });
+            }).catch(err => {
+                assert.ifError(err);
+                done(err);
             });
+        });
 
         it('should not copy default retention info of the destination ' +
             'bucket if legal hold header is passed with copy object request',
             done => {
-            s3.copyObject({
+            s3.send(new CopyObjectCommand({
                 Bucket: destBucketName,
                 Key: destObjName,
                 CopySource: `${sourceBucketName}/${sourceObjName}`,
                 ObjectLockLegalHoldStatus: 'on',
-            },
-            err => {
-                assert.ifError(err);
-                s3.getObject({ Bucket: destBucketName, Key: destObjName },
-                    (err, res) => {
-                        assert.ifError(err);
+            })).then(() => {
+                s3.send(new GetObjectCommand({ Bucket: destBucketName, Key: destObjName }))
+                    .then(res => {
                         assert.strictEqual(res.ObjectLockMode, undefined);
                         assert.strictEqual(res.ObjectLockRetainUntilDate,
                             undefined);
@@ -1413,25 +1488,28 @@ describeSkipIfCeph('Object Copy with object lock enabled on both destination ' +
                         },
                         ];
                         changeObjectLock(removeLockObjs, '', done);
+                    }).catch(err => {
+                        assert.ifError(err);
+                        done(err);
                     });
+            }).catch(err => {
+                assert.ifError(err);
+                done(err);
             });
         });
 
         it('should overwrite default retention info of the destination ' +
             'bucket if retention headers passed with copy object request',
             done => {
-            s3.copyObject({
+            s3.send(new CopyObjectCommand({
                 Bucket: destBucketName,
                 Key: destObjName,
                 CopySource: `${sourceBucketName}/${sourceObjName}`,
                 ObjectLockMode: 'COMPLIANCE',
                 ObjectLockRetainUntilDate: new Date(2055, 2, 3),
-            },
-            err => {
-                assert.ifError(err);
-                s3.getObject({ Bucket: destBucketName, Key: destObjName },
-                    (err, res) => {
-                        assert.ifError(err);
+            })).then(() => {
+                s3.send(new GetObjectCommand({ Bucket: destBucketName, Key: destObjName }))
+                    .then(res => {
                         assert.strictEqual(res.ObjectLockMode, 'COMPLIANCE');
                         assert.strictEqual(res.ObjectLockRetainUntilDate.toGMTString(),
                             new Date(2055, 2, 3).toGMTString());
@@ -1447,8 +1525,15 @@ describeSkipIfCeph('Object Copy with object lock enabled on both destination ' +
                         },
                         ];
                         changeObjectLock(removeLockObjs, '', done);
+                    }).catch(err => {
+                        assert.ifError(err);
+                        done(err);
                     });
+            }).catch(err => {
+                assert.ifError(err);
+                done(err);
             });
         });
-    });
+    });
+
+    });
diff --git a/tests/functional/aws-node-sdk/test/object/objectHead.js b/tests/functional/aws-node-sdk/test/object/objectHead.js
index bca6a0e13b..92e2b64799 100644
--- a/tests/functional/aws-node-sdk/test/object/objectHead.js
+++ b/tests/functional/aws-node-sdk/test/object/objectHead.js
@@ -3,10 +3,22 @@ const assert = require('assert');
 const async = require('async');
 const { errorInstances } = require('arsenal');
 const moment =
require('moment'); +const { + HeadObjectCommand, + PutObjectCommand, + CreateBucketCommand, + DeleteBucketCommand, + GetObjectCommand, + ListObjectVersionsCommand, + CreateMultipartUploadCommand, + UploadPartCommand, + CompleteMultipartUploadCommand, +} = require('@aws-sdk/client-s3'); const changeObjectLock = require('../../../../utilities/objectLock-util'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); +const checkError = require('../../lib/utility/checkError'); const changeLockPromise = promisify(changeObjectLock); @@ -19,19 +31,14 @@ function checkNoError(err) { `Expected success, got error ${JSON.stringify(err)}`); } -function checkError(err, code) { - assert.notEqual(err, null, 'Expected failure but got success'); - assert.strictEqual(err.code, code); -} - function dateFromNow(diff) { const d = new Date(); d.setHours(d.getHours() + diff); - return d.toISOString(); + return d; } function dateConvert(d) { - return (new Date(d)).toISOString(); + return new Date(d); } describe('HEAD object, conditions', () => { @@ -49,7 +56,7 @@ describe('HEAD object, conditions', () => { bucketUtil.deleteOne(bucketName) ) .catch(err => { - if (err.code !== 'NoSuchBucket') { + if (err.name !== 'NoSuchBucket') { process.stdout.write(`${err}\n`); throw err; } @@ -58,21 +65,22 @@ describe('HEAD object, conditions', () => { }); function requestHead(fields, cb) { - s3.headObject(Object.assign({ + s3.send(new HeadObjectCommand(Object.assign({ Bucket: bucketName, Key: objectName, - }, fields), cb); + }, fields))).then(res => cb(null, res)).catch(cb); } - beforeEach(() => s3.putObject({ + beforeEach(() => s3.send(new PutObjectCommand({ Bucket: bucketName, Key: objectName, Body: 'I am the best content ever', - }).promise().then(res => { + })) + .then(res => { etag = res.ETag; etagTrim = etag.substring(1, etag.length - 1); - return s3.headObject( - { Bucket: bucketName, Key: objectName }).promise(); + return s3.send(new HeadObjectCommand( + { Bucket: bucketName, Key: objectName })); }).then(res => { lastModified = res.LastModified; })); @@ -127,7 +135,7 @@ describe('HEAD object, conditions', () => { it('If-Match: returns PreconditionFailed when ETag does not match', done => { requestHead({ IfMatch: 'non-matching ETag' }, err => { - checkError(err, errorInstances.PreconditionFailed.code); + assert.equal(err.$metadata.httpStatusCode, 412); done(); }); }); @@ -153,7 +161,7 @@ describe('HEAD object, conditions', () => { 'quotes around ETag', done => { requestHead({ IfNoneMatch: etag }, err => { - checkError(err, 'NotModified'); + assert.equal(err.$metadata.httpStatusCode, 304); done(); }); }); @@ -164,7 +172,7 @@ describe('HEAD object, conditions', () => { requestHead({ IfNoneMatch: `non-matching,${etag}`, }, err => { - checkError(err, 'NotModified'); + assert.equal(err.$metadata.httpStatusCode, 304); done(); }); }); @@ -173,7 +181,7 @@ describe('HEAD object, conditions', () => { 'double quotes around ETag', done => { requestHead({ IfNoneMatch: etagTrim }, err => { - checkError(err, 'NotModified'); + assert.equal(err.$metadata.httpStatusCode, 304); done(); }); }); @@ -184,7 +192,7 @@ describe('HEAD object, conditions', () => { requestHead({ IfNoneMatch: `non-matching,${etagTrim}`, }, err => { - checkError(err, 'NotModified'); + assert.equal(err.$metadata.httpStatusCode, 304); done(); }); }); @@ -206,7 +214,7 @@ describe('HEAD object, conditions', () => { done => { requestHead({ IfModifiedSince: dateFromNow(1) }, err => { - checkError(err, 'NotModified'); + 
checkError(err, errorInstances.NotModified.code); done(); }); }); @@ -216,7 +224,7 @@ describe('HEAD object, conditions', () => { done => { requestHead({ IfModifiedSince: dateConvert(lastModified) }, err => { - checkError(err, 'NotModified'); + assert.equal(err.$metadata.httpStatusCode, 304); done(); }); }); @@ -244,7 +252,7 @@ describe('HEAD object, conditions', () => { 'lastModified date is lesser', done => { requestHead({ IfUnmodifiedSince: dateFromNow(-1) }, err => { - checkError(err, errorInstances.PreconditionFailed.code); + assert.equal(err.$metadata.httpStatusCode, 412); done(); }); }); @@ -276,7 +284,7 @@ describe('HEAD object, conditions', () => { IfMatch: 'non-matching', IfUnmodifiedSince: dateFromNow(-1), }, err => { - checkError(err, errorInstances.PreconditionFailed.code); + assert.equal(err.$metadata.httpStatusCode, 412); done(); }); }); @@ -286,7 +294,7 @@ describe('HEAD object, conditions', () => { IfMatch: 'non-matching', IfUnmodifiedSince: dateFromNow(1), }, err => { - checkError(err, errorInstances.PreconditionFailed.code); + assert.equal(err.$metadata.httpStatusCode, 412); done(); }); }); @@ -318,7 +326,7 @@ describe('HEAD object, conditions', () => { IfMatch: 'non-matching', IfModifiedSince: dateFromNow(1), }, err => { - checkError(err, errorInstances.PreconditionFailed.code); + assert.equal(err.$metadata.httpStatusCode, 412); done(); }); }); @@ -328,7 +336,7 @@ describe('HEAD object, conditions', () => { IfMatch: 'non-matching', IfModifiedSince: dateFromNow(-1), }, err => { - checkError(err, errorInstances.PreconditionFailed.code); + assert.equal(err.$metadata.httpStatusCode, 412); done(); }); }); @@ -340,7 +348,7 @@ describe('HEAD object, conditions', () => { IfNoneMatch: etagTrim, IfModifiedSince: dateFromNow(-1), }, err => { - checkError(err, 'NotModified'); + assert.equal(err.$metadata.httpStatusCode, 304); done(); }); }); @@ -350,7 +358,7 @@ describe('HEAD object, conditions', () => { IfNoneMatch: etagTrim, IfModifiedSince: dateFromNow(1), }, err => { - checkError(err, 'NotModified'); + assert.equal(err.$metadata.httpStatusCode, 304); done(); }); }); @@ -372,7 +380,7 @@ describe('HEAD object, conditions', () => { IfNoneMatch: 'non-matching', IfModifiedSince: dateFromNow(1), }, err => { - checkError(err, 'NotModified'); + assert.equal(err.$metadata.httpStatusCode, 304); done(); }); }); @@ -392,7 +400,7 @@ describe('HEAD object, conditions', () => { IfNoneMatch: 'non-matching', IfUnmodifiedSince: dateFromNow(-1), }, err => { - checkError(err, errorInstances.PreconditionFailed.code); + assert.equal(err.$metadata.httpStatusCode, 412); done(); }); }); @@ -402,7 +410,7 @@ describe('HEAD object, conditions', () => { IfNoneMatch: etagTrim, IfUnmodifiedSince: dateFromNow(1), }, err => { - checkError(err, 'NotModified'); + assert.equal(err.$metadata.httpStatusCode, 304); done(); }); }); @@ -412,7 +420,7 @@ describe('HEAD object, conditions', () => { IfNoneMatch: etagTrim, IfUnmodifiedSince: dateFromNow(-1), }, err => { - checkError(err, errorInstances.PreconditionFailed.code); + assert.equal(err.$metadata.httpStatusCode, 412); done(); }); }); @@ -428,13 +436,11 @@ describe('HEAD object, conditions', () => { Bucket: bucketName, Key: 'redir_present', }; - s3.putObject(redirBktwBody, err => { - checkNoError(err); - s3.headObject(redirBkt, (err, data) => { - checkNoError(err); + s3.send(new PutObjectCommand(redirBktwBody)).then(() => { + s3.send(new HeadObjectCommand(redirBkt)).then(data => { assert.strictEqual(data.WebsiteRedirectLocation, 'http://google.com'); - return 
done(); + done(); }); }); }); @@ -450,10 +456,8 @@ describe('HEAD object, conditions', () => { Bucket: bucketName, Key: objectName, }; - s3.putObject(mockPutObjectParams, err => { - checkNoError(err); - s3.headObject(mockHeadObjectParams, (err, data) => { - checkNoError(err); + s3.send(new PutObjectCommand(mockPutObjectParams)).then(() => { + s3.send(new HeadObjectCommand(mockHeadObjectParams)).then(data => { assert.strictEqual(data.AcceptRanges, 'bytes'); done(); }); @@ -483,29 +487,29 @@ describe('HEAD object, conditions', () => { 'multipart object', done => { const mpuKey = 'mpukey'; async.waterfall([ - next => s3.createMultipartUpload({ + next => s3.send(new CreateMultipartUploadCommand({ Bucket: bucketName, Key: mpuKey, - }, next), + })).then(data => next(null, data)).catch(next), (data, next) => { const uploadId = data.UploadId; - s3.uploadPart({ + s3.send(new UploadPartCommand({ Bucket: bucketName, Key: mpuKey, UploadId: uploadId, PartNumber: 1, Body: Buffer.alloc(partSize).fill('a'), - }, (err, data) => next(err, uploadId, data.ETag)); + })).then(data => next(null, uploadId, data.ETag)).catch(next); }, - (uploadId, etagOne, next) => s3.uploadPart({ + (uploadId, etagOne, next) => s3.send(new UploadPartCommand({ Bucket: bucketName, Key: mpuKey, UploadId: uploadId, PartNumber: 2, Body: Buffer.alloc(partSize).fill('z'), - }, (err, data) => next(err, uploadId, etagOne, data.ETag)), + })).then(data => next(null, uploadId, etagOne, data.ETag)).catch(next), (uploadId, etagOne, etagTwo, next) => - s3.completeMultipartUpload({ + s3.send(new CompleteMultipartUploadCommand({ Bucket: bucketName, Key: mpuKey, UploadId: uploadId, @@ -518,15 +522,14 @@ describe('HEAD object, conditions', () => { ETag: etagTwo, }], }, - }, next), + })).then(data => next(null, data)).catch(next), ], err => { assert.ifError(err); - s3.headObject({ + s3.send(new HeadObjectCommand({ Bucket: bucketName, Key: mpuKey, PartNumber: 1, - }, (err, data) => { - assert.ifError(err); + })).then(data => { assert.strictEqual(data.PartsCount, 2); done(); }); @@ -544,12 +547,12 @@ describeSkipIfCeph('HEAD object with object lock', () => { const s3 = bucketUtil.s3; const bucket = 'bucket-with-lock'; const key = 'object-with-lock'; - const formatDate = date => date.toString().slice(0, 20); - const mockDate = moment().add(1, 'days').toISOString(); + const formatDate = date => moment(date).format('YYYY-MM-DDTHH:mm:ss.SSS[Z]'); + const mockDate = moment().add(1, 'days'); const mockMode = 'GOVERNANCE'; let versionId; - beforeEach(() => { + beforeEach(async () => { const params = { Bucket: bucket, Key: key, @@ -557,23 +560,20 @@ describeSkipIfCeph('HEAD object with object lock', () => { ObjectLockMode: mockMode, ObjectLockLegalHoldStatus: 'ON', }; - return s3.createBucket({ + await s3.send(new CreateBucketCommand({ Bucket: bucket, ObjectLockEnabledForBucket: true, - }).promise() - .then(() => s3.putObject(params).promise()) - .then(() => s3.getObject({ Bucket: bucket, Key: key }).promise()) - /* eslint-disable no-return-assign */ - .then(res => versionId = res.VersionId) - .catch(err => { - process.stdout.write('Error in before\n'); - throw err; - }); + })); + await s3.send(new PutObjectCommand(params)); + const res = await s3.send(new GetObjectCommand({ Bucket: bucket, Key: key })); + + versionId = res.VersionId; }); - afterEach(() => changeLockPromise([{ bucket, key, versionId }], '') - .then(() => s3.listObjectVersions({ Bucket: bucket }).promise()) - .then(res => res.Versions.forEach(object => { + afterEach(async () => { + await 
changeLockPromise([{ bucket, key, versionId }], ''); + const res = await s3.send(new ListObjectVersionsCommand({ Bucket: bucket })); + res.Versions?.forEach(object => { const params = [ { bucket, @@ -582,35 +582,26 @@ describeSkipIfCeph('HEAD object with object lock', () => { }, ]; changeLockPromise(params, ''); - })) - .then(() => { - process.stdout.write('Emptying and deleting buckets\n'); - return bucketUtil.empty(bucket); - }) - .then(() => s3.deleteBucket({ Bucket: bucket }).promise()) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - })); - - it('should return object lock headers if set on the object', done => { - s3.headObject({ Bucket: bucket, Key: key }, (err, res) => { - assert.ifError(err); - assert.strictEqual(res.ObjectLockMode, mockMode); - const responseDate - = formatDate(res.ObjectLockRetainUntilDate.toISOString()); - const expectedDate = formatDate(mockDate); - assert.strictEqual(responseDate, expectedDate); - assert.strictEqual(res.ObjectLockLegalHoldStatus, 'ON'); - const objectWithLock = [ - { - bucket, - key, - versionId: res.VersionId, - }, - ]; - changeObjectLock(objectWithLock, '', done); }); + await bucketUtil.empty(bucket); + await s3.send(new DeleteBucketCommand({ Bucket: bucket })); + }); + + it('should return object lock headers if set on the object', async () => { + const res = await s3.send(new HeadObjectCommand({ Bucket: bucket, Key: key })); + assert.strictEqual(res.ObjectLockMode, mockMode); + const responseDate = formatDate(res.ObjectLockRetainUntilDate); + const expectedDate = formatDate(mockDate); + assert.strictEqual(responseDate, expectedDate); + assert.strictEqual(res.ObjectLockLegalHoldStatus, 'ON'); + const objectWithLock = [ + { + bucket, + key, + versionId: res.VersionId, + }, + ]; + await changeLockPromise(objectWithLock, ''); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/objectHead_compatibleHeaders.js b/tests/functional/aws-node-sdk/test/object/objectHead_compatibleHeaders.js index 1b55458666..b4603858aa 100644 --- a/tests/functional/aws-node-sdk/test/object/objectHead_compatibleHeaders.js +++ b/tests/functional/aws-node-sdk/test/object/objectHead_compatibleHeaders.js @@ -2,6 +2,7 @@ const assert = require('assert'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); +const { PutObjectCommand, HeadObjectCommand } = require('@aws-sdk/client-s3'); const bucketName = 'objectheadtestheaders'; const objectName = 'someObject'; @@ -25,7 +26,7 @@ describe('HEAD object, compatibility headers [Cache-Control, ' + bucketUtil.deleteOne(bucketName) ) .catch(err => { - if (err.code !== 'NoSuchBucket') { + if (err.name !== 'NoSuchBucket') { process.stdout.write(`${err}\n`); throw err; } @@ -40,27 +41,20 @@ describe('HEAD object, compatibility headers [Cache-Control, ' + ContentEncoding: contentEncoding, Expires: expires, }; - return s3.putObject(params).promise(); - }) - .catch(err => { - process.stdout.write(`Error with putObject: ${err}\n`); - throw err; + return s3.send(new PutObjectCommand(params)); }); }); - after(() => { + after(async () => { process.stdout.write('deleting bucket'); - return bucketUtil.empty(bucketName).then(() => - bucketUtil.deleteOne(bucketName)); + await bucketUtil.empty(bucketName); + await bucketUtil.deleteOne(bucketName); }); it('should return additional headers if specified in objectPUT ' + 'request', done => { - s3.headObject({ Bucket: bucketName, Key: objectName }, - (err, res) => { - if (err) { - return done(err); - } 
+ s3.send(new HeadObjectCommand({ Bucket: bucketName, Key: objectName })) + .then(res => { assert.strictEqual(res.CacheControl, cacheControl); assert.strictEqual(res.ContentDisposition, @@ -72,7 +66,10 @@ describe('HEAD object, compatibility headers [Cache-Control, ' + assert.strictEqual(res.Expires.toGMTString(), expires.toGMTString()); return done(); - }); + }).catch(err => { + process.stdout.write(`Error on headObject: ${err}\n`); + return done(err); + }); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/objectHead_replication.js b/tests/functional/aws-node-sdk/test/object/objectHead_replication.js index 04c3bdf558..39beb5e388 100644 --- a/tests/functional/aws-node-sdk/test/object/objectHead_replication.js +++ b/tests/functional/aws-node-sdk/test/object/objectHead_replication.js @@ -1,11 +1,15 @@ const assert = require('assert'); -const async = require('async'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); const { removeAllVersions, versioningEnabled } = require('../../lib/utility/versioning-util'); - +const { PutObjectCommand, + HeadObjectCommand, + CreateBucketCommand, + DeleteBucketCommand, + PutBucketVersioningCommand, + PutBucketReplicationCommand } = require('@aws-sdk/client-s3'); const sourceBucket = 'source-bucket'; const keyPrefix = 'test-prefix'; @@ -14,60 +18,60 @@ describe("Head object 'ReplicationStatus' value", () => { const bucketUtil = new BucketUtility('default', sigCfg); const s3 = bucketUtil.s3; - function checkHeadObj(key, expectedStatus, cb) { + async function checkHeadObj(key, expectedStatus) { const params = { Bucket: sourceBucket, Key: key }; - return async.series([ - next => s3.putObject(params, next), - next => s3.headObject(params, (err, res) => { - if (err) { - return next(err); - } - assert.strictEqual(res.ReplicationStatus, expectedStatus); - return next(); - }), - ], cb); + await s3.send(new PutObjectCommand(params)); + const res = await s3.send(new HeadObjectCommand(params)); + assert.strictEqual(res.ReplicationStatus, expectedStatus); } - beforeEach(done => async.series([ - next => s3.createBucket({ Bucket: sourceBucket }, next), - next => s3.putBucketVersioning({ + beforeEach(async () => { + await s3.send(new CreateBucketCommand({ Bucket: sourceBucket })); + await s3.send(new PutBucketVersioningCommand({ Bucket: sourceBucket, VersioningConfiguration: versioningEnabled, - }, next), - ], done)); + })); + }); - afterEach(done => async.series([ - next => removeAllVersions({ Bucket: sourceBucket }, next), - next => s3.deleteBucket({ Bucket: sourceBucket }, next), - ], done)); + afterEach(done => { + removeAllVersions({ Bucket: sourceBucket }, err => { + if (err) { + return done(err); + } + return s3.send(new DeleteBucketCommand({ Bucket: sourceBucket })) + .then(() => done()).catch(done); + }); + }); it('should be `undefined` when there is no bucket replication config', - done => checkHeadObj(`${keyPrefix}-foobar`, undefined, done)); + async () => await checkHeadObj(`${keyPrefix}-foobar`, undefined)); describe('With bucket replication config', () => { const role = process.env.S3_END_TO_END ? 
'arn:aws:iam::123456789012:role/src-resource,arn:aws:iam::123456789012:role/dest-resource' : 'arn:aws:iam::123456789012:role/src-resource'; - beforeEach(done => s3.putBucketReplication({ - Bucket: sourceBucket, - ReplicationConfiguration: { - Role: role, - Rules: [ - { - Destination: { StorageClass: 'us-east-2', - Bucket: 'arn:aws:s3:::dest-bucket' }, - Prefix: keyPrefix, - Status: 'Enabled', - }, - ], - }, - }, done)); + beforeEach(async () => { + await s3.send(new PutBucketReplicationCommand({ + Bucket: sourceBucket, + ReplicationConfiguration: { + Role: role, + Rules: [ + { + Destination: { StorageClass: 'us-east-2', + Bucket: 'arn:aws:s3:::dest-bucket' }, + Prefix: keyPrefix, + Status: 'Enabled', + }, + ], + }, + })); + }); it("should be 'PENDING' when object key prefix applies", - done => checkHeadObj(`${keyPrefix}-foobar`, 'PENDING', done)); + async () => await checkHeadObj(`${keyPrefix}-foobar`, 'PENDING')); it('should be `undefined` when object key prefix does not apply', - done => checkHeadObj(`foobar-${keyPrefix}`, undefined, done)); + async () => await checkHeadObj(`foobar-${keyPrefix}`, undefined)); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/objectOverwrite.js b/tests/functional/aws-node-sdk/test/object/objectOverwrite.js index f4ff972968..8028e3d06a 100644 --- a/tests/functional/aws-node-sdk/test/object/objectOverwrite.js +++ b/tests/functional/aws-node-sdk/test/object/objectOverwrite.js @@ -1,4 +1,9 @@ const assert = require('assert'); +const { + PutObjectCommand, + HeadObjectCommand, + GetObjectCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); @@ -21,44 +26,45 @@ describe('Put object with same key as prior object', () => { let s3; let bucketName; - before(done => { + before(async () => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - bucketUtil.createRandom(1) - .then(created => { - bucketName = created; - done(); - }) - .catch(done); + bucketName = await bucketUtil.createRandom(1); }); - beforeEach(() => s3.putObject({ - Bucket: bucketName, - Key: objectName, - Body: 'I am the best content ever', - Metadata: firstPutMetadata, - }).promise().then(() => - s3.headObject({ Bucket: bucketName, Key: objectName }).promise() - ).then(res => { + beforeEach(async () => { + await s3.send(new PutObjectCommand({ + Bucket: bucketName, + Key: objectName, + Body: 'I am the best content ever', + Metadata: firstPutMetadata, + })); + const res = await s3.send(new HeadObjectCommand({ + Bucket: bucketName, + Key: objectName + })); assert.deepStrictEqual(res.Metadata, firstPutMetadata); - })); + }); - afterEach(() => bucketUtil.empty(bucketName)); + afterEach(async () => await bucketUtil.empty(bucketName)); - after(() => bucketUtil.deleteOne(bucketName)); + after(async () => await bucketUtil.deleteOne(bucketName)); it('should overwrite all user metadata and data on overwrite put', - () => s3.putObject({ - Bucket: bucketName, - Key: objectName, - Body: 'Much different', - Metadata: secondPutMetadata, - }).promise().then(() => - s3.getObject({ Bucket: bucketName, Key: objectName }).promise() - ).then(res => { + async () => { + await s3.send(new PutObjectCommand({ + Bucket: bucketName, + Key: objectName, + Body: 'Much different', + Metadata: secondPutMetadata, + })); + const res = await s3.send(new GetObjectCommand({ + Bucket: bucketName, + Key: objectName + })); assert.deepStrictEqual(res.Metadata, secondPutMetadata); - 
assert.deepStrictEqual(res.Body.toString(), - 'Much different'); - })); + const bodyText = await res.Body.transformToString(); + assert.deepStrictEqual(bodyText, 'Much different'); + }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/put.js b/tests/functional/aws-node-sdk/test/object/put.js index 50d30f8797..c7910a14e6 100644 --- a/tests/functional/aws-node-sdk/test/object/put.js +++ b/tests/functional/aws-node-sdk/test/object/put.js @@ -1,7 +1,15 @@ const assert = require('assert'); - +const fs = require('fs'); +const path = require('path'); +const { CreateBucketCommand, + PutObjectCommand, + GetObjectAclCommand, + GetObjectTaggingCommand, +} = require('@aws-sdk/client-s3'); +const { getSignedUrl } = require('@aws-sdk/s3-request-presigner'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); +const checkError = require('../../lib/utility/checkError'); const provideRawOutput = require('../../lib/utility/provideRawOutput'); const { taggingTests, generateMultipleTagQuery } = require('../../lib/utility/tagging'); @@ -12,25 +20,15 @@ const changeObjectLock = require('../../../../utilities/objectLock-util'); const bucket = 'bucket2putstuffin4324242'; const object = 'object2putstuffin'; -function _checkError(err, code, statusCode) { - assert(err, 'Expected error but found none'); - assert.strictEqual(err.code, code); - assert.strictEqual(err.statusCode, statusCode); -} - describe('PUT object', () => { withV4(sigCfg => { let bucketUtil; let s3; - beforeEach(() => { + beforeEach(async () => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ Bucket: bucket }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + await s3.send(new CreateBucketCommand({ Bucket: bucket })); }); afterEach(() => { @@ -47,22 +45,28 @@ describe('PUT object', () => { }); it('should put an object and set the acl via query param', - done => { + async () => { + // Create a temporary file for upload + const tempFile = path.join(__dirname, 'temp-upload-file.txt'); + fs.writeFileSync(tempFile, 'test content for upload'); const params = { Bucket: bucket, Key: 'key', ACL: 'public-read', StorageClass: 'STANDARD' }; - const url = s3.getSignedUrl('putObject', params); + + const command = new PutObjectCommand(params); + const url = await getSignedUrl(s3, command); + await new Promise((resolve, reject) => { provideRawOutput(['-verbose', '-X', 'PUT', url, - '--upload-file', 'uploadFile'], httpCode => { + '--upload-file', tempFile], httpCode => { + fs.unlinkSync(tempFile); assert.strictEqual(httpCode, '200 OK'); - s3.getObjectAcl({ Bucket: bucket, Key: 'key' }, - (err, result) => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); + s3.send(new GetObjectAclCommand({ Bucket: bucket, Key: 'key' })) + .then(result => { assert.deepStrictEqual(result.Grants[1], { Grantee: { Type: 'Group', URI: 'http://acs.amazonaws.com/groups/global/AllUsers', }, Permission: 'READ' }); - done(); + resolve(); - }); + }).catch(reject); }); + }); }); @@ -70,18 +71,22 @@ describe('PUT object', () => { it('should put an object with key slash', done => { const params = { Bucket: bucket, Key: '/' }; - s3.putObject(params, err => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); + s3.send(new PutObjectCommand(params)).then(() => { done(); + }).catch(err => { + assert.equal(err, null, 'Expected success, ' + + `got error ${JSON.stringify(err)}`); + done(err); }); }); it('should return KeyTooLong error 
when key is longer than 915 bytes', done => { const params = { Bucket: bucket, Key: 'a'.repeat(916) }; - s3.putObject(params, err => { + s3.send(new PutObjectCommand(params)).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { assert(err, 'Expected error but did not find one'); - assert.strictEqual(err.code, 'KeyTooLong'); + assert.strictEqual(err.name, 'KeyTooLong'); assert.match(err.message, /915/); done(); }); @@ -91,16 +96,21 @@ describe('PUT object', () => { done => { const metadata = genMaxSizeMetaHeaders(); const params = { Bucket: bucket, Key: '/', Metadata: metadata }; - s3.putObject(params, err => { - assert.strictEqual(err, null, `Unexpected err: ${err}`); + s3.send(new PutObjectCommand(params)).then(() => { // add one more byte to be over the limit metadata.header0 = `${metadata.header0}${'0'}`; - s3.putObject(params, err => { + s3.send(new PutObjectCommand(params)).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { assert(err, 'Expected err but did not find one'); - assert.strictEqual(err.code, 'MetadataTooLarge'); - assert.strictEqual(err.statusCode, 400); + assert.strictEqual(err.name, 'MetadataTooLarge'); + assert.strictEqual(err.$metadata.httpStatusCode, 400); done(); }); + }).catch(err => { + assert.equal(err, null, 'Expected success, ' + + `got error ${JSON.stringify(err)}`); + done(err); }); }); @@ -114,11 +124,14 @@ describe('PUT object', () => { ObjectLockRetainUntilDate: date, ObjectLockMode: 'GOVERNANCE', }; - s3.putObject(params, err => { + s3.send(new PutObjectCommand(params)).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { const expectedErrMessage = 'Bucket is missing ObjectLockConfiguration'; - assert.strictEqual(err.code, 'InvalidRequest'); - assert.strictEqual(err.message, expectedErrMessage); + assert.strictEqual(err.name, 'InvalidRequest'); + assert.strictEqual(err.$metadata.httpStatusCode, 400); + assert(err.toString().includes(expectedErrMessage)); done(); }); }); @@ -127,8 +140,10 @@ describe('PUT object', () => { 'customer-provided encryption keys', done => { const params = { Bucket: bucket, Key: 'key', SSECustomerAlgorithm: 'AES256' }; - s3.putObject(params, err => { - assert.strictEqual(err.code, 'NotImplemented'); + s3.send(new PutObjectCommand(params)).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { + assert.strictEqual(err.name, 'NotImplemented'); done(); }); }); @@ -138,9 +153,11 @@ describe('PUT object', () => { 'with \'http://\', \'https://\' or \'/\'', done => { const params = { Bucket: bucket, Key: 'key', WebsiteRedirectLocation: 'google.com' }; - s3.putObject(params, err => { - assert.strictEqual(err.code, 'InvalidRedirectLocation'); - assert.strictEqual(err.statusCode, 400); + s3.send(new PutObjectCommand(params)).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { + assert.strictEqual(err.name, 'InvalidRedirectLocation'); + assert.strictEqual(err.$metadata.httpStatusCode, 400); done(); }); }); @@ -153,31 +170,36 @@ describe('PUT object', () => { const tagging = `${key}=${value}`; const params = { Bucket: bucket, Key: object, Tagging: tagging }; - s3.putObject(params, err => { + s3.send(new PutObjectCommand(params)).then(() => + s3.send(new GetObjectTaggingCommand({ Bucket: bucket, + Key: object })).then(data => { + assert.deepStrictEqual(data.TagSet[0], { + Key: taggingTest.tag.key, + Value: taggingTest.tag.value }); + done(); + }).catch(err => { + assert.equal(err, 
null, 'Expected success, ' + + `got error ${JSON.stringify(err)}`); + done(); + })).catch(err => { if (taggingTest.error) { - _checkError(err, taggingTest.error, 400); + checkError(err, taggingTest.error, 400); return done(); } assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); - return s3.getObjectTagging({ Bucket: bucket, - Key: object }, (err, data) => { - assert.equal(err, null, 'Expected success, ' + - `got error ${JSON.stringify(err)}`); - assert.deepStrictEqual(data.TagSet[0], { - Key: taggingTest.tag.key, - Value: taggingTest.tag.value }); - done(); - }); - }); + return done(); }); }); + }); it('should be able to put object with 10 tags', done => { const taggingConfig = generateMultipleTagQuery(10); - s3.putObject({ Bucket: bucket, Key: object, - Tagging: taggingConfig }, err => { + s3.send(new PutObjectCommand({ Bucket: bucket, Key: object, + Tagging: taggingConfig })).then(() => { + done(); + }).catch(err => { assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); done(); @@ -185,9 +207,11 @@ describe('PUT object', () => { }); it('should be able to put an empty Tag set', done => { - s3.putObject({ Bucket: bucket, Key: object, + s3.send(new PutObjectCommand({ Bucket: bucket, Key: object, Tagging: '', - }, err => { + })).then(() => { + done(); + }).catch(err => { assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); done(); @@ -196,8 +220,10 @@ describe('PUT object', () => { it('should be able to put object with empty tags', done => { - s3.putObject({ Bucket: bucket, Key: object, - Tagging: '&&&&&&&&&&&&&&&&&key1=value1' }, err => { + s3.send(new PutObjectCommand({ Bucket: bucket, Key: object, + Tagging: '&&&&&&&&&&&&&&&&&key1=value1' })).then(() => { + done(); + }).catch(err => { assert.equal(err, null, 'Expected success, ' + `got error ${JSON.stringify(err)}`); done(); @@ -206,53 +232,69 @@ describe('PUT object', () => { it('should allow putting 50 tags', done => { const taggingConfig = generateMultipleTagQuery(50); - s3.putObject({ Bucket: bucket, Key: object, - Tagging: taggingConfig }, done); + s3.send(new PutObjectCommand({ Bucket: bucket, Key: object, + Tagging: taggingConfig })).then(() => { + done(); + }).catch(err => { + assert.equal(err, null, 'Expected success, ' + + `got error ${JSON.stringify(err)}`); + done(); + }); }); it('should return BadRequest if putting more that 50 tags', done => { const taggingConfig = generateMultipleTagQuery(51); - s3.putObject({ Bucket: bucket, Key: object, - Tagging: taggingConfig }, err => { - _checkError(err, 'BadRequest', 400); + s3.send(new PutObjectCommand({ Bucket: bucket, Key: object, + Tagging: taggingConfig })).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { + checkError(err, 'BadRequest', 400); done(); }); }); it('should return InvalidArgument if using the same key twice', done => { - s3.putObject({ Bucket: bucket, Key: object, - Tagging: 'key1=value1&key1=value2' }, err => { - _checkError(err, 'InvalidArgument', 400); + s3.send(new PutObjectCommand({ Bucket: bucket, Key: object, + Tagging: 'key1=value1&key1=value2' })).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { + checkError(err, 'InvalidArgument', 400); done(); }); }); it('should return InvalidArgument if using the same key twice ' + 'and empty tags', done => { - s3.putObject({ Bucket: bucket, Key: object, - Tagging: '&&&&&&&&&&&&&&&&&key1=value1&key1=value2' }, - err => { - _checkError(err, 
'InvalidArgument', 400); + s3.send(new PutObjectCommand({ Bucket: bucket, Key: object, + Tagging: '&&&&&&&&&&&&&&&&&key1=value1&key1=value2' })).then(() => { + assert(false, 'Expected failure but got success'); + + }).catch(err => { + checkError(err, 'InvalidArgument', 400); done(); }); }); it('should return InvalidArgument if tag with no key', done => { - s3.putObject({ Bucket: bucket, Key: object, + s3.send(new PutObjectCommand({ Bucket: bucket, Key: object, Tagging: '=value1', - }, err => { - _checkError(err, 'InvalidArgument', 400); + })).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { + checkError(err, 'InvalidArgument', 400); done(); }); }); it('should return InvalidArgument putting object with ' + 'bad encoded tags', done => { - s3.putObject({ Bucket: bucket, Key: object, Tagging: - 'key1==value1' }, err => { - _checkError(err, 'InvalidArgument', 400); + s3.send(new PutObjectCommand({ Bucket: bucket, Key: object, Tagging: + 'key1==value1' })).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { + checkError(err, 'InvalidArgument', 400); done(); }); }); @@ -260,9 +302,11 @@ describe('PUT object', () => { it('should return InvalidArgument putting object tag with ' + 'invalid characters: %', done => { const value = 'value1%'; - s3.putObject({ Bucket: bucket, Key: object, Tagging: - `key1=${value}` }, err => { - _checkError(err, 'InvalidArgument', 400); + s3.send(new PutObjectCommand({ Bucket: bucket, Key: object, Tagging: + `key1=${value}` })).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { + checkError(err, 'InvalidArgument', 400); done(); }); }); @@ -278,17 +322,13 @@ describeSkipIfCeph('PUT object with object lock', () => { let bucketUtil; let s3; - beforeEach(() => { + beforeEach(async () => { bucketUtil = new BucketUtility('default', sigCfg); s3 = bucketUtil.s3; - return s3.createBucket({ + await s3.send(new CreateBucketCommand({ Bucket: bucket, ObjectLockEnabledForBucket: true, - }).promise() - .catch(err => { - process.stdout.write(`Error creating bucket: ${err}\n`); - throw err; - }); + })); }); afterEach(() => { @@ -313,8 +353,7 @@ describeSkipIfCeph('PUT object with object lock', () => { ObjectLockRetainUntilDate: date, ObjectLockMode: 'COMPLIANCE', }; - s3.putObject(params, (err, res) => { - assert.ifError(err); + s3.send(new PutObjectCommand(params)).then(res => { changeObjectLock( [{ bucket, key: 'key1', versionId: res.VersionId }], '', done); }); @@ -329,8 +368,7 @@ describeSkipIfCeph('PUT object with object lock', () => { ObjectLockRetainUntilDate: date, ObjectLockMode: 'GOVERNANCE', }; - s3.putObject(params, (err, res) => { - assert.ifError(err); + s3.send(new PutObjectCommand(params)).then(res => { changeObjectLock( [{ bucket, key: 'key2', versionId: res.VersionId }], '', done); }); @@ -344,9 +382,11 @@ describeSkipIfCeph('PUT object with object lock', () => { ObjectLockMode: 'Governance', ObjectLockRetainUntilDate: date, }; - s3.putObject(params, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.message, 'Unknown wormMode directive'); + s3.send(new PutObjectCommand(params)).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { + assert.strictEqual(err.name, 'InvalidArgument'); + assert(err.toString().includes('Unknown wormMode directive')); done(); }); }); @@ -357,8 +397,7 @@ describeSkipIfCeph('PUT object with object lock', () => { Key: 'key4', ObjectLockLegalHoldStatus: 'ON', }; - 
s3.putObject(params, (err, res) => { - assert.ifError(err); + s3.send(new PutObjectCommand(params)).then(res => { changeObjectLock( [{ bucket, key: 'key4', versionId: res.VersionId }], '', done); }); @@ -370,8 +409,7 @@ describeSkipIfCeph('PUT object with object lock', () => { Key: 'key5', ObjectLockLegalHoldStatus: 'OFF', }; - s3.putObject(params, err => { - assert.ifError(err); + s3.send(new PutObjectCommand(params)).then(() => { done(); }); }); @@ -382,10 +420,11 @@ describeSkipIfCeph('PUT object with object lock', () => { Key: 'key6', ObjectLockLegalHoldStatus: 'on', }; - s3.putObject(params, err => { - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.message, - 'Legal hold status must be one of "ON", "OFF"'); + s3.send(new PutObjectCommand(params)).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { + assert.strictEqual(err.name, 'InvalidArgument'); + assert(err.toString().includes('Legal hold status must be one of "ON", "OFF"')); done(); }); }); @@ -398,12 +437,14 @@ describeSkipIfCeph('PUT object with object lock', () => { Key: 'key7', ObjectLockRetainUntilDate: date, }; - s3.putObject(params, err => { + s3.send(new PutObjectCommand(params)).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { const expectedErrMessage = 'x-amz-object-lock-retain-until-date and ' + 'x-amz-object-lock-mode must both be supplied'; - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.message, expectedErrMessage); + assert.strictEqual(err.name, 'InvalidArgument'); + assert(err.toString().includes(expectedErrMessage)); done(); }); }); @@ -415,12 +456,14 @@ describeSkipIfCeph('PUT object with object lock', () => { Key: 'key8', ObjectLockMode: 'GOVERNANCE', }; - s3.putObject(params, err => { + s3.send(new PutObjectCommand(params)).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { const expectedErrMessage = 'x-amz-object-lock-retain-until-date and ' + 'x-amz-object-lock-mode must both be supplied'; - assert.strictEqual(err.code, 'InvalidArgument'); - assert.strictEqual(err.message, expectedErrMessage); + assert.strictEqual(err.name, 'InvalidArgument'); + assert(err.toString().includes(expectedErrMessage)); done(); }); }); @@ -432,9 +475,11 @@ describeSkipIfCeph('PUT object with object lock', () => { Key: 'key8', StorageClass: 'COLD', }; - s3.putObject(params, err => { - assert.strictEqual(err.code, 'InvalidStorageClass'); - assert.strictEqual(err.statusCode, 400); + s3.send(new PutObjectCommand(params)).then(() => { + assert(false, 'Expected failure but got success'); + }).catch(err => { + assert.strictEqual(err.name, 'InvalidStorageClass'); + assert.strictEqual(err.$metadata.httpStatusCode, 400); done(); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/putObjAcl.js b/tests/functional/aws-node-sdk/test/object/putObjAcl.js index 0899409130..aa8ce0d9a7 100644 --- a/tests/functional/aws-node-sdk/test/object/putObjAcl.js +++ b/tests/functional/aws-node-sdk/test/object/putObjAcl.js @@ -1,4 +1,8 @@ const assert = require('assert'); +const { + PutObjectCommand, + PutObjectAclCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); @@ -45,23 +49,18 @@ describe('PUT Object ACL', () => { const s3 = bucketUtil.s3; const Key = 'aclTest'; - before(done => { - bucketUtil.createRandom(1) - .then(created => { - bucketName = created; - done(); - }) - .catch(done); + 
before(async () => { + bucketName = await bucketUtil.createRandom(1); }); - afterEach(() => { + afterEach(async () => { process.stdout.write('emptying bucket'); - return bucketUtil.empty(bucketName); + await bucketUtil.empty(bucketName); }); - after(() => { + after(async () => { process.stdout.write('deleting bucket'); - return bucketUtil.deleteOne(bucketName); + await bucketUtil.deleteOne(bucketName); }); it('should put object ACLs', async () => { @@ -71,38 +70,49 @@ describe('PUT Object ACL', () => { { Bucket, Key }, ]; for (const param of objects) { - await s3.putObject(param).promise(); + await s3.send(new PutObjectCommand(param)); } - const data = await s3.putObjectAcl({ Bucket, Key, ACL: 'public-read' }).promise(); + const data = await s3.send(new PutObjectAclCommand({ + Bucket, + Key, + ACL: 'public-read' + })); assert(data); }); it('should return NoSuchKey if try to put object ACLs ' + - 'for nonexistent object', done => { + 'for nonexistent object', async () => { const s3 = bucketUtil.s3; const Bucket = bucketName; - s3.putObjectAcl({ - Bucket, - Key, - ACL: 'public-read' }, err => { + try { + await s3.send(new PutObjectAclCommand({ + Bucket, + Key, + ACL: 'public-read' + })); + throw new Error('Expected NoSuchKey error'); + } catch (err) { assert(err); - assert.strictEqual(err.statusCode, 404); - assert.strictEqual(err.code, 'NoSuchKey'); - done(); - }); + assert.strictEqual(err.$metadata.httpStatusCode, 404); + assert.strictEqual(err.name, 'NoSuchKey'); + } }); describe('on an object', () => { - before(done => s3.putObject({ Bucket: bucketName, Key }, done)); - after(() => { + before(async () => { + await s3.send(new PutObjectCommand({ Bucket: bucketName, Key })); + }); + + after(async () => { process.stdout.write('deleting bucket'); - return bucketUtil.empty(bucketName); + await bucketUtil.empty(bucketName); }); + // The supplied canonical ID is not associated with a real AWS // account, so AWS_ON_AIR will raise a 400 InvalidArgument itSkipIfAWS('should return AccessDenied if try to change owner ' + - 'ID in ACL request body', done => { + 'ID in ACL request body', async () => { const acp = new _AccessControlPolicy( { ownerID: notOwnerCanonicalID }); acp.addGrantee('Group', constants.publicId, 'READ'); @@ -111,12 +121,15 @@ describe('PUT Object ACL', () => { Key, AccessControlPolicy: acp, }; - s3.putObjectAcl(putAclParams, err => { + + try { + await s3.send(new PutObjectAclCommand(putAclParams)); + throw new Error('Expected AccessDenied error'); + } catch (err) { assert(err); - assert.strictEqual(err.statusCode, 403); - assert.strictEqual(err.code, 'AccessDenied'); - done(); - }); + assert.strictEqual(err.$metadata.httpStatusCode, 403); + assert.strictEqual(err.name, 'AccessDenied'); + } }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/putObjTagging.js b/tests/functional/aws-node-sdk/test/object/putObjTagging.js index bc6a4f93e9..a86e089ca4 100644 --- a/tests/functional/aws-node-sdk/test/object/putObjTagging.js +++ b/tests/functional/aws-node-sdk/test/object/putObjTagging.js @@ -1,13 +1,20 @@ const assert = require('assert'); -const async = require('async'); +const { + CreateBucketCommand, + PutObjectCommand, + PutObjectTaggingCommand, + GetObjectTaggingCommand, + PutBucketAclCommand, +} = require('@aws-sdk/client-s3'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); +const checkError = require('../../lib/utility/checkError'); const { taggingTests } = require('../../lib/utility/tagging'); -const 
bucketName = 'testtaggingbucket'; -const objectName = 'testtaggingobject'; -const objectNameAcl = 'testtaggingobjectacl'; +const bucketName = 'testputtaggingbucket'; +const objectName = 'testputtaggingobject'; +const objectNameAcl = 'testputtaggingobjectacl'; const taggingConfig = { TagSet: [ { @@ -35,12 +42,6 @@ function generateTaggingConfig(key, value) { }; } -function _checkError(err, code, statusCode) { - assert(err, 'Expected error but found none'); - assert.strictEqual(err.code, code); - assert.strictEqual(err.statusCode, statusCode); -} - describe('PUT object taggings', () => { withV4(sigCfg => { const bucketUtil = new BucketUtility('default', sigCfg); @@ -48,166 +49,224 @@ describe('PUT object taggings', () => { const otherAccountBucketUtility = new BucketUtility('lisa', {}); const otherAccountS3 = otherAccountBucketUtility.s3; - beforeEach(done => s3.createBucket({ Bucket: bucketName }, err => { - if (err) { - return done(err); - } - return s3.putObject({ Bucket: bucketName, Key: objectName }, done); - })); + beforeEach(async () => { + await s3.send(new CreateBucketCommand({ Bucket: bucketName })); + await s3.send(new PutObjectCommand({ Bucket: bucketName, Key: objectName })); + }); afterEach(async () => { - process.stdout.write('Emptying bucket'); - return bucketUtil.empty(bucketName) - .then(() => { - process.stdout.write('Deleting bucket'); - return bucketUtil.deleteOne(bucketName); - }) - .catch(err => { - process.stdout.write('Error in afterEach'); - throw err; - }); + await bucketUtil.empty(bucketName); + await bucketUtil.deleteOne(bucketName); }); taggingTests.forEach(taggingTest => { - it(taggingTest.it, done => { - const taggingConfig = generateTaggingConfig(taggingTest.tag.key, - taggingTest.tag.value); - s3.putObjectTagging({ Bucket: bucketName, Key: objectName, - Tagging: taggingConfig }, (err, data) => { - if (taggingTest.error) { - _checkError(err, taggingTest.error, 400); - } else { - assert.ifError(err, `Found unexpected err ${err}`); - assert.strictEqual(Object.keys(data).length, 0); + it(taggingTest.it, async () => { + const taggingConfig = generateTaggingConfig( + taggingTest.tag.key, + taggingTest.tag.value + ); + + if (taggingTest.error) { + try { + await s3.send(new PutObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + Tagging: taggingConfig + })); + assert.fail('Expected an error but request succeeded'); + } catch (err) { + checkError(err, taggingTest.error, 400); } - done(); - }); + } else { + const data = await s3.send(new PutObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + Tagging: taggingConfig + })); + assert.strictEqual(Object.keys(data).length, 1); + } }); }); - it('should allow putting 50 tags', done => { + it('should allow putting 50 tags', async () => { const taggingConfig = generateMultipleTagConfig(50); - s3.putObjectTagging({ Bucket: bucketName, Key: objectName, - Tagging: taggingConfig }, done); + await s3.send(new PutObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + Tagging: taggingConfig + })); }); - it('should return BadRequest if putting more that 50 tags', done => { + it('should return BadRequest if putting more than 50 tags', async () => { const taggingConfig = generateMultipleTagConfig(51); - s3.putObjectTagging({ Bucket: bucketName, Key: objectName, - Tagging: taggingConfig }, err => { - _checkError(err, 'BadRequest', 400); - done(); - }); + try { + await s3.send(new PutObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + Tagging: taggingConfig + })); + assert.fail('Expected 
BadRequest error'); + } catch (err) { + checkError(err, 'BadRequest', 400); + } }); - it('should return InvalidTag if using the same key twice', done => { - s3.putObjectTagging({ Bucket: bucketName, Key: objectName, - Tagging: { TagSet: [ - { - Key: 'key1', - Value: 'value1', - }, - { - Key: 'key1', - Value: 'value2', - }, - ] }, - }, err => { - _checkError(err, 'InvalidTag', 400); - done(); - }); + + it('should put tag set', async () => { + await s3.send(new PutObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + Tagging: taggingConfig, + })); + + const data = await s3.send(new GetObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + })); + + assert.deepStrictEqual(data.TagSet, taggingConfig.TagSet); }); - it('should return InvalidTag if key is an empty string', done => { - s3.putObjectTagging({ Bucket: bucketName, Key: objectName, - Tagging: { TagSet: [ - { - Key: '', - Value: 'value1', - }, - ] }, - }, err => { - _checkError(err, 'InvalidTag', 400); - done(); - }); + it('should return InvalidTag if using the same key twice', async () => { + try { + await s3.send(new PutObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + Tagging: { TagSet: [ + { + Key: 'key1', + Value: 'value1', + }, + { + Key: 'key1', + Value: 'value2', + }, + ] }, + })); + throw new Error('Expected InvalidTag error'); + } catch (err) { + checkError(err, 'InvalidTag', 400); + } }); - it('should be able to put an empty Tag set', done => { - s3.putObjectTagging({ Bucket: bucketName, Key: objectName, - Tagging: { TagSet: [] }, - }, (err, data) => { - assert.ifError(err, `Found unexpected err ${err}`); - assert.strictEqual(Object.keys(data).length, 0); - done(); - }); + it('should return InvalidTag if key is an empty string', async () => { + try { + await s3.send(new PutObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + Tagging: { + TagSet: [ + { + Key: '', + Value: 'value1', + }, + ] + } + })); + assert.fail('Expected InvalidTag error'); + } catch (err) { + checkError(err, 'InvalidTag', 400); + } }); - it('should return NoSuchKey put tag to a non-existing object', done => { - s3.putObjectTagging({ + it('should be able to put an empty Tag set', async () => { + const data = await s3.send(new PutObjectTaggingCommand({ Bucket: bucketName, - Key: 'nonexisting', - Tagging: taggingConfig, - }, err => { - _checkError(err, 'NoSuchKey', 404); - done(); - }); + Key: objectName, + Tagging: { TagSet: [] } + })); + assert.strictEqual(data.$metadata.httpStatusCode, 200); + }); + + it('should return NoSuchKey put tag to a non-existing object', + async () => { + try { + await s3.send(new PutObjectTaggingCommand({ + Bucket: bucketName, + Key: 'nonexisting', + Tagging: taggingConfig, + })); + throw new Error('Expected NoSuchKey error'); + } catch (err) { + checkError(err, 'NoSuchKey', 404); + } }); it('should return 403 AccessDenied putting tag with another account', - done => { - otherAccountS3.putObjectTagging({ Bucket: bucketName, Key: - objectName, Tagging: taggingConfig, - }, err => { - _checkError(err, 'AccessDenied', 403); - done(); - }); + async () => { + try { + await otherAccountS3.send(new PutObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + Tagging: taggingConfig, + })); + throw new Error('Expected AccessDenied error'); + } catch (err) { + checkError(err, 'AccessDenied', 403); + } }); it('should return 403 AccessDenied putting tag with a different ' + 'account to an object with ACL "public-read-write"', - done => { - s3.putObjectAcl({ Bucket: bucketName, 
Key: objectName, - ACL: 'public-read-write' }, err => { - if (err) { - return done(err); - } - return otherAccountS3.putObjectTagging({ Bucket: bucketName, - Key: objectName, Tagging: taggingConfig, - }, err => { - _checkError(err, 'AccessDenied', 403); - done(); - }); - }); + async () => { + await s3.send(new PutObjectCommand({ + Bucket: bucketName, + Key: objectName, + ACL: 'public-read-write', + })); + + try { + await otherAccountS3.send(new PutObjectTaggingCommand({ + Bucket: bucketName, + Key: objectName, + Tagging: taggingConfig, + })); + throw new Error('Expected AccessDenied error'); + } catch (err) { + checkError(err, 'AccessDenied', 403); + } }); it('should return 403 AccessDenied putting tag to an object ' + - 'in a bucket created with a different account', - done => { - async.waterfall([ - next => s3.putBucketAcl({ Bucket: bucketName, ACL: - 'public-read-write' }, err => next(err)), - next => otherAccountS3.putObject({ Bucket: bucketName, Key: - objectNameAcl }, err => next(err)), - next => otherAccountS3.putObjectTagging({ Bucket: bucketName, - Key: objectNameAcl, Tagging: taggingConfig, - }, err => next(err)), - ], err => { - _checkError(err, 'AccessDenied', 403); - done(); - }); + 'in a bucket created with a different account', + async () => { + await s3.send(new PutBucketAclCommand({ + Bucket: bucketName, + ACL: 'public-read-write', + })); + await otherAccountS3.send(new PutObjectCommand({ + Bucket: bucketName, + Key: objectNameAcl, + })); + + try { + await otherAccountS3.send(new PutObjectTaggingCommand({ + Bucket: bucketName, + Key: objectNameAcl, + Tagging: taggingConfig, + })); + throw new Error('Expected AccessDenied error'); + } catch (err) { + checkError(err, 'AccessDenied', 403); + } }); it('should put tag to an object in a bucket created with same ' + - 'account', done => { - async.waterfall([ - next => s3.putBucketAcl({ Bucket: bucketName, ACL: - 'public-read-write' }, err => next(err)), - next => otherAccountS3.putObject({ Bucket: bucketName, Key: - objectNameAcl }, err => next(err)), - next => s3.putObjectTagging({ Bucket: bucketName, - Key: objectNameAcl, Tagging: taggingConfig, - }, err => next(err)), - ], done); + 'account', async () => { + await s3.send(new PutBucketAclCommand({ + Bucket: bucketName, + ACL: 'public-read-write', + })); + await otherAccountS3.send(new PutObjectCommand({ + Bucket: bucketName, + Key: objectNameAcl, + })); + + await s3.send(new PutObjectTaggingCommand({ + Bucket: bucketName, + Key: objectNameAcl, + Tagging: taggingConfig, + })); }); }); }); diff --git a/tests/functional/aws-node-sdk/test/object/putObjectLegalHold.js b/tests/functional/aws-node-sdk/test/object/putObjectLegalHold.js index 6ab82f4619..af43270d83 100644 --- a/tests/functional/aws-node-sdk/test/object/putObjectLegalHold.js +++ b/tests/functional/aws-node-sdk/test/object/putObjectLegalHold.js @@ -1,12 +1,17 @@ const assert = require('assert'); -const AWS = require('aws-sdk'); +const { + CreateBucketCommand, + PutObjectCommand, + DeleteObjectCommand, + PutObjectLegalHoldCommand, + PutBucketPolicyCommand, +} = require('@aws-sdk/client-s3'); const { errorInstances } = require('arsenal'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); const checkError = require('../../lib/utility/checkError'); const changeObjectLock = require('../../../../utilities/objectLock-util'); -const { VALIDATE_CREDENTIALS, SIGN } = AWS.EventListeners.Core; const bucket = 'mock-bucket-lock'; const unlockedBucket = 'mock-bucket-no-lock'; 
@@ -54,39 +59,35 @@ describeSkipIfCeph('PUT object legal hold', () => { const otherAccountS3 = otherAccountBucketUtility.s3; let versionId; - beforeEach(() => { - process.stdout.write('Putting buckets and objects\n'); - return s3.createBucket({ + beforeEach(async () => { + await s3.send(new CreateBucketCommand({ Bucket: bucket, ObjectLockEnabledForBucket: true, - }).promise() - .then(() => s3.createBucket({ Bucket: unlockedBucket }).promise()) - .then(() => s3.putObject({ Bucket: unlockedBucket, Key: key }).promise()) - .then(() => s3.putObject({ Bucket: bucket, Key: key }).promise()) - .then(res => { - versionId = res.VersionId; - }) - .catch(err => { - process.stdout.write('Error in beforeEach\n'); - throw err; - }); + })); + await s3.send(new CreateBucketCommand({ Bucket: unlockedBucket })); + await s3.send(new PutObjectCommand({ Bucket: unlockedBucket, Key: key })); + const res = await s3.send(new PutObjectCommand({ Bucket: bucket, Key: key })); + versionId = res.VersionId; }); afterEach(() => { process.stdout.write('Emptying and deleting buckets\n'); - return bucketUtil.empty(bucket) - .then(() => bucketUtil.empty(unlockedBucket)) - .then(() => bucketUtil.deleteMany([bucket, unlockedBucket])) - .catch(err => { - process.stdout.write('Error in afterEach\n'); - throw err; - }); + return new Promise(resolve => { + changeObjectLock([{ bucket, key, versionId }], '', () => { + resolve(); + }); + }) + .then(() => bucketUtil.empty(bucket, true)) + .then(() => bucketUtil.empty(unlockedBucket, true)) + .then(() => bucketUtil.deleteMany([bucket, unlockedBucket])); }); it('should return AccessDenied putting legal hold with another account', done => { const params = createLegalHoldParams(bucket, key, 'ON'); - otherAccountS3.putObjectLegalHold(params, err => { + otherAccountS3.send(new PutObjectLegalHoldCommand(params)).then(() => { + throw new Error('Expected AccessDenied error'); + }).catch(err => { checkError(err, 'AccessDenied', 403); done(); }); @@ -94,19 +96,23 @@ describeSkipIfCeph('PUT object legal hold', () => { it('should return NoSuchKey error if key does not exist', done => { const params = createLegalHoldParams(bucket, 'keynotexist', 'ON'); - s3.putObjectLegalHold(params, err => { + s3.send(new PutObjectLegalHoldCommand(params)).then(() => { + throw new Error('Expected NoSuchKey error'); + }).catch(err => { checkError(err, 'NoSuchKey', 404); done(); }); }); it('should return NoSuchVersion error if version does not exist', done => { - s3.putObjectLegalHold({ + s3.send(new PutObjectLegalHoldCommand({ Bucket: bucket, Key: key, VersionId: '012345678901234567890123456789012', LegalHold: mockLegalHold.on, - }, err => { + })).then(() => { + throw new Error('Expected NoSuchVersion error'); + }).catch(err => { checkError(err, 'NoSuchVersion', 404); done(); }); @@ -115,7 +121,9 @@ it('should return InvalidRequest error putting legal hold to object ' + 'in bucket with no object lock enabled', done => { const params = createLegalHoldParams(unlockedBucket, key, 'ON'); - s3.putObjectLegalHold(params, err => { + s3.send(new PutObjectLegalHoldCommand(params)).then(() => { + throw new Error('Expected InvalidRequest error'); + }).catch(err => { checkError(err, 'InvalidRequest', 400); done(); }); @@ -123,46 +131,53 @@ it('should return MethodNotAllowed if object version is delete marker', done => { - s3.deleteObject({ Bucket: bucket, 
Key: key }, err => { - assert.ifError(err); + s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: key })) + .then(() => { const params = createLegalHoldParams(bucket, key, 'ON'); - s3.putObjectLegalHold(params, err => { - checkError(err, 'MethodNotAllowed', 405); - done(); - }); + return s3.send(new PutObjectLegalHoldCommand(params)); + }) + .then(() => { + throw new Error('Expected MethodNotAllowed error'); + }) + .catch(err => { + checkError(err, 'MethodNotAllowed', 405); + done(); }); }); it('should put object legal hold ON', done => { const params = createLegalHoldParams(bucket, key, 'ON'); - s3.putObjectLegalHold(params, err => { - assert.ifError(err); + s3.send(new PutObjectLegalHoldCommand(params)).then(() => { changeObjectLock( [{ bucket, key, versionId }], '', done); }); }); + it('should put object legal hold OFF', done => { const params = createLegalHoldParams(bucket, key, 'OFF'); - s3.putObjectLegalHold(params, err => { - assert.ifError(err); - changeObjectLock([{ bucket, key, versionId }], '', done); + s3.send(new PutObjectLegalHoldCommand(params)).then(() => { + changeObjectLock([{ bucket, key, versionId }], '', done); }); }); - it('should error if request has empty or undefined Status', done => { + it('should return error if request has empty or undefined Status', done => { const params = createLegalHoldParams(bucket, key, ''); - s3.putObjectLegalHold(params, err => { + s3.send(new PutObjectLegalHoldCommand(params)).then(() => { + throw new Error('Expected MalformedXML error'); + }).catch(err => { checkError(err, 'MalformedXML', 400); changeObjectLock([{ bucket, key, versionId }], '', done); }); }); it('should return error if request does not contain Status', done => { - s3.putObjectLegalHold({ + s3.send(new PutObjectLegalHoldCommand({ Bucket: bucket, Key: key, LegalHold: {}, - }, err => { + })).then(() => { + throw new Error('Expected MalformedXML error'); + }).catch(err => { checkError(err, 'MalformedXML', 400); changeObjectLock([{ bucket, key, versionId }], '', done); }); @@ -170,15 +185,19 @@ it('expects params.LegalHold.Status to be a string', done => { const params = createLegalHoldParams(bucket, key, true); - s3.putObjectLegalHold(params, err => { - checkError(err, 'InvalidParameterType'); + s3.send(new PutObjectLegalHoldCommand(params)).then(() => { + throw new Error('Expected MalformedXML error'); + }).catch(err => { + checkError(err, 'MalformedXML', 400); changeObjectLock([{ bucket, key, versionId }], '', done); }); }); it('expects Status request xml must be one of "ON", "OFF"', done => { const params = createLegalHoldParams(bucket, key, 'on'); - s3.putObjectLegalHold(params, err => { + s3.send(new PutObjectLegalHoldCommand(params)).then(() => { + throw new Error('Expected MalformedXML error'); + }).catch(err => { checkError(err, 'MalformedXML', 400); changeObjectLock([{ bucket, key, versionId }], '', done); }); @@ -186,8 +205,7 @@ it('should support request with versionId parameter', done => { const params = createLegalHoldParams(bucket, key, 'ON', versionId); - s3.putObjectLegalHold(params, err => { - assert.ifError(err); + s3.send(new PutObjectLegalHoldCommand(params)).then(() => { changeObjectLock([{ bucket, key, versionId }], '', done); }); }); @@ -208,13 +226,17 @@ describeSkipIfCeph('PUT object legal hold iam action and version id', () => { function awsRequest(auth, operation, params, callback) { if (auth) { - bucketUtil.s3[operation](params, 
callback); + const CommandClass = eval(operation); + s3.send(new CommandClass(params)) + .then(data => callback(null, data)) + .catch(err => callback(err)); } else { - const unauthBucketUtil = new BucketUtility('default', sigCfg); - const request = unauthBucketUtil.s3[operation](params); - request.removeListener('validate', VALIDATE_CREDENTIALS); - request.removeListener('sign', SIGN); - request.send(callback); + const unauthBucketUtil = new BucketUtility('default', sigCfg, true); + const unauthS3 = unauthBucketUtil.s3; + const CommandClass = eval(operation); + unauthS3.send(new CommandClass(params)) + .then(data => callback(null, data)) + .catch(err => callback(err)); } } @@ -227,18 +249,18 @@ describeSkipIfCeph('PUT object legal hold iam action and version id', () => { function cbWithError(done) { return err => { - assert.strictEqual(err.statusCode, errorInstances.AccessDenied.code); + assert.strictEqual(err.$metadata.httpStatusCode, errorInstances.AccessDenied.code); done(); }; } beforeEach(() => { process.stdout.write('Setting up bucket policy legal hold tests\n'); - return s3.createBucket({ + return s3.send(new CreateBucketCommand({ Bucket: testBucket, ObjectLockEnabledForBucket: true, - }).promise() - .then(() => s3.putObject({ Bucket: testBucket, Key: key }).promise()) + })) + .then(() => s3.send(new PutObjectCommand({ Bucket: testBucket, Key: key }))) .then(res => { versionId = res.VersionId; }) @@ -248,14 +270,9 @@ describeSkipIfCeph('PUT object legal hold iam action and version id', () => { }); }); - afterEach(() => { - process.stdout.write('Cleaning up bucket policy legal hold tests\n'); - return bucketUtil.empty(testBucket) - .then(() => bucketUtil.deleteMany([testBucket])) - .catch(err => { - process.stdout.write('Error in afterEach\n'); - throw err; - }); + afterEach(async () => { + await bucketUtil.empty(testBucket, true); + await bucketUtil.deleteMany([testBucket]); }); const policyTestCases = [ @@ -285,10 +302,12 @@ describeSkipIfCeph('PUT object legal hold iam action and version id', () => { Version: '2012-10-17', Statement: [statement], }; - s3.putBucketPolicy({ + s3.send(new PutBucketPolicyCommand({ Bucket: testBucket, Policy: JSON.stringify(bucketPolicy), - }, err => { + })).then(() => { + done(); + }).catch(err => { assert.ifError(err); done(); }); @@ -296,17 +315,17 @@ describeSkipIfCeph('PUT object legal hold iam action and version id', () => { if (testCase.expectedResult === 'allow') { afterEach(() => - s3.putObjectLegalHold({ + s3.send(new PutObjectLegalHoldCommand({ Bucket: testBucket, Key: key, LegalHold: { Status: 'OFF' }, - }).promise() - .then(() => s3.putObjectLegalHold({ + })) + .then(() => s3.send(new PutObjectLegalHoldCommand({ Bucket: testBucket, Key: key, VersionId: versionId, LegalHold: { Status: 'OFF' }, - }).promise()) + }))) ); } @@ -316,7 +335,7 @@ describeSkipIfCeph('PUT object legal hold iam action and version id', () => { Key: key, LegalHold: legalHoldConfig, }; - awsRequest(false, 'putObjectLegalHold', params, testCase.callback(done)); + awsRequest(false, 'PutObjectLegalHoldCommand', params, testCase.callback(done)); }); it(`should ${testCase.expectedResult} unauthenticated putObjectLegalHold with VersionId`, done => { @@ -326,7 +345,7 @@ describeSkipIfCeph('PUT object legal hold iam action and version id', () => { LegalHold: legalHoldConfig, VersionId: versionId, }; - awsRequest(false, 'putObjectLegalHold', params, testCase.callback(done)); + awsRequest(false, 'PutObjectLegalHoldCommand', params, testCase.callback(done)); }); }); }); diff 
diff --git a/tests/functional/aws-node-sdk/test/object/putPart.js b/tests/functional/aws-node-sdk/test/object/putPart.js
index c847e9e194..ec66956a03 100644
--- a/tests/functional/aws-node-sdk/test/object/putPart.js
+++ b/tests/functional/aws-node-sdk/test/object/putPart.js
@@ -1,4 +1,10 @@
 const assert = require('assert');
+const {
+    CreateBucketCommand,
+    CreateMultipartUploadCommand,
+    AbortMultipartUploadCommand,
+    UploadPartCommand,
+} = require('@aws-sdk/client-s3');
 
 const withV4 = require('../support/withV4');
 const BucketUtility = require('../../lib/utility/bucket-util');
@@ -12,55 +18,59 @@ describe('PUT object', () => {
         let s3;
         let uploadId;
 
-        beforeEach(() => {
+        beforeEach(async () => {
             bucketUtil = new BucketUtility('default', sigCfg);
             s3 = bucketUtil.s3;
-            return s3.createBucket({ Bucket: bucket }).promise()
-            .then(() => s3.createMultipartUpload({
-                Bucket: bucket, Key: key }).promise())
-            .then(res => {
-                uploadId = res.UploadId;
-                return uploadId;
-            })
-            .catch(err => {
-                process.stdout.write(`Error in beforeEach: ${err}\n`);
-                throw err;
-            });
+
+            await s3.send(new CreateBucketCommand({ Bucket: bucket }));
+            const res = await s3.send(new CreateMultipartUploadCommand({
+                Bucket: bucket,
+                Key: key
+            }));
+            uploadId = res.UploadId;
         });
 
-        afterEach(() => {
-            process.stdout.write('Emptying bucket');
-            return s3.abortMultipartUpload({
-                Bucket: bucket, Key: key, UploadId: uploadId,
-            }).promise()
-            .then(() => bucketUtil.empty(bucket))
-            .then(() => {
-                process.stdout.write('Deleting bucket');
-                return bucketUtil.deleteOne(bucket);
-            })
-            .catch(err => {
-                process.stdout.write('Error in afterEach');
-                throw err;
-            });
+        afterEach(async () => {
+            await s3.send(new AbortMultipartUploadCommand({
+                Bucket: bucket,
+                Key: key,
+                UploadId: uploadId,
+            }));
+            await bucketUtil.empty(bucket);
+            await bucketUtil.deleteOne(bucket);
         });
 
         it('should return Not Implemented error for obj. encryption using ' +
-            'customer-provided encryption keys', done => {
-            const params = { Bucket: bucket, Key: 'key', PartNumber: 0,
-                UploadId: uploadId, SSECustomerAlgorithm: 'AES256' };
-            s3.uploadPart(params, err => {
-                assert.strictEqual(err.code, 'NotImplemented');
-                done();
-            });
+            'customer-provided encryption keys', async () => {
+            const params = {
+                Bucket: bucket,
+                Key: 'key',
+                PartNumber: 0,
+                UploadId: uploadId,
+                SSECustomerAlgorithm: 'AES256'
+            };
+            try {
+                await s3.send(new UploadPartCommand(params));
+                throw new Error('Expected NotImplemented error');
+            } catch (err) {
+                assert.strictEqual(err.name, 'NotImplemented');
+            }
         });
 
-        it('should return InvalidArgument if negative PartNumber', done => {
-            const params = { Bucket: bucket, Key: 'key', PartNumber: -1,
-                UploadId: uploadId };
-            s3.uploadPart(params, err => {
-                assert.strictEqual(err.code, 'InvalidArgument');
-                done();
-            });
+        it('should return InvalidArgument if negative PartNumber', async () => {
+            const params = {
+                Bucket: bucket,
+                Key: 'key',
+                PartNumber: -1,
+                UploadId: uploadId
+            };
+
+            try {
+                await s3.send(new UploadPartCommand(params));
+                assert.fail('Expected InvalidArgument error');
+            } catch (err) {
+                assert.strictEqual(err.name, 'InvalidArgument');
+            }
         });
     });
 });
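The err.name and err.$metadata.httpStatusCode assertions above are the v3 replacements for v2's err.code and err.statusCode, and they are the reason the shared checkError helper changed shape. A standalone sketch of the v3 error surface; the bucket name and region are illustrative:

    const { S3Client, HeadBucketCommand } = require('@aws-sdk/client-s3');

    const client = new S3Client({ region: 'us-east-1' });

    client.send(new HeadBucketCommand({ Bucket: 'no-such-bucket-0000' }))
        .catch(err => {
            // v3 service exceptions carry the S3 error code in err.name and
            // the HTTP status in err.$metadata.httpStatusCode.
            console.log(err.name, err.$metadata.httpStatusCode);
        });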
diff --git a/tests/functional/aws-node-sdk/test/object/putRetention.js b/tests/functional/aws-node-sdk/test/object/putRetention.js
index 2f99138d00..2f0d5637ff 100644
--- a/tests/functional/aws-node-sdk/test/object/putRetention.js
+++ b/tests/functional/aws-node-sdk/test/object/putRetention.js
@@ -1,13 +1,19 @@
 const assert = require('assert');
 const moment = require('moment');
-const AWS = require('aws-sdk');
+const { promisify } = require('util');
+const {
+    CreateBucketCommand,
+    PutObjectCommand,
+    DeleteObjectCommand,
+    PutObjectRetentionCommand,
+    PutBucketPolicyCommand
+} = require('@aws-sdk/client-s3');
 const { errorInstances } = require('arsenal');
 
 const withV4 = require('../support/withV4');
 const BucketUtility = require('../../lib/utility/bucket-util');
 const checkError = require('../../lib/utility/checkError');
 const changeObjectLock = require('../../../../utilities/objectLock-util');
-const { VALIDATE_CREDENTIALS, SIGN } = AWS.EventListeners.Core;
 
 const bucketName = 'lockenabledputbucket';
 const unlockedBucket = 'locknotenabledputbucket';
@@ -15,12 +21,14 @@ const objectName = 'putobjectretentionobject';
 
 const retentionConfig = {
     Mode: 'GOVERNANCE',
-    RetainUntilDate: moment().add(1, 'd').add(123, 'ms').toISOString(),
+    RetainUntilDate: moment().add(1, 'd').add(123, 'ms'),
 };
 
 const isCEPH = process.env.CI_CEPH !== undefined;
 const describeSkipIfCeph = isCEPH ? describe.skip : describe;
 
+const changeObjectLockPromise = promisify(changeObjectLock);
+
 describeSkipIfCeph('PUT object retention', () => {
     withV4(sigCfg => {
         const bucketUtil = new BucketUtility('default', sigCfg);
@@ -29,121 +37,108 @@ describeSkipIfCeph('PUT object retention', () => {
         const otherAccountS3 = otherAccountBucketUtility.s3;
         let versionId;
 
-        beforeEach(() => {
-            process.stdout.write('Putting buckets and objects\n');
-            return s3.createBucket({
+        beforeEach(async () => {
+            await s3.send(new CreateBucketCommand({
                 Bucket: bucketName,
                 ObjectLockEnabledForBucket: true,
-            }).promise()
-            .then(() => s3.createBucket({ Bucket: unlockedBucket }).promise())
-            .then(() => s3.putObject({ Bucket: unlockedBucket, Key: objectName }).promise())
-            .then(() => s3.putObject({ Bucket: bucketName, Key: objectName }).promise())
-            .then(res => {
-                versionId = res.VersionId;
-            })
-            .catch(err => {
-                process.stdout.write('Error in beforeEach\n');
-                throw err;
-            });
+            }));
+            await s3.send(new CreateBucketCommand({ Bucket: unlockedBucket }));
+            await s3.send(new PutObjectCommand({ Bucket: unlockedBucket, Key: objectName }));
+            const putRes = await s3.send(new PutObjectCommand({ Bucket: bucketName, Key: objectName }));
+            versionId = putRes.VersionId;
         });
 
-        afterEach(() => {
-            process.stdout.write('Emptying and deleting buckets\n');
-            return bucketUtil.empty(bucketName)
-            .then(() => bucketUtil.empty(unlockedBucket))
-            .then(() => bucketUtil.deleteMany([bucketName, unlockedBucket]))
-            .catch(err => {
-                process.stdout.write('Error in afterEach\n');
-                throw err;
-            });
+        afterEach(async () => {
+            await bucketUtil.empty(bucketName, true);
+            await bucketUtil.empty(unlockedBucket, true);
+            await bucketUtil.deleteMany([bucketName, unlockedBucket]);
         });
 
-        it('should return AccessDenied putting retention with another account',
-        done => {
-            otherAccountS3.putObjectRetention({
-                Bucket: bucketName,
-                Key: objectName,
-                Retention: retentionConfig,
-            }, err => {
+        it('should return AccessDenied putting retention with another account', async () => {
+            try {
+                await otherAccountS3.send(new PutObjectRetentionCommand({
+                    Bucket: bucketName,
+                    Key: objectName,
+                    Retention: retentionConfig,
+                }));
+                assert.fail('Expected error');
+            } catch (err) {
                 checkError(err, 'AccessDenied', 403);
-                done();
-            });
+            }
         });
 
-        it('should return NoSuchKey error if key does not exist', done => {
-            s3.putObjectRetention({
-                Bucket: bucketName,
-                Key: 'thiskeydoesnotexist',
-                Retention: retentionConfig,
-            }, err => {
+        it('should return NoSuchKey error if key does not exist', async () => {
+            try {
+                await s3.send(new PutObjectRetentionCommand({
+                    Bucket: bucketName,
+                    Key: 'thiskeydoesnotexist',
+                    Retention: retentionConfig,
+                }));
+                assert.fail('Expected error');
+            } catch (err) {
                 checkError(err, 'NoSuchKey', 404);
-                done();
-            });
+            }
         });
 
-        it('should return NoSuchVersion error if version does not exist', done => {
-            s3.putObjectRetention({
-                Bucket: bucketName,
-                Key: objectName,
-                VersionId: '012345678901234567890123456789012',
-                Retention: retentionConfig,
-            }, err => {
+        it('should return NoSuchVersion error if version does not exist', async () => {
+            try {
+                await s3.send(new PutObjectRetentionCommand({
+                    Bucket: bucketName,
+                    Key: objectName,
+                    VersionId: '012345678901234567890123456789012',
+                    Retention: retentionConfig,
+                }));
+                assert.fail('Expected error');
+            } catch (err) {
                 checkError(err, 'NoSuchVersion', 404);
-                done();
-            });
+            }
         });
 
-        it('should return InvalidRequest error putting retention to object ' +
-        'in bucket with no object lock enabled', done => {
-            s3.putObjectRetention({
-                Bucket: unlockedBucket,
-                Key: objectName,
-                Retention: retentionConfig,
-            }, err => {
+        it('should return InvalidRequest error putting retention to object in bucket with no object lock ' +
+        'enabled', async () => {
+            try {
+                await s3.send(new PutObjectRetentionCommand({
+                    Bucket: unlockedBucket,
+                    Key: objectName,
+                    Retention: retentionConfig,
+                }));
+                assert.fail('Expected error');
+            } catch (err) {
                 checkError(err, 'InvalidRequest', 400);
-                done();
-            });
+            }
         });
 
-        it('should return MethodNotAllowed if object version is delete marker',
-        done => {
-            s3.deleteObject({ Bucket: bucketName, Key: objectName }, err => {
-                assert.ifError(err);
-                s3.putObjectRetention({
+        it('should return MethodNotAllowed if object version is delete marker', async () => {
+            await s3.send(new DeleteObjectCommand({ Bucket: bucketName, Key: objectName }));
+            try {
+                await s3.send(new PutObjectRetentionCommand({
                     Bucket: bucketName,
                     Key: objectName,
                     Retention: retentionConfig,
-                }, err => {
-                    checkError(err, 'MethodNotAllowed', 405);
-                    done();
-                });
-            });
+                }));
+                assert.fail('Expected error');
+            } catch (err) {
+                checkError(err, 'MethodNotAllowed', 405);
+            }
         });
 
-        it('should put object retention', done => {
-            s3.putObjectRetention({
+        it('should put object retention', async () => {
+            await s3.send(new PutObjectRetentionCommand({
                 Bucket: bucketName,
                 Key: objectName,
                 Retention: retentionConfig,
-            }, err => {
-                assert.ifError(err);
-                changeObjectLock([
-                    { bucket: bucketName, key: objectName, versionId }], '', done);
-            });
+            }));
+            await changeObjectLockPromise([{ bucket: bucketName, key: objectName, versionId }], '');
        });
 
-        it('should support request with versionId parameter', done => {
-            s3.putObjectRetention({
+        it('should support request with versionId parameter', async () => {
+            await s3.send(new PutObjectRetentionCommand({
                 Bucket: bucketName,
                 Key: objectName,
                 Retention: retentionConfig,
                 VersionId: versionId,
-            }, err => {
-                assert.ifError(err);
-                changeObjectLock([
-                    { bucket: bucketName, key: objectName, versionId },
-                ], '', done);
-            });
+            }));
+            await changeObjectLockPromise([{ bucket: bucketName, key: objectName, versionId }], '');
         });
     });
 });
@@ -160,16 +155,21 @@ describeSkipIfCeph('PUT object retention iam action and version id', () => {
 
     function awsRequest(auth, operation, params, callback) {
         if (auth) {
-            bucketUtil.s3[operation](params, callback);
+            const CommandClass = eval(operation);
+            s3.send(new CommandClass(params))
+                .then(data => callback(null, data))
+                .catch(err => callback(err));
         } else {
-            const unauthBucketUtil = new BucketUtility('default', sigCfg);
-            const request = unauthBucketUtil.s3[operation](params);
-            request.removeListener('validate', VALIDATE_CREDENTIALS);
-            request.removeListener('sign', SIGN);
-            request.send(callback);
+            const unauthBucketUtil = new BucketUtility('default', sigCfg, true);
+            const unauthS3 = unauthBucketUtil.s3;
+            const CommandClass = eval(operation);
+            unauthS3.send(new CommandClass(params))
+                .then(data => callback(null, data))
+                .catch(err => callback(err));
         }
     }
 
+
     function cbNoError(done) {
         return err => {
             assert.ifError(err);
@@ -179,35 +179,23 @@ describeSkipIfCeph('PUT object retention iam action and version id', () => {
 
     function cbWithError(done) {
         return err => {
-            assert.strictEqual(err.statusCode, errorInstances.AccessDenied.code);
+            assert.strictEqual(err.$metadata.httpStatusCode, errorInstances.AccessDenied.code);
             done();
         };
     }
 
-    beforeEach(() => {
-        process.stdout.write('Setting up bucket policy retention tests\n');
-        return s3.createBucket({
+    beforeEach(async () => {
+        await s3.send(new CreateBucketCommand({
             Bucket: testBucket,
             ObjectLockEnabledForBucket: true,
-        }).promise()
-        .then(() => s3.putObject({ Bucket: testBucket, Key: objectName }).promise())
-        .then(res => {
-            versionId = res.VersionId;
-        })
-        .catch(err => {
-            process.stdout.write('Error in beforeEach\n');
-            throw err;
-        });
+        }));
+        const res = await s3.send(new PutObjectCommand({ Bucket: testBucket, Key: objectName }));
+        versionId = res.VersionId;
    });
 
-    afterEach(() => {
-        process.stdout.write('Cleaning up bucket policy retention tests\n');
-        return bucketUtil.empty(testBucket, true)
-        .then(() => bucketUtil.deleteMany([testBucket]))
-        .catch(err => {
-            process.stdout.write('Error in afterEach\n');
-            throw err;
-        });
+    afterEach(async () => {
+        await bucketUtil.empty(testBucket, true);
+        await bucketUtil.deleteMany([testBucket]);
    });
 
     const policyTestCases = [
@@ -237,10 +225,12 @@ describeSkipIfCeph('PUT object retention iam action and version id', () => {
                     Version: '2012-10-17',
                     Statement: [statement],
                 };
-                s3.putBucketPolicy({
+                s3.send(new PutBucketPolicyCommand({
                     Bucket: testBucket,
                     Policy: JSON.stringify(bucketPolicy),
-                }, err => {
-                    assert.ifError(err);
-                    done();
-                });
+                })).then(() => done())
+                    .catch(err => done(err));
             });
@@ -252,7 +242,7 @@ describeSkipIfCeph('PUT object retention iam action and version id', () => {
                     Key: objectName,
                     Retention: retentionConfig,
                 };
-                awsRequest(false, 'putObjectRetention', params, testCase.callback(done));
+                awsRequest(false, 'PutObjectRetentionCommand', params, testCase.callback(done));
             });
 
             it(`should ${testCase.expectedResult} unauthenticated putObjectRetention with VersionId`, done => {
@@ -262,7 +252,7 @@ describeSkipIfCeph('PUT object retention iam action and version id', () => {
                     Retention: retentionConfig,
                     VersionId: versionId,
                 };
-                awsRequest(false, 'putObjectRetention', params, testCase.callback(done));
+                awsRequest(false, 'PutObjectRetentionCommand', params, testCase.callback(done));
             });
         });
     });
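The changeObjectLockPromise helper above comes from util.promisify, which turns any helper with a trailing (err, result) callback into an awaitable function, so the callback-style objectLock utility can be used from async tests. A sketch with a stand-in helper; the body of fakeChangeObjectLock is invented purely for illustration:

    const { promisify } = require('util');

    // Stand-in for a callback-style utility such as objectLock-util.
    function fakeChangeObjectLock(entries, mode, cb) {
        process.nextTick(() => cb(null, entries.length));
    }

    const fakeChangeObjectLockPromise = promisify(fakeChangeObjectLock);

    (async () => {
        const n = await fakeChangeObjectLockPromise([{ bucket: 'b', key: 'k' }], '');
        console.log(`updated ${n} entries`);
    })();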
diff --git a/tests/functional/aws-node-sdk/test/object/putVersion.js b/tests/functional/aws-node-sdk/test/object/putVersion.js
index f90d712536..1c3e2b571e 100644
--- a/tests/functional/aws-node-sdk/test/object/putVersion.js
+++ b/tests/functional/aws-node-sdk/test/object/putVersion.js
@@ -8,11 +8,20 @@ const { DummyRequestLogger } = require('../../../../unit/helpers');
 const checkError = require('../../lib/utility/checkError');
 const { getMetadata, fakeMetadataArchive, isNullKeyMetadataV1 } = require('../utils/init');
 const { hasColdStorage } = require('../../lib/utility/test-utils');
+const { CreateBucketCommand,
+    PutObjectCommand,
+    HeadObjectCommand,
+    GetObjectCommand,
+    PutObjectAclCommand,
+    PutObjectTaggingCommand,
+    PutObjectLegalHoldCommand,
+    ListObjectsCommand,
+    DeleteObjectCommand,
+    PutBucketVersioningCommand } = require('@aws-sdk/client-s3');
 const {
     LOCATION_NAME_DMF,
 } = require('../../../../constants');
-
 const log = new DummyRequestLogger();
 
 const bucketName = 'bucket1putversion32';
@@ -25,13 +34,27 @@ const archive = {
     restoreRequestedDays: 5,
 };
 
-function putObjectVersion(s3, params, vid, next) {
+async function putObjectVersion(s3, params, vid, next) {
     const paramsWithBody = { ...params, Body: '123' };
-    const request = s3.putObject(paramsWithBody);
-    request.on('build', () => {
-        request.httpRequest.headers['x-scal-s3-version-id'] = vid;
-    });
-    return request.send(next);
+    const command = new PutObjectCommand(paramsWithBody);
+    command.middlewareStack.add(
+        next => async args => {
+            // eslint-disable-next-line no-param-reassign
+            args.request.headers['x-scal-s3-version-id'] = vid;
+            return next(args);
+        },
+        {
+            step: 'build',
+            name: 'addVersionIdHeader', // Add a name to identify the middleware
+        }
+    );
+
+    try {
+        const res = await s3.send(command);
+        next(null, res);
+    } catch (err) {
+        next(err);
+    }
 }
 
 function checkVersionsAndUpdate(versionsBefore, versionsAfter, indexes) {
@@ -66,22 +89,19 @@ describe('PUT object with x-scal-s3-version-id header', () => {
         s3 = bucketUtil.s3;
         async.series([
             next => metadata.setup(next),
-            next => s3.createBucket({ Bucket: bucketName }, next),
-            next => s3.createBucket({ Bucket: bucketNameMD, ObjectLockEnabledForBucket: true, }, next),
+            next => s3.send(new CreateBucketCommand({ Bucket: bucketName })).then(() => {
+                next();
+            }).catch(next),
+            next => s3.send(new CreateBucketCommand({ Bucket: bucketNameMD,
+                ObjectLockEnabledForBucket: true })).then(() => {
+                next();
+            }).catch(next),
         ], done);
     });
 
-    afterEach(() => {
-        process.stdout.write('Emptying bucket');
-        return bucketUtil.emptyMany([bucketName, bucketNameMD])
-            .then(() => {
-                process.stdout.write('Deleting bucket');
-                return bucketUtil.deleteMany([bucketName, bucketNameMD]);
-            })
-            .catch(err => {
-                process.stdout.write('Error in afterEach');
-                throw err;
-            });
+    afterEach(async () => {
+        await bucketUtil.emptyMany([bucketName, bucketNameMD]);
+        await bucketUtil.deleteMany([bucketName, bucketNameMD]);
    });
 
     describe('error handling validation (without cold storage location)', () => {
@@ -93,10 +113,15 @@ describe('PUT object with x-scal-s3-version-id header', () => {
             }
         };
         const params = { Bucket: bucketName, Key: objectName };
+        let vId;
 
         async.series([
-            next => s3.putBucketVersioning(vParams, next),
-            next => s3.putObject(params, next),
+            next => s3.send(new PutBucketVersioningCommand(vParams)).then(() => next()).catch(next),
+            next => s3.send(new PutObjectCommand(params)).then(res => {
+                vId = res.VersionId;
+                return next();
+            }).catch(next),
+            next => fakeMetadataArchive(bucketName, objectName, vId, archive, next),
             next => putObjectVersion(s3, params, 'aJLWKz4Ko9IjBBgXKj5KQT.G9UHv0g7P', err => {
                 checkError(err, 'InvalidArgument', 400);
                 return next();
@@ -131,8 +156,8 @@ describe('PUT object with x-scal-s3-version-id header', () => {
         const params = { Bucket: bucketName, Key: objectName };
 
         async.series([
-            next => s3.putBucketVersioning(vParams, next),
-            next => s3.putObject(params, next),
+            next => s3.send(new PutBucketVersioningCommand(vParams)).then(() => next()).catch(next),
+            next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
             next => putObjectVersion(s3, params, '393833343735313131383832343239393939393952473030312020313031',
                 err => {
                     checkError(err, 'NoSuchVersion', 404);
@@ -148,7 +173,7 @@ describe('PUT object with x-scal-s3-version-id header', () => {
         const params = { Bucket: bucketName, Key: objectName };
 
         async.series([
-            next => s3.putObject(params, next),
+            next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
             next => putObjectVersion(s3, params, '', err => {
                 checkError(err, 'InvalidObjectState', 403);
                 return next();
@@ -170,11 +195,11 @@ describe('PUT object with x-scal-s3-version-id header', () => {
         let vId;
 
         async.series([
-            next => s3.putBucketVersioning(vParams, next),
-            next => s3.putObject(params, next),
-            next => s3.deleteObject(params, (err, res) => {
+            next => s3.send(new PutBucketVersioningCommand(vParams)).then(() => next()).catch(next),
+            next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
+            next => s3.send(new DeleteObjectCommand(params)).then(res => {
                 vId = res.VersionId;
-                return next(err);
-            }),
+                return next();
+            }).catch(next),
             next => putObjectVersion(s3, params, vId, err => {
                 checkError(err, 'MethodNotAllowed', 405);
@@ -189,14 +214,15 @@ describe('PUT object with x-scal-s3-version-id header', () => {
 
     describeSkipNullMdV1('with cold storage location', () => {
         it('should overwrite an object', done => {
-            const params = { Bucket: bucketName, Key: objectName };
+            const params = { Bucket: bucketName, Key: objectName };
             let objMDBefore;
             let objMDAfter;
             let versionsBefore;
             let versionsAfter;
-
             async.series([
-                next => s3.putObject(params, next),
+                next => s3.send(new PutObjectCommand(params)).then(() => {
+                    next();
+                }).catch(next),
                 next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next),
                 next => getMetadata(bucketName, objectName, undefined, (err, objMD) => {
                     objMDBefore = objMD;
@@ -243,10 +269,10 @@ describe('PUT object with x-scal-s3-version-id header', () => {
            let vId;
 
            async.series([
-                next => s3.putBucketVersioning(vParams, next),
-                next => s3.putObject(params, (err, res) => {
+                next => s3.send(new PutBucketVersioningCommand(vParams)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectCommand(params)).then(res => {
                     vId = res.VersionId;
-                    return next(err);
-                }),
+                    return next();
+                }).catch(next),
                 next => fakeMetadataArchive(bucketName, objectName, vId, archive, next),
                 next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
@@ -294,10 +320,10 @@ describe('PUT object with x-scal-s3-version-id header', () => {
            let vId;
 
            async.series([
-                next => s3.putBucketVersioning(vParams, next),
-                next => s3.putObject(params, (err, res) => {
+                next => s3.send(new PutBucketVersioningCommand(vParams)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectCommand(params)).then(res => {
                     vId = res.VersionId;
-                    return next(err);
-                }),
+                    return next();
+                }).catch(next),
                 next => fakeMetadataArchive(bucketName, objectName, vId, archive, next),
                 next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
@@ -344,9 +370,9 @@ describe('PUT object with x-scal-s3-version-id header', () => {
            let objMDAfter;
 
            async.series([
-                next => s3.putObject(params, next),
-                next => s3.putBucketVersioning(vParams, next),
-                next => s3.putObject(params, next),
+                next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
+                next => s3.send(new PutBucketVersioningCommand(vParams)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
                 next => fakeMetadataArchive(bucketName, objectName, 'null', archive, next),
                 next => getMetadata(bucketName, objectName, 'null', (err, objMD) => {
                     objMDBefore = objMD;
@@ -393,11 +419,11 @@ describe('PUT object with x-scal-s3-version-id header', () => {
            let vId;
 
            async.series([
-                next => s3.putObject(params, next),
-                next => s3.putBucketVersioning(vParams, next),
-                next => s3.putObject(params, (err, res) => {
+                next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
+                next => s3.send(new PutBucketVersioningCommand(vParams)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectCommand(params)).then(res => {
                     vId = res.VersionId;
-                    return next(err);
-                }),
+                    return next();
+                }).catch(next),
                 next => fakeMetadataArchive(bucketName, objectName, vId, archive, next),
                 next => getMetadata(bucketName, objectName, vId, (err, objMD) => {
@@ -450,10 +476,10 @@ describe('PUT object with x-scal-s3-version-id header', () => {
            let versionsAfter;
 
            async.series([
-                next => s3.putBucketVersioning(vParams, next),
-                next => s3.putObject(params, next),
-                next => s3.putBucketVersioning(sParams, next),
-                next => s3.putObject(params, next),
+                next => s3.send(new PutBucketVersioningCommand(vParams)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
+                next => s3.send(new PutBucketVersioningCommand(sParams)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
                 next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next),
                 next => getMetadata(bucketName, objectName, undefined, (err, objMD) => {
                     objMDBefore = objMD;
@@ -500,14 +526,14 @@ describe('PUT object with x-scal-s3-version-id header', () => {
            let vId;
 
            async.series([
-                next => s3.putBucketVersioning(vParams, next),
-                next => s3.putObject(params, next),
-                next => s3.putObject(params, (err, res) => {
+                next => s3.send(new PutBucketVersioningCommand(vParams)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectCommand(params)).then(res => {
                     vId = res.VersionId;
-                    return next(err);
-                }),
+                    return next();
+                }).catch(next),
                 next => fakeMetadataArchive(bucketName, objectName, vId, archive, next),
-                next => s3.putObject(params, next),
+                next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
                 next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
                     versionsBefore = res.Versions;
                     return next(err);
@@ -553,11 +579,11 @@ describe('PUT object with x-scal-s3-version-id header', () => {
            let vId;
 
            async.series([
-                next => s3.putBucketVersioning(vParams, next),
-                next => s3.putObject(params, next),
-                next => s3.putObject(params, (err, res) => {
+                next => s3.send(new PutBucketVersioningCommand(vParams)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectCommand(params)).then(res => {
                     vId = res.VersionId;
-                    return next(err);
-                }),
+                    return next();
+                }).catch(next),
                 next => fakeMetadataArchive(bucketName, objectName, vId, archive, next),
                 next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
@@ -611,11 +637,11 @@ describe('PUT object with x-scal-s3-version-id header', () => {
            let vId;
 
            async.series([
-                next => s3.putBucketVersioning(vParams, next),
-                next => s3.putObject(params, next),
-                next => s3.putObject(params, (err, res) => {
+                next => s3.send(new PutBucketVersioningCommand(vParams)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectCommand(params)).then(res => {
                     vId = res.VersionId;
-                    return next(err);
-                }),
+                    return next();
+                }).catch(next),
                 next => fakeMetadataArchive(bucketName, objectName, vId, archive, next),
                 next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
@@ -626,7 +652,7 @@ describe('PUT object with x-scal-s3-version-id header', () => {
                     objMDBefore = objMD;
                     return next(err);
                 }),
-                next => s3.putBucketVersioning(sParams, next),
+                next => s3.send(new PutBucketVersioningCommand(sParams)).then(() => next()).catch(next),
                 next => putObjectVersion(s3, params, vId, next),
                 next => getMetadata(bucketName, objectName, vId, (err, objMD) => {
                     objMDAfter = objMD;
@@ -663,7 +689,7 @@ describe('PUT object with x-scal-s3-version-id header', () => {
            let versionsAfter;
 
            async.series([
-                next => s3.putObject(params, next),
+                next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
                 next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next),
                 next => metadata.listObject(bucketName, mdListingParams, log, (err, res) => {
                     versionsBefore = res.Versions;
@@ -673,7 +699,7 @@ describe('PUT object with x-scal-s3-version-id header', () => {
                     objMDBefore = objMD;
                     return next(err);
                 }),
-                next => s3.putBucketVersioning(vParams, next),
+                next => s3.send(new PutBucketVersioningCommand(vParams)).then(() => next()).catch(next),
                 next => putObjectVersion(s3, params, 'null', next),
                 next => getMetadata(bucketName, objectName, undefined, (err, objMD) => {
                     objMDAfter = objMD;
@@ -707,7 +733,7 @@ describe('PUT object with x-scal-s3-version-id header', () => {
            };
 
            async.series([
-                next => s3.putObject(params, next),
+                next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
                 next => fakeMetadataArchive(bucketName, objectName, undefined, archiveCompleted, next),
                 next => putObjectVersion(s3, params, '', err => {
                     checkError(err, 'InvalidObjectState', 403);
@@ -728,49 +754,56 @@ describe('PUT object with x-scal-s3-version-id header', () => {
             const params = { Bucket: bucketName, Key: objectName };
             let objMDBefore;
             let objMDAfter;
+
             async.series([
                 next => {
                     if (versioning === 'versioned') {
-                        return s3.putBucketVersioning({
+                        return s3.send(new PutBucketVersioningCommand({
                             Bucket: bucketName,
                             VersioningConfiguration: { Status: 'Enabled' }
-                        }, next);
+                        })).then(() => next()).catch(next);
                     } else if (versioning === 'suspended') {
-                        return s3.putBucketVersioning({
+                        return s3.send(new PutBucketVersioningCommand({
                             Bucket: bucketName,
                             VersioningConfiguration: { Status: 'Suspended' }
-                        }, next);
+                        })).then(() => next()).catch(next);
                     }
                     return next();
                 },
-                next => s3.putObject(params, next),
+                next => s3.send(new PutObjectCommand(params)).then(() => next()).catch(next),
                 next => fakeMetadataArchive(bucketName, objectName, undefined, archive, next),
                 next => getMetadata(bucketName, objectName, undefined, (err, objMD) => {
                     objMDBefore = objMD;
                     return next(err);
                 }),
-                next => metadata.listObject(bucketName, mdListingParams, log, next),
+                next => metadata.listObject(bucketName, mdListingParams, log, err => next(err)),
                 next => putObjectVersion(s3, params, '', next),
                 next => getMetadata(bucketName, objectName, undefined, (err, objMD) => {
                     objMDAfter = objMD;
                     return next(err);
                 }),
-                next => s3.listObjects({ Bucket: bucketName }, (err, res) => {
-                    assert.ifError(err);
+                next => s3.send(new ListObjectsCommand({ Bucket: bucketName })).then(res => {
                     assert.strictEqual(res.Contents.length, 1);
                     assert.strictEqual(res.Contents[0].StorageClass, LOCATION_NAME_DMF);
                     return next();
-                }),
-                next => s3.headObject(params, (err, res) => {
-                    assert.ifError(err);
+                }).catch(next),
+                next => s3.send(new HeadObjectCommand(params)).then(res => {
                     assert.strictEqual(res.StorageClass, LOCATION_NAME_DMF);
                     return next();
-                }),
-                next => s3.getObject(params, (err, res) => {
-                    assert.ifError(err);
+                }).catch(next),
+                next => s3.send(new GetObjectCommand(params)).then(res => {
                     assert.strictEqual(res.StorageClass, LOCATION_NAME_DMF);
                     return next();
-                }),
+                }).catch(next),
             ], err => {
                 assert.strictEqual(err, null, `Expected success got error ${JSON.stringify(err)}`);
@@ -797,76 +830,76 @@ describe('PUT object with x-scal-s3-version-id header', () => {
         });
 
         it('should "copy" all but non data-related metadata (data encryption, data size...)', done => {
-            const params = {
-                Bucket: bucketNameMD,
-                Key: objectName
-            };
-            const putParams = {
-                ...params,
-                Metadata: {
-                    'custom-user-md': 'custom-md',
-                },
-                WebsiteRedirectLocation: 'http://custom-redirect'
-            };
-            const aclParams = {
-                ...params,
-                // email of user Bart defined in authdata.json
-                GrantFullControl: 'emailaddress=sampleaccount1@sampling.com',
-            };
-            const tagParams = {
-                ...params,
-                Tagging: {
-                    TagSet: [{
-                        Key: 'tag1',
-                        Value: 'value1'
-                    }, {
-                        Key: 'tag2',
-                        Value: 'value2'
-                    }]
-                }
-            };
-            const legalHoldParams = {
-                ...params,
-                LegalHold: {
-                    Status: 'ON'
-                },
-            };
-            const acl = {
-                'Canned': '',
-                'FULL_CONTROL': [
-                    // canonicalID of user Bart
-                    '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be',
-                ],
-                'WRITE_ACP': [],
-                'READ': [],
-                'READ_ACP': [],
-            };
-            const tags = { tag1: 'value1', tag2: 'value2' };
-            const replicationInfo = {
-                'status': 'COMPLETED',
-                'backends': [
-                    {
-                        'site': 'azure-normal',
-                        'status': 'COMPLETED',
-                        'dataStoreVersionId': '',
+        const params = {
+            Bucket: bucketNameMD,
+            Key: objectName
+        };
+        const putParams = {
+            ...params,
+            Metadata: {
+                'custom-user-md': 'custom-md',
+            },
+            WebsiteRedirectLocation: 'http://custom-redirect'
+        };
+        const aclParams = {
+            ...params,
+            // email of user Bart defined in authdata.json
+            GrantFullControl: 'emailaddress=sampleaccount1@sampling.com',
+        };
+        const tagParams = {
+            ...params,
+            Tagging: {
+                TagSet: [{
+                    Key: 'tag1',
+                    Value: 'value1'
+                }, {
+                    Key: 'tag2',
+                    Value: 'value2'
+                }]
+            }
+        };
+        const legalHoldParams = {
+            ...params,
+            LegalHold: {
+                Status: 'ON'
             },
-                ],
-                'content': [
-                    'DATA',
-                    'METADATA',
-                ],
-                'destination': 'arn:aws:s3:::versioned',
-                'storageClass': 'azure-normal',
-                'role': 'arn:aws:iam::root:role/s3-replication-role',
-                'storageType': 'azure',
-                'dataStoreVersionId': '',
-                'isNFS': null,
+        };
+        const acl = {
+            'Canned': '',
+            'FULL_CONTROL': [
+                // canonicalID of user Bart
+                '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be',
+            ],
+            'WRITE_ACP': [],
+            'READ': [],
+            'READ_ACP': [],
+        };
+        const tags = { tag1: 'value1', tag2: 'value2' };
+        const replicationInfo = {
+            'status': 'COMPLETED',
+            'backends': [
+                {
+                    'site': 'azure-normal',
+                    'status': 'COMPLETED',
+                    'dataStoreVersionId': '',
+                },
+            ],
+            'content': [
+                'DATA',
+                'METADATA',
+            ],
+            'destination': 'arn:aws:s3:::versioned',
+            'storageClass': 'azure-normal',
+            'role': 'arn:aws:iam::root:role/s3-replication-role',
+            'storageType': 'azure',
+            'dataStoreVersionId': '',
+            'isNFS': null,
             };
 
             async.series([
-                next => s3.putObject(putParams, next),
-                next => s3.putObjectAcl(aclParams, next),
-                next => s3.putObjectTagging(tagParams, next),
-                next => s3.putObjectLegalHold(legalHoldParams, next),
+                next => s3.send(new PutObjectCommand(putParams)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectAclCommand(aclParams)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectTaggingCommand(tagParams)).then(() => next()).catch(next),
+                next => s3.send(new PutObjectLegalHoldCommand(legalHoldParams)).then(() => next()).catch(next),
                 next => getMetadata(bucketNameMD, objectName, undefined, (err, objMD) => {
                     if (err) {
                         return next(err);
                     }
@@ -909,7 +942,7 @@ describe('PUT object with x-scal-s3-version-id header', () => {
                 // removing legal hold to be able to clean the bucket after the test
                 next => {
                     legalHoldParams.LegalHold.Status = 'OFF';
-                    return s3.putObjectLegalHold(legalHoldParams, next);
+                    return s3.send(new PutObjectLegalHoldCommand(legalHoldParams)).then(() => next()).catch(next);
                 },
             ], done);
         });
diff --git a/tests/functional/aws-node-sdk/test/object/rangeTest.js b/tests/functional/aws-node-sdk/test/object/rangeTest.js
index f2d1df8faf..143ba8a1f6 100644
--- a/tests/functional/aws-node-sdk/test/object/rangeTest.js
+++ b/tests/functional/aws-node-sdk/test/object/rangeTest.js
@@ -3,6 +3,15 @@ const { exec, execFile } = require('child_process');
 const { writeFile, createReadStream } = require('fs');
 const assert = require('assert');
 
+const {
+    CreateBucketCommand,
+    CreateMultipartUploadCommand,
+    UploadPartCommand,
+    CompleteMultipartUploadCommand,
+    AbortMultipartUploadCommand,
+    PutObjectCommand,
+    GetObjectCommand,
+} = require('@aws-sdk/client-s3');
 const withV4 = require('../support/withV4');
 const BucketUtility = require('../../lib/utility/bucket-util');
@@ -37,11 +46,11 @@ function getOuterRange(range, bytes) {
 // Get the ranged object from a bucket. Write the response body to a file, then
 // use getRangeExec to check that all the bytes are in the correct location.
 function checkRanges(range, bytes) {
-    return s3.getObject({
+    return s3.send(new GetObjectCommand({
         Bucket: bucket,
         Key: key,
         Range: `bytes=${range}`,
-    }).promise()
+    }))
     .then(res => {
         const { begin, end } = getOuterRange(range, bytes);
         const total = (end - begin) + 1;
@@ -75,13 +84,13 @@ async function uploadParts(bytes, uploadId) {
             `skip=${part - 1}`,
             'count=1',
         ]);
-        await s3.uploadPart({
+        await s3.send(new UploadPartCommand({
             Bucket: bucket,
             Key: key,
             PartNumber: part,
             UploadId: uploadId,
             Body: createReadStream(`${name}.mpuPart${part}`),
-        }).promise();
+        }));
     } catch (error) {
         throw new Error(`Error uploading part ${part}: ${error.message}`);
     }
@@ -107,17 +116,17 @@ describeSkipIfCeph('aws-node-sdk range tests', () => {
         let uploadId;
 
         beforeEach(() =>
-            s3.createBucket({ Bucket: bucket }).promise()
-            .then(() => s3.createMultipartUpload({
+            s3.send(new CreateBucketCommand({ Bucket: bucket }))
+            .then(() => s3.send(new CreateMultipartUploadCommand({
                 Bucket: bucket,
                 Key: key,
-            }).promise())
+            })))
             .then(res => {
                 uploadId = res.UploadId;
             })
             .then(() => createHashedFile(fileSize))
             .then(() => uploadParts(fileSize, uploadId))
-            .then(res => s3.completeMultipartUpload({
+            .then(res => s3.send(new CompleteMultipartUploadCommand({
                 Bucket: bucket,
                 Key: key,
                 UploadId: uploadId,
@@ -133,15 +142,15 @@ describeSkipIfCeph('aws-node-sdk range tests', () => {
                     },
                 ],
             },
-            }).promise())
+            })))
         );
 
         afterEach(() => bucketUtil.empty(bucket)
-            .then(() => s3.abortMultipartUpload({
+            .then(() => s3.send(new AbortMultipartUploadCommand({
                 Bucket: bucket,
                 Key: key,
                 UploadId: uploadId,
-            }).promise())
+            })))
             .catch(err => new Promise((resolve, reject) => {
-                if (err.code !== 'NoSuchUpload') {
+                if (err.name !== 'NoSuchUpload') {
                     reject(err);
@@ -174,13 +183,13 @@ describeSkipIfCeph('aws-node-sdk range tests', () => {
         const fileSize = 2000;
 
         beforeEach(() =>
-            s3.createBucket({ Bucket: bucket }).promise()
+            s3.send(new CreateBucketCommand({ Bucket: bucket }))
             .then(() => createHashedFile(fileSize))
-            .then(() => s3.putObject({
+            .then(() => s3.send(new PutObjectCommand({
                 Bucket: bucket,
                 Key: key,
                 Body: createReadStream(`hashedFile.${fileSize}`),
-            }).promise()));
+            }))));
 
         afterEach(() => bucketUtil.empty(bucket)
@@ -231,13 +240,13 @@ describeSkipIfCeph('aws-node-sdk range tests', () => {
         const fileSize = 2900;
 
         beforeEach(() =>
-            s3.createBucket({ Bucket: bucket }).promise()
+            s3.send(new CreateBucketCommand({ Bucket: bucket }))
            .then(() => createHashedFile(fileSize))
-            .then(() => s3.putObject({
+            .then(() => s3.send(new PutObjectCommand({
                 Bucket: bucket,
                 Key: key,
                 Body: createReadStream(`hashedFile.${fileSize}`),
-            }).promise())));
+            }))));
 
         afterEach(() => bucketUtil.empty(bucket)
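One v3 behavior worth keeping in mind for checkRanges: GetObject now resolves with Body as a stream rather than a Buffer, so the ranged bytes have to be collected before they can be written to a file. A minimal sketch; the bucket, key, and helper name are illustrative:

    const { S3Client, GetObjectCommand } = require('@aws-sdk/client-s3');

    const client = new S3Client({ region: 'us-east-1' });

    async function getRangedBytes(bucket, key, range) {
        const res = await client.send(new GetObjectCommand({
            Bucket: bucket,
            Key: key,
            Range: `bytes=${range}`,
        }));
        // v3 response streams expose transformToByteArray() for buffering.
        return Buffer.from(await res.Body.transformToByteArray());
    }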
diff --git a/tests/functional/aws-node-sdk/test/object/websiteGet.js b/tests/functional/aws-node-sdk/test/object/websiteGet.js
index 63054f3396..e66144385f 100644
--- a/tests/functional/aws-node-sdk/test/object/websiteGet.js
+++ b/tests/functional/aws-node-sdk/test/object/websiteGet.js
@@ -1,17 +1,45 @@
 const assert = require('assert');
-const async = require('async');
+
+const {
+    S3Client,
+    CreateBucketCommand,
+    DeleteBucketCommand,
+    PutBucketWebsiteCommand,
+    PutObjectCommand,
+    DeleteObjectCommand,
+    PutBucketPolicyCommand,
+} = require('@aws-sdk/client-s3');
 const fs = require('fs');
 const path = require('path');
-
-const { S3 } = require('aws-sdk');
+const async = require('async');
 
 const conf = require('../../../../../lib/Config').config;
 const getConfig = require('../support/config');
 const { makeRequest } = require('../../../raw-node/utils/makeRequest');
 const { WebsiteConfigTester } = require('../../lib/utility/website-util');
 
-const config = getConfig('default', { signatureVersion: 'v4' });
-const s3 = new S3(config);
+const config = getConfig('default');
+const s3Client = new S3Client(config);
+const s3 = {
+    createBucket: (params, cb) => {
+        s3Client.send(new CreateBucketCommand(params)).then(d => cb(null, d)).catch(cb);
+    },
+    deleteBucket: (params, cb) => {
+        s3Client.send(new DeleteBucketCommand(params)).then(d => cb(null, d)).catch(cb);
+    },
+    putBucketWebsite: (params, cb) => {
+        s3Client.send(new PutBucketWebsiteCommand(params)).then(d => cb(null, d)).catch(cb);
+    },
+    putObject: (params, cb) => {
+        s3Client.send(new PutObjectCommand(params)).then(d => cb(null, d)).catch(cb);
+    },
+    deleteObject: (params, cb) => {
+        s3Client.send(new DeleteObjectCommand(params)).then(d => cb(null, d)).catch(cb);
+    },
+    putBucketPolicy: (params, cb) => {
+        s3Client.send(new PutBucketPolicyCommand(params)).then(d => cb(null, d)).catch(cb);
+    },
+};
 
 const transport = conf.https ? 'https' : 'http';
 const bucket = process.env.AWS_ON_AIR ? 'awsbucketwebsitetester' :
diff --git a/tests/functional/aws-node-sdk/test/object/websiteGetWithACL.js b/tests/functional/aws-node-sdk/test/object/websiteGetWithACL.js
index b257ac96a2..6661094e2f 100644
--- a/tests/functional/aws-node-sdk/test/object/websiteGetWithACL.js
+++ b/tests/functional/aws-node-sdk/test/object/websiteGetWithACL.js
@@ -1,11 +1,11 @@
-const { S3 } = require('aws-sdk');
+const { S3Client } = require('@aws-sdk/client-s3');
 
 const conf = require('../../../../../lib/Config').config;
 const getConfig = require('../support/config');
 const { WebsiteConfigTester } = require('../../lib/utility/website-util');
 
-const config = getConfig('default', { signatureVersion: 'v4' });
-const s3 = new S3(config);
+const config = getConfig('default');
+const s3 = new S3Client(config);
 
 // Note: To run these tests locally, you may need to edit the machine's
 // /etc/hosts file to include the following line:
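The hand-written callback shim above keeps every existing call site in websiteGet.js untouched. The same wrapper can also be generated from a method-to-command map instead of being spelled out per method; a sketch, where makeShim is illustrative and not part of the patch:

    const { S3Client, CreateBucketCommand, PutObjectCommand } = require('@aws-sdk/client-s3');

    function makeShim(client, commandMap) {
        const shim = {};
        for (const [method, CommandClass] of Object.entries(commandMap)) {
            // Each shimmed method keeps the v2 (params, callback) signature.
            shim[method] = (params, cb) => {
                client.send(new CommandClass(params))
                    .then(data => cb(null, data))
                    .catch(cb);
            };
        }
        return shim;
    }

    // Usage mirroring the object literal above:
    const s3 = makeShim(new S3Client({}), {
        createBucket: CreateBucketCommand,
        putObject: PutObjectCommand,
    });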
diff --git a/tests/functional/aws-node-sdk/test/object/websiteHead.js b/tests/functional/aws-node-sdk/test/object/websiteHead.js
index d0f81d2198..0d26fda09a 100644
--- a/tests/functional/aws-node-sdk/test/object/websiteHead.js
+++ b/tests/functional/aws-node-sdk/test/object/websiteHead.js
@@ -1,17 +1,23 @@
 const assert = require('assert');
-const async = require('async');
 const fs = require('fs');
 const path = require('path');
 
-const { S3 } = require('aws-sdk');
+const {
+    S3Client,
+    CreateBucketCommand,
+    DeleteBucketCommand,
+    PutBucketWebsiteCommand,
+    PutObjectCommand,
+    DeleteObjectCommand,
+    PutBucketPolicyCommand,
+} = require('@aws-sdk/client-s3');
 
 const conf = require('../../../../../lib/Config').config;
 const getConfig = require('../support/config');
 const { WebsiteConfigTester } = require('../../lib/utility/website-util');
 
-const config = getConfig('default', { signatureVersion: 'v4' });
-const s3 = new S3(config);
-
+const config = getConfig('default');
+const s3 = new S3Client(config);
 // Note: To run these tests locally, you may need to edit the machine's
 // /etc/hosts file to include the following line:
 // `127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com`
@@ -90,9 +96,9 @@ describe('Head request on bucket website endpoint', () => {
     });
 
     describe('with existing bucket', () => {
-        beforeEach(done => s3.createBucket({ Bucket: bucket }, done));
+        beforeEach(() => s3.send(new CreateBucketCommand({ Bucket: bucket })));
 
-        afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));
+        afterEach(() => s3.send(new DeleteBucketCommand({ Bucket: bucket })));
 
         it('should return 404 when no website configuration', done => {
             const expectedHeaders = {
@@ -105,32 +111,22 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe('with existing configuration', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, err => {
-                    assert.strictEqual(err,
-                        null, `Found unexpected err ${err}`);
-                    s3.putObject({ Bucket: bucket, Key: 'index.html',
-                        ACL: 'public-read',
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig }));
+                await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'index.html',
+                    ACL: 'public-read',
                     Body: fs.readFileSync(path.join(__dirname,
                         '/websiteFiles/index.html')),
                     ContentType: 'text/html',
                     Metadata: {
                         test: 'value',
                     },
-                    },
-                    err => {
-                        assert.strictEqual(err, null);
-                        done();
-                    });
-                });
+                }));
             });
 
-            afterEach(done => {
-                s3.deleteObject({ Bucket: bucket, Key: 'index.html' },
-                    err => done(err));
-            });
+            afterEach(() => s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: 'index.html' })));
 
             it('should return indexDocument headers if no key ' +
                 'requested', done => {
@@ -145,14 +141,14 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe('with path prefix in request with/without key', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, err => {
-                    assert.strictEqual(err,
-                        null, `Found unexpected err ${err}`);
-                    s3.putObject({ Bucket: bucket,
-                        Key: 'pathprefix/index.html',
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
+                });
+                await s3.send(new PutObjectCommand({ Bucket: bucket,
+                    Key: 'pathprefix/index.html',
                     ACL: 'public-read',
                     Body: fs.readFileSync(path.join(__dirname,
                         '/websiteFiles/index.html')),
@@ -160,15 +156,12 @@ describe('Head request on bucket website endpoint', () => {
                     Metadata: {
                         test: 'value',
                     },
-                    }, done);
-                });
+                })).catch(err => {
+                    assert.strictEqual(err, null);
+                });
             });
 
-            afterEach(done => {
-                s3.deleteObject({ Bucket: bucket, Key:
-                    'pathprefix/index.html' },
-                    done);
-            });
+            afterEach(async () => s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: 'pathprefix/index.html' })));
 
             it('should serve indexDocument if path request without key',
                 done => {
@@ -185,24 +178,23 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe('with private key', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, err => {
-                    assert.strictEqual(err,
-                        null, `Found unexpected err ${err}`);
-                    s3.putObject({ Bucket: bucket,
-                        Key: 'index.html',
-                        ACL: 'private',
-                        Body: fs.readFileSync(path.join(__dirname,
-                            '/websiteFiles/index.html')),
-                        ContentType: 'text/html' }, done);
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
+                });
+                await s3.send(new PutObjectCommand({ Bucket: bucket,
+                    Key: 'index.html',
+                    ACL: 'private',
+                    Body: fs.readFileSync(path.join(__dirname,
+                        '/websiteFiles/index.html')),
+                    ContentType: 'text/html' })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
                 });
             });
 
-            afterEach(done => {
-                s3.deleteObject({ Bucket: bucket, Key: 'index.html' }, done);
-            });
+            afterEach(() => s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: 'index.html' })));
 
             it('should return 403 if key is private', done => {
                 const expectedHeaders = {
@@ -215,10 +207,10 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe('with nonexisting index document key', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, done);
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig }));
            });
 
             it('should return 403 if nonexisting index document key', done => {
@@ -232,14 +224,16 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe(`redirect all requests to ${redirectEndpoint}`, () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const redirectAllTo = {
                     HostName: 'www.google.com',
                 };
                 const webConfig = new WebsiteConfigTester(null, null,
                     redirectAllTo);
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, done);
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
+                });
             });
 
             it(`should redirect to ${redirectEndpoint}`, done => {
@@ -264,15 +258,17 @@ describe('Head request on bucket website endpoint', () => {
             // Note: these tests will all redirect to https even if
             // conf does not have https since protocol in website config
             // specifies https
-            beforeEach(done => {
+            beforeEach(async () => {
                 const redirectAllTo = {
                     HostName: 'www.google.com',
                     Protocol: 'https',
                 };
                 const webConfig = new WebsiteConfigTester(null, null,
                     redirectAllTo);
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, done);
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
+                });
             });
 
             it('should redirect to https://google.com', done => {
@@ -293,25 +289,24 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe('with custom error document', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html',
                     'error.html');
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, err => {
-                    assert.strictEqual(err,
-                        null, `Found unexpected err ${err}`);
-                    s3.putObject({ Bucket: bucket,
-                        Key: 'error.html',
-                        ACL: 'public-read',
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
+                });
+                await s3.send(new PutObjectCommand({ Bucket: bucket,
+                    Key: 'error.html',
+                    ACL: 'public-read',
                     Body: fs.readFileSync(path.join(__dirname,
                         '/websiteFiles/error.html')),
-                    ContentType: 'text/html' }, done);
+                    ContentType: 'text/html' })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
                 });
             });
 
-            afterEach(done => {
-                s3.deleteObject({ Bucket: bucket, Key: 'error.html' }, done);
-            });
+            afterEach(() => s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: 'error.html' })));
 
             it('should return regular error headers regardless of whether ' +
                 'custom error document', done => {
@@ -325,7 +320,7 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe('redirect to hostname with error code condition', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
                 const condition = {
                     HttpErrorCodeReturnedEquals: '403',
@@ -334,8 +329,10 @@ describe('Head request on bucket website endpoint', () => {
                     HostName: 'www.google.com',
                 };
                 webConfig.addRoutingRule(redirect, condition);
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, done);
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
+                });
            });
 
             it(`should redirect to ${redirectEndpoint} if error 403` +
@@ -349,7 +346,7 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe('redirect to hostname with prefix condition', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
                 const condition = {
                     KeyPrefixEquals: 'about/',
@@ -358,8 +355,10 @@ describe('Head request on bucket website endpoint', () => {
                     HostName: 'www.google.com',
                 };
                 webConfig.addRoutingRule(redirect, condition);
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, done);
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
+                });
            });
 
             it(`should redirect to ${redirectEndpoint}about if ` +
@@ -374,7 +373,7 @@ describe('Head request on bucket website endpoint', () => {
 
         describe('redirect to hostname with prefix and error condition',
             () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
                 const condition = {
                     KeyPrefixEquals: 'about/',
@@ -384,8 +383,10 @@ describe('Head request on bucket website endpoint', () => {
                     HostName: 'www.google.com',
                 };
                 webConfig.addRoutingRule(redirect, condition);
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, done);
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
+                });
            });
 
             it(`should redirect to ${redirectEndpoint} if ` +
@@ -399,7 +400,7 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe('redirect with multiple redirect rules', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
                 const conditions = {
                     KeyPrefixEquals: 'about/',
@@ -412,8 +413,10 @@ describe('Head request on bucket website endpoint', () => {
                 };
                 webConfig.addRoutingRule(redirectOne, conditions);
                 webConfig.addRoutingRule(redirectTwo, conditions);
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, done);
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
+                });
            });
 
             it('should redirect based on first rule', done => {
@@ -427,7 +430,7 @@ describe('Head request on bucket website endpoint', () => {
 
         describe('redirect with protocol',
             () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
                 const condition = {
                     KeyPrefixEquals: 'about/',
@@ -437,8 +440,10 @@ describe('Head request on bucket website endpoint', () => {
                     HostName: 'www.google.com',
                 };
                 webConfig.addRoutingRule(redirect, condition);
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, done);
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
+                });
            });
 
             it('should redirect to https://www.google.com/about if ' +
@@ -452,7 +457,7 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe('redirect to key using ReplaceKeyWith', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
                 const condition = {
                     HttpErrorCodeReturnedEquals: '403',
@@ -461,14 +466,13 @@ describe('Head request on bucket website endpoint', () => {
                     ReplaceKeyWith: 'redirect.html',
                 };
                 webConfig.addRoutingRule(redirect, condition);
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, done);
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
+                });
            });
 
-            afterEach(done => {
-                s3.deleteObject({ Bucket: bucket, Key: 'redirect.html' },
-                    err => done(err));
-            });
+            afterEach(() => s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: 'redirect.html' })));
 
             it('should redirect to specified file if 403 error ' +
                 'error occured', done => {
@@ -481,7 +485,7 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe('redirect using ReplaceKeyPrefixWith', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
                 const condition = {
                     HttpErrorCodeReturnedEquals: '403',
@@ -491,8 +495,10 @@ describe('Head request on bucket website endpoint', () => {
                     ReplaceKeyPrefixWith: 'about',
                 };
                 webConfig.addRoutingRule(redirect, condition);
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, done);
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
+                });
            });
 
             it(`should redirect to ${redirectEndpoint}about if ` +
@@ -507,7 +513,7 @@ describe('Head request on bucket website endpoint', () => {
 
         describe('redirect requests with prefix /about to redirect/',
             () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
                 const condition = {
                     KeyPrefixEquals: 'about/',
@@ -516,14 +522,14 @@ describe('Head request on bucket website endpoint', () => {
                     ReplaceKeyPrefixWith: 'redirect/',
                 };
                 webConfig.addRoutingRule(redirect, condition);
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, done);
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
+                });
            });
 
-            afterEach(done => {
-                s3.deleteObject({ Bucket: bucket, Key: 'redirect/index.html' },
-                    err => done(err));
-            });
+            afterEach(async () => s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: 'redirect/index.html' })));
+
 
             it('should redirect to "redirect/" object if key prefix is equal ' +
                 'to "about/"', done => {
@@ -537,7 +543,7 @@ describe('Head request on bucket website endpoint', () => {
 
         describe('redirect requests, with both prefix and error code ' +
             'condition', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
                 const condition = {
                     KeyPrefixEquals: 'about/',
@@ -547,14 +553,13 @@ describe('Head request on bucket website endpoint', () => {
                     ReplaceKeyPrefixWith: 'redirect/',
                 };
                 webConfig.addRoutingRule(redirect, condition);
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, done);
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig })).catch(err => {
+                    assert.strictEqual(err, null, `Found unexpected err ${err}`);
+                });
            });
 
-            afterEach(done => {
-                s3.deleteObject({ Bucket: bucket, Key: 'redirect/index.html' },
-                    err => done(err));
-            });
+            afterEach(() => s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: 'redirect/index.html' })));
 
             it('should redirect to "redirect" object if key prefix is equal ' +
                 'to "about/" and there is a 403 error satisfying the ' +
@@ -569,13 +574,11 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe('object redirect to /', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, err => {
-                    assert.strictEqual(err,
-                        null, `Found unexpected err ${err}`);
-                    s3.putObject({ Bucket: bucket, Key: 'index.html',
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig }));
+                await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'index.html',
                     ACL: 'public-read',
                     Body: fs.readFileSync(path.join(__dirname,
                         '/websiteFiles/index.html')),
@@ -584,18 +587,10 @@ describe('Head request on bucket website endpoint', () => {
                         test: 'value',
                     },
                     WebsiteRedirectLocation: '/',
-                },
-                err => {
-                    assert.strictEqual(err, null);
-                    done();
-                });
-                });
+                }));
            });
 
-            afterEach(done => {
-                s3.deleteObject({ Bucket: bucket, Key: 'index.html' },
-                    err => done(err));
-            });
+            afterEach(() => s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: 'index.html' })));
 
             it('should redirect to /', done => {
                 const expectedHeaders = {
@@ -607,48 +602,35 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe('with bucket policy', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, err => {
-                    assert.strictEqual(err,
-                        null, `Found unexpected err ${err}`);
-                    s3.putBucketPolicy({ Bucket: bucket, Policy: JSON.stringify(
-                        {
-                            Version: '2012-10-17',
-                            Statement: [{
-                                Sid: 'PublicReadGetObject',
-                                Effect: 'Allow',
-                                Principal: '*',
-                                Action: ['s3:GetObject'],
-                                Resource: [
-                                    `arn:aws:s3:::${bucket}/index.html`,
-                                    `arn:aws:s3:::${bucket}/access.html`,
-                                ],
-                            }],
-                        }
-                    ) }, err => {
-                        assert.strictEqual(err,
-                            null, `Found unexpected err ${err}`);
-                        s3.putObject({ Bucket: bucket, Key: 'index.html',
-                            Body: fs.readFileSync(path.join(__dirname,
-                                '/websiteFiles/index.html')),
-                            ContentType: 'text/html',
-                            Metadata: {
-                                test: 'value',
-                            } },
-                            err => {
-                                assert.strictEqual(err, null);
-                                done();
-                            });
-                    });
-                });
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig }));
+                await s3.send(new PutBucketPolicyCommand({ Bucket: bucket, Policy: JSON.stringify(
+                    {
+                        Version: '2012-10-17',
+                        Statement: [{
+                            Sid: 'PublicReadGetObject',
+                            Effect: 'Allow',
+                            Principal: '*',
+                            Action: ['s3:GetObject'],
+                            Resource: [
+                                `arn:aws:s3:::${bucket}/index.html`,
+                                `arn:aws:s3:::${bucket}/access.html`,
+                            ],
+                        }],
+                    }
+                )}));
+                await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'index.html',
+                    Body: fs.readFileSync(path.join(__dirname,
+                        '/websiteFiles/index.html')),
+                    ContentType: 'text/html',
+                    Metadata: {
+                        test: 'value',
+                    }}));
            });
 
-            afterEach(done => {
-                s3.deleteObject({ Bucket: bucket, Key: 'index.html' },
-                    err => done(err));
-            });
+            afterEach(() => s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: 'index.html' })));
 
             it('should return indexDocument headers if no key ' +
                 'requested', done => {
@@ -678,7 +660,7 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe('with routing rule on index', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
                 const condition = {
                     KeyPrefixEquals: 'index.html',
@@ -687,30 +669,20 @@ describe('Head request on bucket website endpoint', () => {
                     ReplaceKeyWith: 'whatever.html',
                 };
                 webConfig.addRoutingRule(redirect, condition);
-                s3.putBucketWebsite({ Bucket: bucket,
-                    WebsiteConfiguration: webConfig }, err => {
-                    assert.strictEqual(err,
-                        null, `Found unexpected err ${err}`);
-                    s3.putObject({ Bucket: bucket, Key: 'index.html',
-                        ACL: 'public-read',
-                        Body: fs.readFileSync(path.join(__dirname,
-                            '/websiteFiles/index.html')),
-                        ContentType: 'text/html',
-                        Metadata: {
-                            test: 'value',
-                        },
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig }));
+                await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'index.html',
+                    ACL: 'public-read',
+                    Body: fs.readFileSync(path.join(__dirname,
+                        '/websiteFiles/index.html')),
+                    ContentType: 'text/html',
+                    Metadata: {
+                        test: 'value',
                     },
-                    err => {
-                        assert.strictEqual(err, null);
-                        done();
-                    });
-                });
+                }));
            });
 
-            afterEach(done => {
-                s3.deleteObject({ Bucket: bucket, Key: 'index.html' },
-                    err => done(err));
-            });
+            afterEach(() => s3.send(new DeleteObjectCommand({ Bucket: bucket, Key: 'index.html' })));
 
             it('should not redirect if index key is not explicit', done => {
                 WebsiteConfigTester.makeHeadRequest(undefined, endpoint,
@@ -719,7 +691,7 @@ describe('Head request on bucket website endpoint', () => {
         });
 
         describe('without trailing / for recursive index check', () => {
-            beforeEach(done => {
+            beforeEach(async () => {
                 const webConfig = new WebsiteConfigTester('index.html');
                 const object = {
                     Bucket: bucket,
@@ -727,50 +699,41 @@ describe('Head request on bucket website endpoint', () => {
                         '/websiteFiles/index.html')),
                     ContentType: 'text/html',
                 };
-                async.waterfall([
-                    next => s3.putBucketWebsite({ Bucket: bucket,
-                        WebsiteConfiguration: webConfig }, next),
-                    (data, next) => s3.putBucketPolicy({ Bucket: bucket,
-                        Policy: JSON.stringify({
-                            Version: '2012-10-17',
-                            Statement: [{
-                                Sid: 'PublicReadGetObject',
-                                Effect: 'Allow',
-                                Principal: '*',
-                                Action: ['s3:GetObject'],
-                                Resource: [
-                                    `arn:aws:s3:::${bucket}/original_key_file`,
-                                    `arn:aws:s3:::${bucket}/original_key_nofile`,
-                                    `arn:aws:s3:::${bucket}/file/*`,
-                                    `arn:aws:s3:::${bucket}/nofile/*`,
-                                ],
-                            }],
-                        }),
-                    }, next),
-                    (data, next) => s3.putObject(Object.assign({}, object,
-                        { Key: 'original_key_file/index.html' }), next),
-                    (data, next) => s3.putObject(Object.assign({}, object,
-                        { Key: 'file/index.html' }), next), // the redirect 302
-                    (data, next) => s3.putObject(Object.assign({}, object,
-                        { Key: 'no_access_file/index.html' }), next),
-                ], err => {
-                    assert.ifError(err);
-                    done();
-                });
-            });
-
-            afterEach(done => {
-                async.waterfall([
-                    next => s3.deleteObject({ Bucket: bucket,
-                        Key: 'original_key_file/index.html' }, next),
-                    (data, next) => s3.deleteObject({ Bucket: bucket,
-                        Key: 'file/index.html' }, next),
-                    (data, next) => s3.deleteObject({ Bucket: bucket,
-                        Key: 'no_access_file/index.html' }, next),
-                ], err => {
-                    assert.ifError(err);
-                    done();
-                });
+
+                await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket,
+                    WebsiteConfiguration: webConfig }));
+                await s3.send(new PutBucketPolicyCommand({ Bucket: bucket,
+                    Policy: JSON.stringify({
+                        Version: '2012-10-17',
+                        Statement: [{
+                            Sid: 'PublicReadGetObject',
+                            Effect: 'Allow',
+                            Principal: '*',
+                            Action: ['s3:GetObject'],
+                            Resource: [
+                                `arn:aws:s3:::${bucket}/original_key_file`,
+                                `arn:aws:s3:::${bucket}/original_key_nofile`,
+                                `arn:aws:s3:::${bucket}/file/*`,
+                                `arn:aws:s3:::${bucket}/nofile/*`,
+                            ],
+                        }],
+                    })
+                }));
+                await s3.send(new PutObjectCommand(Object.assign({}, object,
+                    { Key: 'original_key_file/index.html' })));
+                await s3.send(new PutObjectCommand(Object.assign({}, object,
+                    { Key: 'file/index.html' }))); // the redirect 302
+                await s3.send(new PutObjectCommand(Object.assign({}, object,
+                    { Key: 'no_access_file/index.html' })));
+            });
+
+            afterEach(async () => {
+                await s3.send(new DeleteObjectCommand({ Bucket: bucket,
+                    Key: 'original_key_file/index.html' }));
+                await s3.send(new DeleteObjectCommand({ Bucket: bucket,
+                    Key: 'file/index.html' }));
+                await s3.send(new DeleteObjectCommand({ Bucket: bucket,
+                    Key: 'no_access_file/index.html' }));
            });
 
             it('should redirect 302 with trailing / on folder with index', done => {
diff --git a/tests/functional/aws-node-sdk/test/object/websiteHeadWithACL.js b/tests/functional/aws-node-sdk/test/object/websiteHeadWithACL.js
index 1defd8730d..79ecf2d2b5 100644
--- a/tests/functional/aws-node-sdk/test/object/websiteHeadWithACL.js
+++ b/tests/functional/aws-node-sdk/test/object/websiteHeadWithACL.js
@@ -1,11 +1,11 @@
-const { S3 } = require('aws-sdk');
+const { S3Client } = require('@aws-sdk/client-s3');
 
 const conf = require('../../../../../lib/Config').config;
 const getConfig = require('../support/config');
 const { WebsiteConfigTester } = require('../../lib/utility/website-util');
 
-const config = getConfig('default', { signatureVersion: 'v4' });
-const s3 = new S3(config);
+const config = getConfig('default');
+const s3 = new S3Client(config);
 
 // Note: To run these tests locally, you may need to edit the machine's
 // /etc/hosts file to include the following line:
redirectEndpoint = conf.https ? 'https://www.google.com/' : describe('User visits bucket website endpoint and requests resource ' + 'that has x-amz-website-redirect-location header ::', () => { - beforeEach(done => s3.createBucket({ Bucket: bucket }, done)); + beforeEach(async () => await s3.send(new CreateBucketCommand({ Bucket: bucket }))); - afterEach(done => s3.deleteBucket({ Bucket: bucket }, done)); + afterEach(async () => await s3.send(new DeleteBucketCommand({ Bucket: bucket }))); describe('when x-amz-website-redirect-location: /redirect.html', () => { - beforeEach(() => { + beforeEach(async () => { const webConfig = new WebsiteConfigTester('index.html'); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }).promise() - .then(() => s3.putObject({ Bucket: bucket, + await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket, + WebsiteConfiguration: webConfig })); + await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'index.html', ACL: 'public-read', Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), ContentType: 'text/html', - WebsiteRedirectLocation: '/redirect.html' }).promise()) - .then(() => s3.putObject({ Bucket: bucket, + WebsiteRedirectLocation: '/redirect.html' })); + await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'redirect.html', ACL: 'public-read', Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/redirect.html')), - ContentType: 'text/html' }).promise()); + ContentType: 'text/html' })); }); - afterEach(() => bucketUtil.empty(bucket)); + afterEach(async () => await bucketUtil.empty(bucket)); it('should serve redirect file on GET request', done => { WebsiteConfigTester.checkHTML({ @@ -74,20 +80,20 @@ describe('User visits bucket website endpoint and requests resource ' + describe('when x-amz-website-redirect-location: https://www.google.com', () => { - beforeEach(() => { + beforeEach(async () => { const webConfig = new WebsiteConfigTester('index.html'); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }).promise() - .then(() => s3.putObject({ Bucket: bucket, + await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket, + WebsiteConfiguration: webConfig })); + await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'index.html', ACL: 'public-read', Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), ContentType: 'text/html', - WebsiteRedirectLocation: 'https://www.google.com' }).promise()); + WebsiteRedirectLocation: 'https://www.google.com' })); }); - afterEach(() => bucketUtil.empty(bucket)); + afterEach(async () => await bucketUtil.empty(bucket)); it('should redirect to https://www.google.com', done => { WebsiteConfigTester.checkHTML({ @@ -110,19 +116,19 @@ describe('User visits bucket website endpoint and requests resource ' + }); describe('when key with header is private', () => { - beforeEach(() => { + beforeEach(async () => { const webConfig = new WebsiteConfigTester('index.html'); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }).promise() - .then(() => s3.putObject({ Bucket: bucket, + await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket, + WebsiteConfiguration: webConfig })); + await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'index.html', Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), ContentType: 'text/html', - WebsiteRedirectLocation: 'https://www.google.com' }).promise()); + WebsiteRedirectLocation: 'https://www.google.com' })); }); - afterEach(() => 
bucketUtil.empty(bucket)); + afterEach(async () => await bucketUtil.empty(bucket)); it('should return 403 instead of x-amz-website-redirect-location ' + 'header location', done => { @@ -145,7 +151,7 @@ describe('User visits bucket website endpoint and requests resource ' + describe('when key with header is private' + 'and website config has error condition routing rule', () => { - beforeEach(() => { + beforeEach(async () => { const webConfig = new WebsiteConfigTester('index.html'); const condition = { HttpErrorCodeReturnedEquals: '403', @@ -154,23 +160,23 @@ describe('User visits bucket website endpoint and requests resource ' + HostName: 'www.google.com', }; webConfig.addRoutingRule(redirect, condition); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }).promise() - .then(() => s3.putObject({ Bucket: bucket, + await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket, + WebsiteConfiguration: webConfig })); + await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'index.html', Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), ContentType: 'text/html', - WebsiteRedirectLocation: '/redirect.html' }).promise()) - .then(() => s3.putObject({ Bucket: bucket, + WebsiteRedirectLocation: '/redirect.html' })); + await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'redirect.html', ACL: 'public-read', Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/redirect.html')), - ContentType: 'text/html' }).promise()); + ContentType: 'text/html' })); }); - afterEach(() => bucketUtil.empty(bucket)); + afterEach(async () => await bucketUtil.empty(bucket)); it(`should redirect to ${redirectEndpoint} since error 403 ` + 'occurred instead of x-amz-website-redirect-location header ' + @@ -197,24 +203,24 @@ describe('User visits bucket website endpoint and requests resource ' + }); describe(`with redirect all requests to ${redirectEndpoint}`, () => { - beforeEach(() => { + beforeEach(async () => { const redirectAllTo = { HostName: 'www.google.com', }; const webConfig = new WebsiteConfigTester(null, null, redirectAllTo); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }).promise() - .then(() => s3.putObject({ Bucket: bucket, + await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket, + WebsiteConfiguration: webConfig })); + await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'index.html', ACL: 'public-read', Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), ContentType: 'text/html', - WebsiteRedirectLocation: '/redirect.html' }).promise()); + WebsiteRedirectLocation: '/redirect.html' })); }); - afterEach(() => bucketUtil.empty(bucket)); + afterEach(async () => await bucketUtil.empty(bucket)); it(`should redirect to ${redirectEndpoint} instead of ` + 'x-amz-website-redirect-location header location on GET request', @@ -241,7 +247,7 @@ describe('User visits bucket website endpoint and requests resource ' + describe('with routing rule redirect to hostname with prefix condition', () => { - beforeEach(() => { + beforeEach(async () => { const webConfig = new WebsiteConfigTester('index.html'); const condition = { KeyPrefixEquals: 'about/', @@ -250,18 +256,18 @@ describe('User visits bucket website endpoint and requests resource ' + HostName: 'www.google.com', }; webConfig.addRoutingRule(redirect, condition); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }).promise() - .then(() => s3.putObject({ Bucket: bucket, + await s3.send(new PutBucketWebsiteCommand({ 
Bucket: bucket, + WebsiteConfiguration: webConfig })); + await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'about/index.html', ACL: 'public-read', Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), ContentType: 'text/html', - WebsiteRedirectLocation: '/redirect.html' }).promise()); + WebsiteRedirectLocation: '/redirect.html' })); }); - afterEach(() => bucketUtil.empty(bucket)); + afterEach(async () => await bucketUtil.empty(bucket)); it(`should redirect GET request to ${redirectEndpoint}about/ ` + 'instead of about/ key x-amz-website-redirect-location ' + @@ -287,7 +293,7 @@ describe('User visits bucket website endpoint and requests resource ' + }); describe('with routing rule replaceKeyWith', () => { - beforeEach(() => { + beforeEach(async () => { const webConfig = new WebsiteConfigTester('index.html'); const condition = { KeyPrefixEquals: 'index.html', @@ -296,24 +302,24 @@ describe('User visits bucket website endpoint and requests resource ' + ReplaceKeyWith: 'redirect.html', }; webConfig.addRoutingRule(redirect, condition); - return s3.putBucketWebsite({ Bucket: bucket, - WebsiteConfiguration: webConfig }).promise() - .then(() => s3.putObject({ Bucket: bucket, + await s3.send(new PutBucketWebsiteCommand({ Bucket: bucket, + WebsiteConfiguration: webConfig })); + await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'index.html', ACL: 'public-read', Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/index.html')), ContentType: 'text/html', - WebsiteRedirectLocation: 'https://www.google.com' }).promise()) - .then(() => s3.putObject({ Bucket: bucket, + WebsiteRedirectLocation: 'https://www.google.com' })); + await s3.send(new PutObjectCommand({ Bucket: bucket, Key: 'redirect.html', ACL: 'public-read', Body: fs.readFileSync(path.join(__dirname, '/websiteFiles/redirect.html')), - ContentType: 'text/html' }).promise()); + ContentType: 'text/html' })); }); - afterEach(() => bucketUtil.empty(bucket)); + afterEach(async () => await bucketUtil.empty(bucket)); it('should replace key instead of redirecting to key ' + 'x-amz-website-redirect-location header location on GET request', diff --git a/tests/functional/aws-node-sdk/test/support/config.js b/tests/functional/aws-node-sdk/test/support/config.js index 4d75aa6c7f..d18294a71f 100644 --- a/tests/functional/aws-node-sdk/test/support/config.js +++ b/tests/functional/aws-node-sdk/test/support/config.js @@ -30,7 +30,7 @@ const DEFAULT_MEM_OPTIONS = { maxAttempts: 3, requestHandler: new NodeHttpHandler({ connectionTimeout: 5000, - socketTimeout: 5000, + requestTimeout: 5000, httpAgent: new (ssl ? 
https : http).Agent({ maxSockets: 200, keepAlive: true, diff --git a/tests/functional/aws-node-sdk/test/versioning/multiObjectDelete.js b/tests/functional/aws-node-sdk/test/versioning/multiObjectDelete.js index 6014e5dd51..d04e972bf3 100644 --- a/tests/functional/aws-node-sdk/test/versioning/multiObjectDelete.js +++ b/tests/functional/aws-node-sdk/test/versioning/multiObjectDelete.js @@ -4,6 +4,13 @@ const async = require('async'); const withV4 = require('../support/withV4'); const BucketUtility = require('../../lib/utility/bucket-util'); const { removeAllVersions } = require('../../lib/utility/versioning-util'); +const { DeleteObjectsCommand, + DeleteObjectCommand, + PutObjectCommand, + CreateBucketCommand, + DeleteBucketCommand, + PutBucketVersioningCommand, + ListObjectVersionsCommand} = require('@aws-sdk/client-s3'); const bucketName = `multi-object-delete-${Date.now()}`; const key = 'key'; @@ -41,32 +48,29 @@ describe('Multi-Object Versioning Delete Success', function success() { beforeEach(done => { async.waterfall([ - next => s3.createBucket({ Bucket: bucketName }, + next => s3.send(new CreateBucketCommand({ Bucket: bucketName }), err => next(err)), - next => s3.putBucketVersioning({ + next => s3.send(new PutBucketVersioningCommand({ Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', }, - }, err => next(err)), + })).then(res => next(null, res)).catch(err => next(err)), next => { const objects = []; for (let i = 1; i < 1001; i++) { objects.push(`${key}${i}`); } async.mapLimit(objects, 20, (key, next) => { - s3.putObject({ + s3.send(new PutObjectCommand({ Bucket: bucketName, Key: key, Body: 'somebody', - }, (err, res) => { - if (err) { - return next(err); - } + })).then(res => { // eslint-disable-next-line no-param-reassign res.Key = key; return next(null, res); - }); + }).catch(err => next(err)); }, (err, results) => { if (err) { return next(err); @@ -78,31 +82,24 @@ describe('Multi-Object Versioning Delete Success', function success() { ], err => done(err)); }); - afterEach(done => { - removeAllVersions({ Bucket: bucketName }, err => { - if (err) { - return done(err); - } - return s3.deleteBucket({ Bucket: bucketName }, err => { - assert.strictEqual(err, null, - `Error deleting bucket: ${err}`); - return done(); - }); - }); + afterEach(async () => { + await removeAllVersions({ Bucket: bucketName }); + await s3.send(new DeleteBucketCommand({ Bucket: bucketName })); }); + it('should batch delete 1000 objects quietly', () => { const objects = objectsRes.slice(0, 1000).map(obj => ({ Key: obj.Key, VersionId: obj.VersionId })); - return s3.deleteObjects({ + return s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, Quiet: true, }, - }).promise().then(res => { - assert.strictEqual(res.Deleted.length, 0); - assert.strictEqual(res.Errors.length, 0); + })).then(res => { + assert.strictEqual(res.Deleted, undefined); + assert.strictEqual(res.Errors, undefined); }).catch(err => { checkNoError(err); }); @@ -111,18 +108,18 @@ describe('Multi-Object Versioning Delete Success', function success() { it('should batch delete 1000 objects', () => { const objects = objectsRes.slice(0, 1000).map(obj => ({ Key: obj.Key, VersionId: obj.VersionId })); - return s3.deleteObjects({ + return s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, Quiet: false, }, - }).promise().then(res => { + })).then(res => { assert.strictEqual(res.Deleted.length, 1000); // order of returned objects not sorted assert.deepStrictEqual(sortList(res.Deleted), 
sortList(objects)); - assert.strictEqual(res.Errors.length, 0); + assert.strictEqual(res.Errors, undefined); }).catch(err => { checkNoError(err); }); @@ -133,12 +130,12 @@ describe('Multi-Object Versioning Delete Success', function success() { const objects = objectsRes.slice(0, 1000).map(obj => ({ Key: obj.Key, VersionId: obj.VersionId })); objects[0].VersionId = 'invalid-version-id'; - return s3.deleteObjects({ + return s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, }, - }).promise().then(res => { + })).then(res => { assert.strictEqual(res.Deleted.length, 999); assert.strictEqual(res.Errors.length, 1); assert.strictEqual(res.Errors[0].Code, 'NoSuchVersion'); @@ -153,14 +150,14 @@ describe('Multi-Object Versioning Delete Success', function success() { const objects = objectsRes.slice(0, 1000).map(obj => ({ Key: obj.Key, VersionId: obj.VersionId })); objects[0].VersionId = nonExistingId; - return s3.deleteObjects({ + return s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, }, - }).promise().then(res => { + })).then(res => { assert.strictEqual(res.Deleted.length, 1000); - assert.strictEqual(res.Errors.length, 0); + assert.strictEqual(res.Errors, undefined); const foundVersionId = res.Deleted.find(entry => entry.VersionId === nonExistingId); assert(foundVersionId); @@ -173,16 +170,15 @@ describe('Multi-Object Versioning Delete Success', function success() { it('should not crash when deleting a null versionId that does not exist', () => { const objects = [{ Key: objectsRes[0].Key, VersionId: 'null' }]; - return s3.deleteObjects({ + return s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: objects, }, - }).promise().then(res => { + })).then(res => { assert.deepStrictEqual(res.Deleted, [{ Key: objectsRes[0].Key, VersionId: 'null' }]); - assert.strictEqual(res.Errors.length, 0); - }) - .catch(err => { + assert.strictEqual(res.Errors, undefined); + }).catch(err => { checkNoError(err); }); }); @@ -197,128 +193,113 @@ describe('Multi-Object Versioning Delete - deleting delete marker', beforeEach(done => { async.waterfall([ - next => s3.createBucket({ Bucket: bucketName }, - err => next(err)), - next => s3.putBucketVersioning({ + next => s3.send(new CreateBucketCommand({ Bucket: bucketName })).then(() => + next()).catch(err => next(err)), + next => s3.send(new PutBucketVersioningCommand({ Bucket: bucketName, VersioningConfiguration: { Status: 'Enabled', }, - }, err => next(err)), + })).then(() => next()).catch(err => next(err)), ], done); }); - afterEach(done => { - removeAllVersions({ Bucket: bucketName }, err => { - if (err) { - return done(err); - } - return s3.deleteBucket({ Bucket: bucketName }, err => { - assert.strictEqual(err, null, - `Error deleting bucket: ${err}`); - return done(); - }); - }); + afterEach(async () => { + await removeAllVersions({ Bucket: bucketName }); + await s3.send(new DeleteBucketCommand({ Bucket: bucketName })); }); it('should send back VersionId and DeleteMarkerVersionId both equal ' + - 'to deleteVersionId', done => { - async.waterfall([ - next => s3.putObject({ Bucket: bucketName, Key: key }, - err => next(err)), - next => s3.deleteObject({ Bucket: bucketName, - Key: key }, (err, data) => { - const deleteVersionId = data.VersionId; - next(err, deleteVersionId); - }), - (deleteVersionId, next) => s3.deleteObjects({ Bucket: - bucketName, - Delete: { - Objects: [ - { - Key: key, - VersionId: deleteVersionId, - }, - ], - } }, (err, data) => { - assert.strictEqual(data.Deleted[0].DeleteMarker, 
true); - assert.strictEqual(data.Deleted[0].VersionId, - deleteVersionId); - assert.strictEqual(data.Deleted[0].DeleteMarkerVersionId, - deleteVersionId); - next(err); - }), - ], err => done(err)); + 'to deleteVersionId', async () => { + await new Promise((resolve, reject) => { + async.waterfall([ + next => s3.send(new PutObjectCommand({ Bucket: bucketName, Key: key })).then(() => + next()).catch(err => next(err)), + next => s3.send(new DeleteObjectCommand({ Bucket: bucketName, + Key: key })).then(data => { + const deleteVersionId = data.VersionId; + next(null, deleteVersionId); + }).catch(err => next(err)), + (deleteVersionId, next) => s3.send(new DeleteObjectsCommand({ Bucket: + bucketName, + Delete: { + Objects: [ + { + Key: key, + VersionId: deleteVersionId, + }, + ], + } })).then(data => { + assert.strictEqual(data.Deleted[0].DeleteMarker, true); + assert.strictEqual(data.Deleted[0].VersionId, + deleteVersionId); + assert.strictEqual(data.Deleted[0].DeleteMarkerVersionId, + deleteVersionId); + next(null, data); + }).catch(err => next(err)), + ], err => { + if (err) { + reject(err); + } else { + resolve(); + } + }); + }); }); it('should send back a DeleteMarkerVersionId matching the versionId ' + 'stored for the object if trying to delete an object that does not exist', done => { - s3.deleteObjects({ Bucket: bucketName, + s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: [ { Key: key, }, ], - } }, (err, data) => { - if (err) { - return done(err); - } + } })).then(data => { const versionIdFromDeleteObjects = data.Deleted[0].DeleteMarkerVersionId; assert.strictEqual(data.Deleted[0].DeleteMarker, true); - return s3.listObjectVersions({ Bucket: bucketName }, - (err, data) => { - if (err) { - return done(err); - } - const versionIdFromListObjectVersions = + return s3.send(new ListObjectVersionsCommand({ Bucket: bucketName })).then(data => { + const versionIdFromListObjectVersions = data.DeleteMarkers[0].VersionId; assert.strictEqual(versionIdFromDeleteObjects, versionIdFromListObjectVersions); return done(); - }); - }); + }).catch(err => done(err)); }).catch(err => done(err)); }); it('should send back a DeleteMarkerVersionId matching the versionId ' + 'stored for the object if object exists but no version was specified', done => { async.waterfall([ - next => s3.putObject({ Bucket: bucketName, Key: key }, - (err, data) => { - const versionId = data.VersionId; - next(err, versionId); - }), - (versionId, next) => s3.deleteObjects({ Bucket: - bucketName, + next => s3.send(new PutObjectCommand({ Bucket: bucketName, Key: key })).then(data => { + const versionId = data.VersionId; + next(null, versionId); + }).catch(err => next(err)), + (versionId, next) => s3.send(new DeleteObjectsCommand({ Bucket: bucketName, Delete: { Objects: [ { Key: key, }, ], - } }, (err, data) => { - if (err) { - return next(err); - } + } })).then(data => { assert.strictEqual(data.Deleted[0].DeleteMarker, true); const deleteVersionId = data.Deleted[0]. 
DeleteMarkerVersionId; assert.notEqual(deleteVersionId, versionId); - return next(err, deleteVersionId, versionId); - }), - (deleteVersionId, versionId, next) => s3.listObjectVersions( - { Bucket: bucketName }, (err, data) => { - if (err) { - return next(err); - } + return next(null, deleteVersionId, versionId); + }).catch(err => next(err)), + (deleteVersionId, versionId, next) => s3.send(new ListObjectVersionsCommand( + { Bucket: bucketName })).then(data => { assert.strictEqual(deleteVersionId, data.DeleteMarkers[0].VersionId); assert.strictEqual(versionId, data.Versions[0].VersionId); return next(); - }), + }).catch(err => next(err)), ], err => done(err)); }); }); diff --git a/tests/functional/metadata/MixedVersionFormat.js b/tests/functional/metadata/MixedVersionFormat.js index 61ad261f36..a72c708375 100644 --- a/tests/functional/metadata/MixedVersionFormat.js +++ b/tests/functional/metadata/MixedVersionFormat.js @@ -1,5 +1,12 @@ const assert = require('assert'); const async = require('async'); +const { + PutObjectCommand, + GetObjectCommand, + ListObjectsCommand, + PutBucketVersioningCommand, + ListObjectVersionsCommand +} = require('@aws-sdk/client-s3'); const withV4 = require('../aws-node-sdk/test/support/withV4'); const BucketUtility = require('../aws-node-sdk/lib/utility/bucket-util'); const MongoClient = require('mongodb').MongoClient; @@ -113,8 +120,16 @@ describe('Mongo backend mixed bucket format versions', () => { }; const masterKey = vFormat === 'v0' ? `${vFormat}-object-1` : `\x7fM${vFormat}-object-1`; async.series([ - next => s3.putObject(paramsObj1, next), - next => s3.putObject(paramsObj2, next), + next => { + s3.send(new PutObjectCommand(paramsObj1)) + .then(() => next()) + .catch(next); + }, + next => { + s3.send(new PutObjectCommand(paramsObj2)) + .then(() => next()) + .catch(next); + }, // check if data stored in the correct format next => getObject(`${vFormat}-bucket`, masterKey, (err, doc) => { assert.ifError(err); @@ -122,16 +137,23 @@ describe('Mongo backend mixed bucket format versions', () => { return next(); }), // test if we can get object - next => s3.getObject(paramsObj1, next), + next => { + s3.send(new GetObjectCommand(paramsObj1)) + .then(() => next()) + .catch(next); + }, // test if we can list objects - next => s3.listObjects({ Bucket: `${vFormat}-bucket` }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.Contents.length, 2); - const keys = data.Contents.map(obj => obj.Key); - assert(keys.includes(`${vFormat}-object-1`)); - assert(keys.includes(`${vFormat}-object-2`)); - return next(); - }) + next => { + s3.send(new ListObjectsCommand({ Bucket: `${vFormat}-bucket` })) + .then(data => { + assert.strictEqual(data.Contents.length, 2); + const keys = data.Contents.map(obj => obj.Key); + assert(keys.includes(`${vFormat}-object-1`)); + assert(keys.includes(`${vFormat}-object-2`)); + next(); + }) + .catch(next); + } ], done); }); @@ -151,11 +173,28 @@ describe('Mongo backend mixed bucket format versions', () => { } }; const masterKey = vFormat === 'v0' ? 
`${vFormat}-object-1` : `\x7fM${vFormat}-object-1`; + async.series([ - next => s3.putBucketVersioning(versioningParams, next), - next => s3.putObject(paramsObj1, next), - next => s3.putObject(paramsObj1, next), - next => s3.putObject(paramsObj2, next), + next => { + s3.send(new PutBucketVersioningCommand(versioningParams)) + .then(() => next()) + .catch(next); + }, + next => { + s3.send(new PutObjectCommand(paramsObj1)) + .then(() => next()) + .catch(next); + }, + next => { + s3.send(new PutObjectCommand(paramsObj1)) + .then(() => next()) + .catch(next); + }, + next => { + s3.send(new PutObjectCommand(paramsObj2)) + .then(() => next()) + .catch(next); + }, // check if data stored in the correct version format next => getObject(`${vFormat}-bucket`, masterKey, (err, doc) => { assert.ifError(err); @@ -163,28 +202,38 @@ describe('Mongo backend mixed bucket format versions', () => { return next(); }), // test if we can get object - next => s3.getObject(paramsObj1, next), + next => { + s3.send(new GetObjectCommand(paramsObj1)) + .then(() => next()) + .catch(next); + }, // test if we can list objects - next => s3.listObjects({ Bucket: `${vFormat}-bucket` }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.Contents.length, 2); - const keys = data.Contents.map(obj => obj.Key); - assert(keys.includes(`${vFormat}-object-1`)); - assert(keys.includes(`${vFormat}-object-2`)); - return next(); - }), + next => { + s3.send(new ListObjectsCommand({ Bucket: `${vFormat}-bucket` })) + .then(data => { + assert.strictEqual(data.Contents.length, 2); + const keys = data.Contents.map(obj => obj.Key); + assert(keys.includes(`${vFormat}-object-1`)); + assert(keys.includes(`${vFormat}-object-2`)); + next(); + }) + .catch(next); + }, // test if we can list object versions - next => s3.listObjectVersions({ Bucket: `${vFormat}-bucket` }, (err, data) => { - assert.ifError(err); - assert.strictEqual(data.Versions.length, 3); - const versionPerObject = {}; - data.Versions.forEach(version => { - versionPerObject[version.Key] = (versionPerObject[version.Key] || 0) + 1; - }); - assert.strictEqual(versionPerObject[`${vFormat}-object-1`], 2); - assert.strictEqual(versionPerObject[`${vFormat}-object-2`], 1); - return next(); - }) + next => { + s3.send(new ListObjectVersionsCommand({ Bucket: `${vFormat}-bucket` })) + .then(data => { + assert.strictEqual(data.Versions.length, 3); + const versionPerObject = {}; + data.Versions.forEach(version => { + versionPerObject[version.Key] = (versionPerObject[version.Key] || 0) + 1; + }); + assert.strictEqual(versionPerObject[`${vFormat}-object-1`], 2); + assert.strictEqual(versionPerObject[`${vFormat}-object-2`], 1); + next(); + }) + .catch(next); + } ], done); }); }); diff --git a/tests/utilities/bucketTagging-util.js b/tests/utilities/bucketTagging-util.js index fe4978510e..337780c27a 100644 --- a/tests/utilities/bucketTagging-util.js +++ b/tests/utilities/bucketTagging-util.js @@ -5,11 +5,11 @@ function assertError(err, expectedErr) { if (expectedErr === null) { assert.strictEqual(err, null, `expected no error but got '${err}'`); } else { - assert.strictEqual(err.code, expectedErr, 'incorrect error response ' + - `code: should be '${expectedErr}' but got '${err.code}'`); - assert.strictEqual(err.statusCode, errors[expectedErr].code, + assert.strictEqual(err.Code, expectedErr, 'incorrect error response ' + + `code: should be '${expectedErr}' but got '${err.Code}'`); + assert.strictEqual(err.$metadata.httpStatusCode, errors[expectedErr].code, 'incorrect error status code: 
should be ' + - `${errors[expectedErr].code}, but got '${err.statusCode}'`); + `${errors[expectedErr].code}, but got '${err.$metadata.httpStatusCode}'`); } } diff --git a/yarn.lock b/yarn.lock index 506b5279b0..a60982957e 100644 --- a/yarn.lock +++ b/yarn.lock @@ -858,6 +858,20 @@ "@smithy/types" "^4.8.0" tslib "^2.6.2" +"@aws-sdk/s3-request-presigner@^3.901.0": + version "3.917.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/s3-request-presigner/-/s3-request-presigner-3.917.0.tgz#b68a257ad40c0694c868f8c6b92440a06ae8d197" + integrity sha512-V1cSM6yQv8lV1Obrp5ti8iXLCRKq45OQETANkiMWRbAwTbzKQml0EfP08BFS+LKtSl2gJfO9tH7O2RgRuqhUuQ== + dependencies: + "@aws-sdk/signature-v4-multi-region" "3.916.0" + "@aws-sdk/types" "3.914.0" + "@aws-sdk/util-format-url" "3.914.0" + "@smithy/middleware-endpoint" "^4.3.5" + "@smithy/protocol-http" "^5.3.3" + "@smithy/smithy-client" "^4.9.1" + "@smithy/types" "^4.8.0" + tslib "^2.6.2" + "@aws-sdk/signature-v4-multi-region@3.916.0": version "3.916.0" resolved "https://registry.yarnpkg.com/@aws-sdk/signature-v4-multi-region/-/signature-v4-multi-region-3.916.0.tgz#d70e3dc9ca2cb3f65923283600a0a6e9a6c4ec7f" @@ -949,6 +963,16 @@ "@smithy/util-endpoints" "^3.2.3" tslib "^2.6.2" +"@aws-sdk/util-format-url@3.914.0": + version "3.914.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/util-format-url/-/util-format-url-3.914.0.tgz#6592dd713faa311200fc9ae9295a79618f33e2ca" + integrity sha512-QpdkoQjvPaYyzZwgk41vFyHQM5s0DsrsbQ8IoPUggQt4HaJUvmL1ShwMcSldbgdzwiRMqXUK8q7jrqUvkYkY6w== + dependencies: + "@aws-sdk/types" "3.914.0" + "@smithy/querystring-builder" "^4.2.3" + "@smithy/types" "^4.8.0" + tslib "^2.6.2" + "@aws-sdk/util-locate-window@^3.0.0": version "3.723.0" resolved "https://registry.yarnpkg.com/@aws-sdk/util-locate-window/-/util-locate-window-3.723.0.tgz#174551bfdd2eb36d3c16e7023fd7e7ee96ad0fa9" @@ -1798,6 +1822,14 @@ resolved "https://registry.yarnpkg.com/@sinonjs/text-encoding/-/text-encoding-0.7.3.tgz#282046f03e886e352b2d5f5da5eb755e01457f3f" integrity sha512-DE427ROAphMQzU4ENbliGYrBSYPXF+TtLg9S8vzeA+OF4ZKzoDdzfL8sxuMUGS/lgRhM6j1URSk9ghf7Xo1tyA== +"@smithy/abort-controller@^3.1.9": + version "3.1.9" + resolved "https://registry.yarnpkg.com/@smithy/abort-controller/-/abort-controller-3.1.9.tgz#47d323f754136a489e972d7fd465d534d72fcbff" + integrity sha512-yiW0WI30zj8ZKoSYNx90no7ugVn3khlyH/z5W8qtKBtVE6awRALbhSG+2SAHA1r6bO/6M9utxYKVZ3PCJ1rWxw== + dependencies: + "@smithy/types" "^3.7.2" + tslib "^2.6.2" + "@smithy/abort-controller@^4.1.1": version "4.1.1" resolved "https://registry.yarnpkg.com/@smithy/abort-controller/-/abort-controller-4.1.1.tgz#9b3872ab6b2c061486175c281dadc0a853260533" @@ -2225,6 +2257,17 @@ "@smithy/types" "^4.8.0" tslib "^2.6.2" +"@smithy/node-http-handler@^3.0.0": + version "3.3.3" + resolved "https://registry.yarnpkg.com/@smithy/node-http-handler/-/node-http-handler-3.3.3.tgz#94dbb3f15342b656ceba2b26e14aa741cace8919" + integrity sha512-BrpZOaZ4RCbcJ2igiSNG16S+kgAc65l/2hmxWdmhyoGWHTLlzQzr06PXavJp9OBlPEG/sHlqdxjWmjzV66+BSQ== + dependencies: + "@smithy/abort-controller" "^3.1.9" + "@smithy/protocol-http" "^4.1.8" + "@smithy/querystring-builder" "^3.0.11" + "@smithy/types" "^3.7.2" + tslib "^2.6.2" + "@smithy/node-http-handler@^4.2.1": version "4.2.1" resolved "https://registry.yarnpkg.com/@smithy/node-http-handler/-/node-http-handler-4.2.1.tgz#d7ab8e31659030d3d5a68f0982f15c00b1e67a0c" @@ -2295,6 +2338,15 @@ "@smithy/types" "^4.8.0" tslib "^2.6.2" +"@smithy/querystring-builder@^3.0.11": + version "3.0.11" + resolved 
"https://registry.yarnpkg.com/@smithy/querystring-builder/-/querystring-builder-3.0.11.tgz#2ed04adbe725671824c5613d0d6f9376d791a909" + integrity sha512-u+5HV/9uJaeLj5XTb6+IEF/dokWWkEqJ0XiaRRogyREmKGUgZnNecLucADLdauWFKUNbQfulHFEZEdjwEBjXRg== + dependencies: + "@smithy/types" "^3.7.2" + "@smithy/util-uri-escape" "^3.0.0" + tslib "^2.6.2" + "@smithy/querystring-builder@^4.1.1": version "4.1.1" resolved "https://registry.yarnpkg.com/@smithy/querystring-builder/-/querystring-builder-4.1.1.tgz#4d35c1735de8214055424045a117fa5d1d5cdec1"