
Commit

Merge pull request #8396 from nadavMiz/concurrency_tests
NSFS | add more concurrency tests
nadavMiz authored Oct 15, 2024
2 parents ac9cf10 + 01cefc6 commit b171929
Showing 1 changed file with 68 additions and 2 deletions.
70 changes: 68 additions & 2 deletions src/test/unit_tests/jest_tests/test_versioning_concurrency.test.js
@@ -93,7 +93,7 @@ describe('test versioning concurrency', () => {
expect(failed_operations).toHaveLength(0);
}, test_timeout);

// same as s3tests_boto3/functional/test_s3.py::test_versioning_concurrent_multi_object_delete,
// the s3-tests version of this test has a bug: it tries to create the bucket twice and fails
// https://github.com/ceph/s3-tests/blob/master/s3tests_boto3/functional/test_s3.py#L1642
// see - https://github.com/ceph/s3-tests/issues/588
@@ -488,7 +488,73 @@ describe('test versioning concurrency', () => {
expect(successful_operations2).toHaveLength(num_of_concurrency2);
expect(failed_operations1).toHaveLength(0);
const versions = await nsfs.list_object_versions({ bucket: bucket }, DUMMY_OBJECT_SDK);
expect(versions.objects.length).toBe(num_of_concurrency1 + 1); // num_of_concurrency1 is the number of versions uploaded when versioning was enabled + 1 null version
}, test_timeout);

it('multiple delete different keys', async () => {
const bucket = 'bucket1';
const key_prefix = 'key_deleted';
const versions_arr = [];
const num_deletes = 5;
const num_objects = 7;

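// upload num_objects objects, recording the version id of each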
for (let i = 0; i < num_objects; i++) {
const random_data = Buffer.from(String(i));
const body = buffer_utils.buffer_to_read_stream(random_data);
const res = await nsfs.upload_object({ bucket, key: key_prefix + i, source_stream: body }, DUMMY_OBJECT_SDK);
versions_arr.push(res.version_id);
}
const number_of_successful_operations = [];
const failed_operations = [];
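// fire the version-specific deletes concurrently, without awaiting each one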
for (let i = 0; i < num_deletes; i++) {
nsfs.delete_object({ bucket, key: key_prefix + i, version_id: versions_arr[i] }, DUMMY_OBJECT_SDK)
.then(res => number_of_successful_operations.push(res))
.catch(err => failed_operations.push(err));
}
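// give the concurrent deletes time to settle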
await P.delay(2000);
expect(number_of_successful_operations).toHaveLength(num_deletes);
expect(failed_operations).toHaveLength(0);
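// only the objects that were not deleted should remain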
const list_objects = await nsfs.list_objects({ bucket }, DUMMY_OBJECT_SDK);
let num_objs = 0;
list_objects.objects.forEach(obj => {
if (obj.key.startsWith(key_prefix)) {
num_objs += 1;
}
});
expect(num_objs).toBe(num_objects - num_deletes);
}, test_timeout);

it('copy-object to same target', async () => {
const num_copies = 5;
const bucket = 'bucket1';
const copy_source = {bucket, key: 'key1'};
const random_data = Buffer.from("test data, it is test data");
const body = buffer_utils.buffer_to_read_stream(random_data);
const source_res = await nsfs.upload_object({ bucket: copy_source.bucket,
key: copy_source.key, source_stream: body }, DUMMY_OBJECT_SDK);
copy_source.version_id = source_res.version_id;

const key = 'key3';
const versions_arr = [];
const failed_operations = [];
// copy key1 5 times to key3
for (let i = 0; i < num_copies; i++) {
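// copy_source is set, so the copy path presumably reads from it rather than the already-consumed source_stream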
nsfs.upload_object({ bucket, key, source_stream: body, copy_source }, DUMMY_OBJECT_SDK)
.then(res => versions_arr.push(res.etag))
.catch(err => failed_operations.push(err));
}
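// wait for the concurrent copies to settle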
await P.delay(1000);
expect(versions_arr).toHaveLength(num_copies);
expect(failed_operations).toHaveLength(0);
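// each successful copy to the same target key should have created a new version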
const list_versions = await nsfs.list_object_versions({ bucket }, DUMMY_OBJECT_SDK);
let num_versions = 0;
list_versions.objects.forEach(obj => {
if (obj.key === key) {
num_versions += 1;
}
});
expect(num_versions).toBe(num_copies);
}, test_timeout);
});

