diff --git a/config.js b/config.js index 391a574b3f..f72c3c69bd 100644 --- a/config.js +++ b/config.js @@ -637,6 +637,9 @@ config.INLINE_MAX_SIZE = 4096; // Object SDK bucket cache expiration time config.OBJECT_SDK_BUCKET_CACHE_EXPIRY_MS = 60000; +// Object SDK bucket_namespace_cache allow stat of the config file +config.NC_ENABLE_BUCKET_NS_CACHE_STAT_VALIDATION = true; + ////////////////////////////// // OPERATOR RELATED // ////////////////////////////// diff --git a/docs/NooBaaNonContainerized/CI&Tests.md b/docs/NooBaaNonContainerized/CI&Tests.md index 0c8eb776cd..1ebc40fda1 100644 --- a/docs/NooBaaNonContainerized/CI&Tests.md +++ b/docs/NooBaaNonContainerized/CI&Tests.md @@ -90,6 +90,7 @@ The following is a list of `NC mocha test` files - 1. `test_nc_nsfs_cli.js` - Tests NooBaa CLI. 2. `test_nc_nsfs_health` - Tests NooBaa Health CLI. 3. `test_nsfs_glacier_backend.js` - Tests NooBaa Glacier Backend. +4. `test_nc_with_a_couple_of_forks.js` - Tests the `bucket_namespace_cache` when running with a couple of forks. Please notice that it uses `nc_coretest` with setup that includes a couple of forks. #### NC Jest test files The following is a list of `NC jest tests` files - @@ -114,7 +115,7 @@ The following is a list of `NC jest tests` files - #### nc_coretest.js File * The `nc_coretest.js` is a file that runs setup and teardown before/after NC integration tests run. * Moreover, `nc_coretest.js` includes mapping between RPC API calls to NooBaa CLI calls in order to be able to run same integration tests on both containerized and non containerized deployments. -* Use `NC_CORETEST=true` environment variable when running NC NSFS integration test (test_bucketspace.js). +* Use `NC_CORETEST=true` environment variable when running NC NSFS integration test (test_nsfs_integration.js). ##### Differences Between Containerized and Non Containerized * `new_buckets_path` - @@ -164,7 +165,7 @@ Consequently, there are now distinct test files, each with a unique scope - 1. 
`test_namespace_fs.js` - Tests NamespaceFS API. 2. `test_ns_list_objects.js` - Tests NamespaceFS list objects API. 3. `test_nsfs_access.js` - Tests uid and gid accessibility of Napi native code. -4. `test_bucketspace.js` - Tests s3 flows on top of NSFS namespace resources. +4. `test_nsfs_integration.js` - Tests s3 flows on top of NSFS namespace resources. 5. `test_nb_native_fs.js` - Tests Napi native code. 6. `test_nb_native_gpfs.js` - Tests Napi native code on top of GPFS. 7. `test_nsfs_versioning.js` - Tests NamespaceFS versioning API. diff --git a/src/endpoint/s3/s3_bucket_logging.js b/src/endpoint/s3/s3_bucket_logging.js index 0b75eb3f5f..6bdd543dae 100644 --- a/src/endpoint/s3/s3_bucket_logging.js +++ b/src/endpoint/s3/s3_bucket_logging.js @@ -11,6 +11,7 @@ const {compose_notification_req, check_notif_relevant} = require('../../util/not async function send_bucket_op_logs(req, res) { if (req.params && req.params.bucket && !(req.op_name === 'put_bucket' || + req.op_name === 'delete_bucket' || req.op_name === 'put_bucket_notification' || req.op_name === 'get_bucket_notification' )) { @@ -19,8 +20,14 @@ async function send_bucket_op_logs(req, res) { //so we aggregate and issue the writes only in the end const writes_aggregate = []; - const bucket_info = await req.object_sdk.read_bucket_sdk_config_info(req.params.bucket); - dbg.log2("read_bucket_sdk_config_info = ", bucket_info); + let bucket_info; + try { + bucket_info = await req.object_sdk.read_bucket_sdk_config_info(req.params.bucket); + dbg.log2("read_bucket_sdk_config_info = ", bucket_info); + } catch (err) { + dbg.warn(`send_bucket_op_logs of bucket ${req.params.bucket} got an error:`, err); + return; + } if (is_bucket_logging_enabled(bucket_info)) { dbg.log2("Bucket logging is enabled for Bucket : ", req.params.bucket); diff --git a/src/endpoint/s3/s3_rest.js b/src/endpoint/s3/s3_rest.js index d940981b2a..9e753cb984 100755 --- a/src/endpoint/s3/s3_rest.js +++ b/src/endpoint/s3/s3_rest.js @@ -73,7 +73,7 @@ 
async function s3_rest(req, res) { try { await s3_logging.send_bucket_op_logs(req, res); // logging again with error } catch (err1) { - dbg.error("Could not log bucket operation:", err1); + dbg.error("Could not log bucket operation (after handle_error):", err1); } } } @@ -134,7 +134,7 @@ async function handle_request(req, res) { try { await s3_logging.send_bucket_op_logs(req); // logging intension - no result } catch (err) { - dbg.error("Could not log bucket operation:", err); + dbg.error(`Could not log bucket operation (before operation ${req.op_name}):`, err); } } @@ -165,7 +165,7 @@ async function handle_request(req, res) { try { await s3_logging.send_bucket_op_logs(req, res); // logging again with result } catch (err) { - dbg.error("Could not log bucket operation:", err); + dbg.error(`Could not log bucket operation (after operation ${req.op_name}):`, err); } } diff --git a/src/sdk/bucketspace_fs.js b/src/sdk/bucketspace_fs.js index a069a5933d..e3d3788b2b 100644 --- a/src/sdk/bucketspace_fs.js +++ b/src/sdk/bucketspace_fs.js @@ -104,9 +104,9 @@ class BucketSpaceFS extends BucketSpaceSimpleFS { const bucket = await this.config_fs.get_bucket_by_name(name); nsfs_schema_utils.validate_bucket_schema(bucket); - const is_valid = await this.check_bucket_config(bucket); + const is_valid = await this.check_stat_bucket_storage_path(bucket.path); if (!is_valid) { - dbg.warn('BucketSpaceFS: one or more bucket config check is failed for bucket : ', name); + dbg.warn('BucketSpaceFS: invalid storage path for bucket : ', name); } const nsr = { @@ -155,6 +155,11 @@ class BucketSpaceFS extends BucketSpaceSimpleFS { } } } + try { + bucket.stat = await this.config_fs.stat_bucket_config_file(bucket.name.unwrap()); + } catch (err) { + dbg.warn(`BucketspaceFS.read_bucket_sdk_info could not stat_bucket_config_file ${bucket.name.unwrap()}`); + } return bucket; } catch (err) { const rpc_error = translate_error_codes(err, entity_enum.BUCKET); @@ -164,17 +169,37 @@ class BucketSpaceFS 
extends BucketSpaceSimpleFS { } } - async check_bucket_config(bucket) { - const bucket_storage_path = bucket.path; + /** + * check_stat_bucket_storage_path will return true + * if there is stat output on the bucket storage path + * (in case the stat throws an error it would return false) + * @param {string} bucket_storage_path + * @returns {Promise<boolean>} + */ + async check_stat_bucket_storage_path(bucket_storage_path) { try { await nb_native().fs.stat(this.fs_context, bucket_storage_path); - //TODO: Bucket owner check return true; } catch (err) { return false; } } + /** + * check_same_stat will return true if the config file was not changed + * @param {nb.NativeFSStats} bucket_stat + * @returns {Promise<boolean>} + */ + async check_same_stat(bucket_name, bucket_stat) { + try { + const current_stat = await this.config_fs.stat_bucket_config_file(bucket_name); + if (current_stat) { + return current_stat.ino === bucket_stat.ino && current_stat.mtimeNsBigint === bucket_stat.mtimeNsBigint; + } + } catch (err) { + dbg.warn('check_same_stat: current_stat got an error', err, 'ignoring...'); + } + } //////////// // BUCKET // diff --git a/src/sdk/config_fs.js b/src/sdk/config_fs.js index 01b6346f74..67f45a0115 100644 --- a/src/sdk/config_fs.js +++ b/src/sdk/config_fs.js @@ -885,6 +885,17 @@ class ConfigFS { return new_path; } + /** + * stat_bucket_config_file will return the stat output on bucket config file + * please notice that stat might throw an error - you should wrap it with try-catch and handle the error + * @param {string} bucket_name + * @returns {Promise<nb.NativeFSStats>} + */ + stat_bucket_config_file(bucket_name) { + const bucket_config_path = this.get_bucket_path_by_name(bucket_name); + return nb_native().fs.stat(this.fs_context, bucket_config_path); + } + /** * is_bucket_exists returns true if bucket config path exists in config dir * @param {string} bucket_name diff --git a/src/sdk/nb.d.ts b/src/sdk/nb.d.ts index 74865a3746..a98071294a 100644 --- a/src/sdk/nb.d.ts +++
b/src/sdk/nb.d.ts @@ -824,6 +824,7 @@ interface BucketSpace { read_account_by_access_key({ access_key: string }): Promise; read_bucket_sdk_info({ name: string }): Promise; + check_same_stat(bucket_name: string, bucket_stat: nb.NativeFSStats); // only implemented in bucketspace_fs list_buckets(params: object, object_sdk: ObjectSDK): Promise; read_bucket(params: object): Promise; diff --git a/src/sdk/object_sdk.js b/src/sdk/object_sdk.js index 9823750fe9..a722655817 100644 --- a/src/sdk/object_sdk.js +++ b/src/sdk/object_sdk.js @@ -286,6 +286,15 @@ class ObjectSDK { async _validate_bucket_namespace(data, params) { const time = Date.now(); + // stat check (only in bucketspace FS) + const bs = this._get_bucketspace(); + const ns_allow_stat_bucket = Boolean(bs.check_same_stat); + if (ns_allow_stat_bucket && config.NC_ENABLE_BUCKET_NS_CACHE_STAT_VALIDATION) { + const same_stat = await bs.check_same_stat(params.name, data.bucket.stat); + if (!same_stat) { // config file of bucket was changed + return false; + } + } if (time <= data.valid_until) return true; const bucket = await this._get_bucketspace().read_bucket_sdk_info({ name: params.name }); if (_.isEqual(bucket, data.bucket)) { diff --git a/src/test/system_tests/ceph_s3_tests/run_ceph_nsfs_test_on_test_container.sh b/src/test/system_tests/ceph_s3_tests/run_ceph_nsfs_test_on_test_container.sh index ec502899d9..65f6d93641 100644 --- a/src/test/system_tests/ceph_s3_tests/run_ceph_nsfs_test_on_test_container.sh +++ b/src/test/system_tests/ceph_s3_tests/run_ceph_nsfs_test_on_test_container.sh @@ -30,7 +30,7 @@ export FS_ROOT_2=/tmp/nsfs_root2/ # 1. Create configuration directory # 2. Create config.json file mkdir -p ${CONFIG_DIR} -config='{"ALLOW_HTTP":true}' +config='{"ALLOW_HTTP":true, "ENDPOINT_FORKS":2}' echo "$config" > ${CONFIG_DIR}/config.json # 1. 
Create root directory for bucket creation diff --git a/src/test/unit_tests/nc_coretest.js b/src/test/unit_tests/nc_coretest.js index 6261d407f6..0194419ffe 100644 --- a/src/test/unit_tests/nc_coretest.js +++ b/src/test/unit_tests/nc_coretest.js @@ -41,6 +41,7 @@ dbg.set_module_level(dbg_level, 'core'); const P = require('../../util/promise'); let _setup = false; +let _nsfs_process = false; const SYSTEM = NC_CORETEST; const EMAIL = NC_CORETEST; @@ -58,13 +59,17 @@ const NC_CORETEST_CONFIG_FS = new ConfigFS(NC_CORETEST_CONFIG_DIR_PATH); const nsrs_to_root_paths = {}; let nsfs_process; +let current_setup_options = {}; /** * setup will setup nc coretest - * @param {object} options + * currently the setup_options we use are for the nsfs process: fork and debug + * @param {object} setup_options */ -function setup(options = {}) { +function setup(setup_options = {}) { + console.log(`in setup - variables values: _setup ${_setup} _nsfs_process ${_nsfs_process}`); + current_setup_options = setup_options; if (_setup) return; _setup = true; @@ -76,30 +81,9 @@ function setup(options = {}) { await config_dir_setup(); await admin_account_creation(); await first_bucket_creation(); - - await announce('start nsfs script'); - const logStream = fs.createWriteStream('src/logfile.txt', { flags: 'a' }); - - nsfs_process = child_process.spawn('node', ['src/cmd/nsfs.js'], { - detached: true - }); - nsfs_process.stdout.pipe(logStream); - nsfs_process.stderr.pipe(logStream); - - - nsfs_process.on('exit', (code, signal) => { - dbg.error(`nsfs.js exited code=${code}, signal=${signal}`); - logStream.end(); - }); - - nsfs_process.on('error', err => { - dbg.error(`nsfs.js exited with error`, err); - logStream.end(); - }); + await start_nsfs_process(setup_options); // TODO - run health - // wait 2 seconds before announcing nc coretes is ready - await P.delay(2000); await announce(`nc coretest ready... 
(took ${((Date.now() - start) / 1000).toFixed(1)} sec)`); }); @@ -107,8 +91,7 @@ function setup(options = {}) { mocha.after('nc-coretest-after', async function() { this.timeout(60000); // eslint-disable-line no-invalid-this try { - await announce('stop nsfs script'); - if (nsfs_process) nsfs_process.kill('SIGKILL'); + await stop_nsfs_process(); await config_dir_teardown(); await announce('nc coretest done ...'); } catch (err) { @@ -117,6 +100,71 @@ function setup(options = {}) { }); } +/** + * start_nsfs_process starts the NSFS process and attach the logs in a file + * the setup_options we support: + * 1. forks (number of processes to run) + * 2. debug (for logs printing level) + * @param {object} setup_options + */ +async function start_nsfs_process(setup_options) { + console.log(`in start_nsfs_process - variables values: _setup ${_setup} _nsfs_process ${_nsfs_process}`); + const { forks, debug } = setup_options; + console.log(`setup_options: forks ${forks} debug ${debug}`); + if (_nsfs_process) return; + await announce('start nsfs script'); + const logStream = fs.createWriteStream('nsfs_integration_test_log.txt', { flags: 'a' }); + + const arguments_for_command = ['src/cmd/nsfs.js']; + if (forks) { + arguments_for_command.push('--forks'); + arguments_for_command.push(`${forks}`); + } + if (debug) { + arguments_for_command.push('--debug'); + arguments_for_command.push(`${debug}`); + } + nsfs_process = child_process.spawn('node', arguments_for_command, { + detached: true + }); + + _nsfs_process = true; + + nsfs_process.stdout.pipe(logStream); + nsfs_process.stderr.pipe(logStream); + + + nsfs_process.on('exit', (code, signal) => { + dbg.error(`nsfs.js exited code=${code}, signal=${signal}`); + logStream.end(); + }); + + nsfs_process.on('error', err => { + dbg.error(`nsfs.js exited with error`, err); + logStream.end(); + }); + // wait for the process to be ready (else would see ECONNREFUSED issue) + await P.delay(5000); +} + +/** + * stop_nsfs_process stops the NSFS 
+ */ +async function stop_nsfs_process() { + console.log(`in stop_nsfs_process - variables values: _setup ${_setup} _nsfs_process ${_nsfs_process}`); + _nsfs_process = false; + await announce('stop nsfs script'); + if (nsfs_process) nsfs_process.kill('SIGKILL'); +} + +/** + * get_current_setup_options returns the current_setup_options + * currently the setup_options we use are for the nsfs process: fork and debug + */ +function get_current_setup_options() { + return current_setup_options; +} + /** * announce will pretty print msg * @param {string} msg @@ -472,6 +520,9 @@ const rpc_cli_funcs_to_manage_nsfs_cli_cmds = { }; exports.setup = setup; +exports.get_current_setup_options = get_current_setup_options; +exports.stop_nsfs_process = stop_nsfs_process; +exports.start_nsfs_process = start_nsfs_process; exports.no_setup = _.noop; exports.log = log; exports.SYSTEM = SYSTEM; diff --git a/src/test/unit_tests/nc_index.js b/src/test/unit_tests/nc_index.js index 145120e3b8..5497e05b9b 100644 --- a/src/test/unit_tests/nc_index.js +++ b/src/test/unit_tests/nc_index.js @@ -13,7 +13,7 @@ require('./test_nb_native_fs'); require('./test_nc_nsfs_cli'); require('./test_nc_nsfs_health'); require('./test_nsfs_access'); -require('./test_bucketspace'); +require('./test_nsfs_integration'); require('./test_bucketspace_fs'); require('./test_nsfs_glacier_backend'); require('./test_s3_bucket_policy'); @@ -21,6 +21,9 @@ require('./test_nsfs_versioning'); require('./test_bucketspace_versioning'); require('./test_nc_bucket_logging'); +// running with a couple of forks - please notice and add only relevant tests here +require('./test_nc_with_a_couple_of_forks.js'); // please notice that we use a different setup + // TODO: uncomment when supported //require('./test_s3_ops'); //require('./test_s3_list_objects'); diff --git a/src/test/unit_tests/sudo_index.js b/src/test/unit_tests/sudo_index.js index 77b10c6120..fa3017e028 100644 --- a/src/test/unit_tests/sudo_index.js +++ 
b/src/test/unit_tests/sudo_index.js @@ -16,7 +16,7 @@ if (test_utils.invalid_nsfs_root_permissions()) { // // CORE require('./test_nsfs_access'); -require('./test_bucketspace'); +require('./test_nsfs_integration'); require('./test_bucketspace_versioning'); require('./test_bucketspace_fs'); require('./test_nsfs_versioning'); diff --git a/src/test/unit_tests/test_nc_with_a_couple_of_forks.js b/src/test/unit_tests/test_nc_with_a_couple_of_forks.js new file mode 100644 index 0000000000..caebdfab03 --- /dev/null +++ b/src/test/unit_tests/test_nc_with_a_couple_of_forks.js @@ -0,0 +1,77 @@ +/* Copyright (C) 2024 NooBaa */ +'use strict'; + +const path = require('path'); +const _ = require('lodash'); +const P = require('../../util/promise'); +const mocha = require('mocha'); +const assert = require('assert'); +const fs_utils = require('../../util/fs_utils'); +const { TMP_PATH, generate_nsfs_account, get_new_buckets_path_by_test_env, generate_s3_client, get_coretest_path } = require('../system_tests/test_utils'); + +const coretest_path = get_coretest_path(); +const coretest = require(coretest_path); +const setup_options = { forks: 2, debug: 5 }; +coretest.setup(setup_options); +const { rpc_client, EMAIL, get_current_setup_options, stop_nsfs_process, start_nsfs_process } = coretest; + +const CORETEST_ENDPOINT = coretest.get_http_address(); + +const config_root = path.join(TMP_PATH, 'test_nc_cache_stat'); +// on NC - new_buckets_path is full absolute path +// on Containerized - new_buckets_path is the directory +const new_bucket_path_param = get_new_buckets_path_by_test_env(config_root, '/'); + +const bucket_name = 'bucket1'; +let s3_admin; + +mocha.describe('operations with a couple of forks', async function() { + this.timeout(50000); // eslint-disable-line no-invalid-this + const bucket_path = path.join(TMP_PATH, 'bucket1'); + + mocha.before(async () => { + // we want to make sure that we run this test with a couple of forks (by default setup it is 0) + const 
current_setup_options = get_current_setup_options(); + const same_setup = _.isEqual(current_setup_options, setup_options); + if (!same_setup) { + console.log('current_setup_options', current_setup_options, 'same_setup', same_setup); + await stop_nsfs_process(); + await start_nsfs_process(setup_options); + } + + await fs_utils.create_fresh_path(bucket_path); + await fs_utils.file_must_exist(bucket_path); + const res = await generate_nsfs_account(rpc_client, EMAIL, new_bucket_path_param, { admin: true }); + s3_admin = generate_s3_client(res.access_key, res.secret_key, CORETEST_ENDPOINT); + }); + + mocha.after(async () => { + fs_utils.folder_delete(`${config_root}`); + }); + + mocha.it('versioning change with a couple of forks', async function() { + const res_bucket_create = await s3_admin.createBucket({ Bucket: bucket_name }); + assert.equal(res_bucket_create.$metadata.httpStatusCode, 200); + + // 1 request for bucket versioning enabled (can be handled by any of the forks) + await s3_admin.putBucketVersioning({ Bucket: bucket_name, VersioningConfiguration: { MFADelete: 'Disabled', Status: 'Enabled' } }); + + // a couple of requests for get bucket versioning + const failed_operations = []; + const successful_operations = []; + const num_of_concurrency = 10; + for (let i = 0; i < num_of_concurrency; i++) { + s3_admin.getBucketVersioning({ Bucket: bucket_name }) + .catch(err => failed_operations.push(err)) + .then(res => successful_operations.push(res)); + } + await P.delay(2000); + assert.equal(successful_operations.length, num_of_concurrency); + assert.equal(failed_operations.length, 0); + const all_res_with_enabled = successful_operations.every(res => res.Status === 'Enabled'); + assert.ok(all_res_with_enabled); + + // cleanup + await s3_admin.deleteBucket({ Bucket: bucket_name }); + }); +}); diff --git a/src/test/unit_tests/test_bucketspace.js b/src/test/unit_tests/test_nsfs_integration.js similarity index 99% rename from src/test/unit_tests/test_bucketspace.js 
rename to src/test/unit_tests/test_nsfs_integration.js index 79ac766f6d..13ac710a38 100644 --- a/src/test/unit_tests/test_bucketspace.js +++ b/src/test/unit_tests/test_nsfs_integration.js @@ -19,7 +19,7 @@ const fetch = require('node-fetch'); const P = require('../../util/promise'); const cloud_utils = require('../../util/cloud_utils'); const SensitiveString = require('../../util/sensitive_string'); -const S3Error = require('../../../src/endpoint/s3/s3_errors').S3Error; +const S3Error = require('../../endpoint/s3/s3_errors').S3Error; const test_utils = require('../system_tests/test_utils'); const { stat, open } = require('../../util/nb_native')().fs; const { get_process_fs_context } = require('../../util/native_fs_utils');