From 75e08fc9458994997e2c9210b11bcce1a7a8894e Mon Sep 17 00:00:00 2001 From: Utkarsh Srivastava Date: Mon, 13 May 2024 17:16:38 +0530 Subject: [PATCH 1/3] filter out failed recalls Signed-off-by: Utkarsh Srivastava update comments Signed-off-by: Utkarsh Srivastava fix linting issue Signed-off-by: Utkarsh Srivastava revamp restore code and add tests Signed-off-by: Utkarsh Srivastava add more tests Signed-off-by: Utkarsh Srivastava add cleanup to the test Signed-off-by: Utkarsh Srivastava address self review comments Signed-off-by: Utkarsh Srivastava fix partial failure of finalize_restore Signed-off-by: Utkarsh Srivastava improving task_show output parsing Signed-off-by: Utkarsh Srivastava fix parsing edge case Signed-off-by: Utkarsh Srivastava fix formatting Signed-off-by: Utkarsh Srivastava add error logging Signed-off-by: Utkarsh Srivastava (cherry picked from commit b1c3570b18a0e50e7b4bab4ce2345e199529eb8d) --- config.js | 5 + src/sdk/nsfs_glacier_backend/backend.js | 8 + src/sdk/nsfs_glacier_backend/tapecloud.js | 394 +++++++++++------- .../unit_tests/test_nsfs_glacier_backend.js | 172 +++++++- 4 files changed, 421 insertions(+), 158 deletions(-) diff --git a/config.js b/config.js index db4ddae570..6531ee9e40 100644 --- a/config.js +++ b/config.js @@ -755,6 +755,11 @@ config.NSFS_GLACIER_BACKEND = 'TAPECLOUD'; // TAPECLOUD Glacier backend specific configs config.NSFS_GLACIER_TAPECLOUD_BIN_DIR = '/opt/ibm/tapecloud/bin'; +// If set to true will disable cleanup of the task show output +// Should be used only for debugging or else will keep filling +// up the disk space. +config.NSFS_GLACIER_TAPECLOUD_PRESERVE_TASK_SHOW_OUTPUT = false; + // NSFS_GLACIER_MIGRATE_INTERVAL indicates the interval between runs // of `manage_nsfs glacier migrate` config.NSFS_GLACIER_MIGRATE_INTERVAL = 15 * 60 * 1000; diff --git a/src/sdk/nsfs_glacier_backend/backend.js b/src/sdk/nsfs_glacier_backend/backend.js index ff7658d410..4a778f4b67 100644 --- a/src/sdk/nsfs_glacier_backend/backend.js +++ b/src/sdk/nsfs_glacier_backend/backend.js @@ -263,6 +263,14 @@ class GlacierBackend { const restore_status = GlacierBackend.get_restore_status(stat.xattr, new Date(), file); if (!restore_status) return false; + // We don't check for pre-existing expiry here, it can happen in 2 cases + // 1. A restore is already going and someone somehow initiated this second + // call. In that case we might see partial extended attributes such that + // both request as well a future expiry time exists. + // 2. A restore request was partially processed and then failed before + // removing the request extended attribute. In such case, NSFS would still + // report the object restore status to be `ONGOING` and we are going + // to allow a retry of that entry. 
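For illustration only (not part of the patch): the two cases in the comment above both reduce to the request extended attribute still being present, and as long as it is, get_restore_status keeps reporting ONGOING, which is what lets the entry retry. A hypothetical snapshot of the attributes in each state, using the GlacierBackend constants from this change (values are made up):

const { GlacierBackend } = require('../../sdk/nsfs_glacier_backend/backend'); // path as used from src/test/unit_tests in this patch

// A previous attempt set the expiry but failed before clearing the request
// attribute, or a second restore call raced with an ongoing one: the request
// attribute is still there, the status stays ONGOING, and the entry may retry.
const partially_restored_xattr = {
    [GlacierBackend.XATTR_RESTORE_REQUEST]: '3',                       // days requested
    [GlacierBackend.XATTR_RESTORE_EXPIRY]: '2024-05-20T00:00:00.000Z', // leftover from the partial attempt
};

// A fully finalized restore keeps only the expiry, so ONGOING is no longer
// reported and the entry is not picked up again.
const finalized_xattr = {
    [GlacierBackend.XATTR_RESTORE_EXPIRY]: '2024-05-20T00:00:00.000Z',
};
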
return restore_status.state === GlacierBackend.RESTORE_STATUS_ONGOING; } } diff --git a/src/sdk/nsfs_glacier_backend/tapecloud.js b/src/sdk/nsfs_glacier_backend/tapecloud.js index e9d2956437..a69b695cad 100644 --- a/src/sdk/nsfs_glacier_backend/tapecloud.js +++ b/src/sdk/nsfs_glacier_backend/tapecloud.js @@ -16,136 +16,177 @@ const dbg = require('../../util/debug_module')(__filename); const ERROR_DUPLICATE_TASK = "GLESM431E"; -const MIGRATE_SCRIPT = 'migrate'; -const RECALL_SCRIPT = 'recall'; -const TASK_SHOW_SCRIPT = 'task_show'; -const PROCESS_EXPIRED_SCRIPT = 'process_expired'; -const LOW_FREE_SPACE_SCRIPT = 'low_free_space'; function get_bin_path(bin_name) { return path.join(config.NSFS_GLACIER_TAPECLOUD_BIN_DIR, bin_name); } -/** - * @param {*} task_id - * @param {(entry: string) => Promise} recorder - */ -async function record_failed_tasks(task_id, recorder) { - const fs_context = get_process_fs_context(); - const tmp = path.join(os.tmpdir(), `eeadm_task_out_${Date.now()}`); - - let temp_fh = null; - let reader = null; - try { - temp_fh = await nb_native().fs.open(fs_context, tmp, 'rw'); - - const proc = spawn(get_bin_path(TASK_SHOW_SCRIPT), [task_id], { - stdio: ['pipe', temp_fh.fd, temp_fh.fd], - }); - - const [errcode] = await events.once(proc, 'exit'); - if (errcode) { - throw new Error('process exited with non-zero exit code:', errcode); - } +class TapeCloudUtils { + static MIGRATE_SCRIPT = 'migrate'; + static RECALL_SCRIPT = 'recall'; + static TASK_SHOW_SCRIPT = 'task_show'; + static PROCESS_EXPIRED_SCRIPT = 'process_expired'; + static LOW_FREE_SPACE_SCRIPT = 'low_free_space'; - reader = new NewlineReader(fs_context, tmp); - await reader.forEach(async line => { - if (!line.startsWith("Fail")) return; + /** + * @param {*} task_id + * @param {(entry: string) => Promise} failure_recorder + * @param {(entry: string) => Promise} [success_recorder] + */ + static async record_task_status(task_id, failure_recorder, success_recorder) { + const fs_context = get_process_fs_context(); + const tmp = path.join(os.tmpdir(), `eeadm_task_out_${Date.now()}`); + + let temp_fh = null; + let reader = null; + try { + temp_fh = await nb_native().fs.open(fs_context, tmp, 'rw'); - const parsed = line.split(/\s+/); - if (parsed.length !== 6) { - throw new Error('failed to parse task show'); - } + const proc = spawn(get_bin_path(TapeCloudUtils.TASK_SHOW_SCRIPT), [task_id], { + stdio: ['pipe', temp_fh.fd, temp_fh.fd], + }); - if (parsed[1] !== ERROR_DUPLICATE_TASK) { - // Column 5 is the filename (refer tapecloud [eeadm] manual) - await recorder(parsed[5]); + const [errcode] = await events.once(proc, 'exit'); + if (errcode) { + throw new Error('process exited with non-zero exit code:', errcode); } - return true; - }); - } finally { - if (temp_fh) { - await temp_fh.close(fs_context); - await nb_native().fs.unlink(fs_context, tmp); - } + reader = new NewlineReader(fs_context, tmp); + await reader.forEach(async line => { + const failure_case = line.startsWith("Fail"); + const success_case = line.startsWith("Success"); + + if (!failure_case && !success_case) return; + + // Success recorder is optional - early exit + // if we don't have a recorder to record success + if (success_case && !success_recorder) return; - if (reader) { - await reader.close(); + // (refer tapecloud [eeadm] manual) + const [metadata, filename] = line.split(' -- '); + + if (!filename) { + dbg.error('invalid task show output - ', 'line:', line); + return; + } + + const parsed_meta = metadata.split(/\s+/); + if (parsed_meta.length !== 4) 
{ + dbg.error('failed to parse "task show" output -', 'line:', line); + return; + } + + if (failure_case) { + const failure_code = parsed_meta[1]; + if (failure_code !== ERROR_DUPLICATE_TASK) { + dbg.warn('failed to migrate', filename, 'will record in failure/retry log'); + await failure_recorder(filename); + } + } + + if (success_case) { + await success_recorder(filename); + } + + return true; + }); + } finally { + if (temp_fh) { + await temp_fh.close(fs_context); + + // Preserve the tmp file + if (config.NSFS_GLACIER_TAPECLOUD_PRESERVE_TASK_SHOW_OUTPUT) { + dbg.log0("preserved TASK_SHOW_SCRIPT output at - " + tmp); + } else { + await nb_native().fs.unlink(fs_context, tmp); + } + } + + if (reader) { + await reader.close(); + } } } -} -/** - * tapecloud_failure_handler takes the error and runs task_show on the task - * ID to identify the failed entries and record them to the recorder - * @param {*} error - * @param {(entry: string) => Promise} recorder - */ -async function tapecloud_failure_handler(error, recorder) { - const { stdout } = error; - - // Find the line in the stdout which has the line 'task ID is, ' and extract id - const match = stdout.match(/task ID is (\d+)/); - if (match.length !== 2) { - throw error; - } + /** + * tapecloud_failure_handler takes the error and runs task_show on the task + * ID to identify the failed entries and record them to the recorder + * @param {*} error + * @param {(entry: string) => Promise} failure_recorder + * @param {(entry: string) => Promise} [success_recorder] + */ + static async tapecloud_failure_handler(error, failure_recorder, success_recorder) { + const { stdout } = error; - const task_id = match[1]; + // Find the line in the stdout which has the line 'task ID is, ' and extract id + const match = stdout.match(/task ID is (\d+)/); + if (match.length !== 2) { + throw error; + } - // Fetch task status and see what failed - await record_failed_tasks(task_id, recorder); -} + const task_id = match[1]; -/** - * migrate takes name of a file which contains the list - * of the files to be migrated to tape. - * - * The file should be in the following format: - * - * The function returns the names of the files which failed - * to migrate. - * @param {string} file filename - * @param {(entry: string) => Promise} recorder - */ -async function migrate(file, recorder) { - try { - dbg.log1("Starting migration for file", file); - const out = await exec(`${get_bin_path(MIGRATE_SCRIPT)} ${file}`, { return_stdout: true }); - dbg.log4("migrate finished with:", out); - dbg.log1("Finished migration for file", file); - } catch (error) { - await tapecloud_failure_handler(error, recorder); + // Fetch task status and see what failed + await TapeCloudUtils.record_task_status(task_id, failure_recorder, success_recorder); } -} -/** - * recall takes name of a file which contains the list - * of the files to be recall to tape. - * - * The file should be in the following format: - * - * The function returns the names of the files which failed - * to recall. 
- * @param {string} file filename - * @param {(entry: string) => Promise} recorder - */ -async function recall(file, recorder) { - try { - dbg.log1("Starting recall for file", file); - const out = await exec(`${get_bin_path(RECALL_SCRIPT)} ${file}`, { return_stdout: true }); - dbg.log4("recall finished with:", out); - dbg.log1("Finished recall for file", file); - } catch (error) { - await tapecloud_failure_handler(error, recorder); + /** + * migrate takes name of a file which contains the list + * of the files to be migrated to tape. + * + * The file should be in the following format: + * + * The function returns the names of the files which failed + * to migrate. + * @param {string} file filename + * @param {(entry: string) => Promise} failure_recorder + * @returns {Promise} Indicates success if true + */ + static async migrate(file, failure_recorder) { + try { + dbg.log1("Starting migration for file", file); + const out = await exec(`${get_bin_path(TapeCloudUtils.MIGRATE_SCRIPT)} ${file}`, { return_stdout: true }); + dbg.log4("migrate finished with:", out); + dbg.log1("Finished migration for file", file); + return true; + } catch (error) { + await TapeCloudUtils.tapecloud_failure_handler(error, failure_recorder); + return false; + } + } + + /** + * recall takes name of a file which contains the list + * of the files to be recall to tape. + * + * The file should be in the following format: + * + * The function returns the names of the files which failed + * to recall. + * @param {string} file filename + * @param {(entry: string) => Promise} failure_recorder + * @param {(entry: string) => Promise} success_recorder + * @returns {Promise} Indicates success if true + */ + static async recall(file, failure_recorder, success_recorder) { + try { + dbg.log1("Starting recall for file", file); + const out = await exec(`${get_bin_path(TapeCloudUtils.RECALL_SCRIPT)} ${file}`, { return_stdout: true }); + dbg.log4("recall finished with:", out); + dbg.log1("Finished recall for file", file); + return true; + } catch (error) { + await TapeCloudUtils.tapecloud_failure_handler(error, failure_recorder, success_recorder); + return false; + } } -} -async function process_expired() { - dbg.log1("Starting process_expired"); - const out = await exec(`${get_bin_path(PROCESS_EXPIRED_SCRIPT)}`, { return_stdout: true }); - dbg.log4("process_expired finished with:", out); - dbg.log1("Finished process_expired"); + static async process_expired() { + dbg.log1("Starting process_expired"); + const out = await exec(`${get_bin_path(TapeCloudUtils.PROCESS_EXPIRED_SCRIPT)}`, { return_stdout: true }); + dbg.log4("process_expired finished with:", out); + dbg.log1("Finished process_expired"); + } } class TapeCloudGlacierBackend extends GlacierBackend { @@ -228,38 +269,27 @@ class TapeCloudGlacierBackend extends GlacierBackend { } }, async batch => { - await this._recall(batch, failure_recorder); - - const batch_file = new LogFile(fs_context, batch); - await batch_file.collect_and_process(async (entry_path, batch_recorder) => { - const entry = new NewlineReaderEntry(fs_context, entry_path); - let fh = null; - try { - fh = await entry.open(); - - const stat = await fh.stat(fs_context, { - xattr_get_keys: [ - GlacierBackend.XATTR_RESTORE_REQUEST, - ] - }); - - const days = Number(stat.xattr[GlacierBackend.XATTR_RESTORE_REQUEST]); - const expires_on = GlacierBackend.generate_expiry( - new Date(), - days, - config.NSFS_GLACIER_EXPIRY_TIME_OF_DAY, - config.NSFS_GLACIER_EXPIRY_TZ, - ); - - await fh.replacexattr(fs_context, { - 
[GlacierBackend.XATTR_RESTORE_EXPIRY]: expires_on.toISOString(), - }, GlacierBackend.XATTR_RESTORE_REQUEST); - } catch (error) { - dbg.error(`failed to process ${entry.path}`, error); - } finally { - if (fh) await fh.close(fs_context); + const success = await this._recall( + batch, + async entry_path => { + dbg.log2('TapeCloudGlacierBackend.restore.partial_failure - entry:', entry_path); + await failure_recorder(entry_path); + }, + async entry_path => { + dbg.log2('TapeCloudGlacierBackend.restore.partial_success - entry:', entry_path); + await this._finalize_restore(fs_context, entry_path); } - }); + ); + + // We will iterate through the entire log file iff and we get a success message from + // the recall call. + if (success) { + const batch_file = new LogFile(fs_context, batch); + await batch_file.collect_and_process(async (entry_path, batch_recorder) => { + dbg.log2('TapeCloudGlacierBackend.restore.batch - entry:', entry_path); + await this._finalize_restore(fs_context, entry_path); + }); + } }); return true; } catch (error) { @@ -277,7 +307,7 @@ class TapeCloudGlacierBackend extends GlacierBackend { } async low_free_space() { - const result = await exec(get_bin_path(LOW_FREE_SPACE_SCRIPT), { return_stdout: true }); + const result = await exec(get_bin_path(TapeCloudUtils.LOW_FREE_SPACE_SCRIPT), { return_stdout: true }); return result.toLowerCase().trim() === 'true'; } @@ -285,34 +315,94 @@ class TapeCloudGlacierBackend extends GlacierBackend { /** * _migrate should perform migration - * + * * NOTE: Must be overwritten for tests - * @param {string} file - * @param {(entry: string) => Promise} recorder + * @param {string} file + * @param {(entry: string) => Promise} recorder + * @returns {Promise} */ async _migrate(file, recorder) { - return migrate(file, recorder); + return TapeCloudUtils.migrate(file, recorder); } /** * _recall should perform recall - * + * * NOTE: Must be overwritten for tests - * @param {string} file - * @param {(entry: string) => Promise} recorder + * @param {string} file + * @param {(entry: string) => Promise} failure_recorder + * @param {(entry: string) => Promise} success_recorder + * @returns {Promise} */ - async _recall(file, recorder) { - return recall(file, recorder); + async _recall(file, failure_recorder, success_recorder) { + return TapeCloudUtils.recall(file, failure_recorder, success_recorder); } /** * _process_expired should process expired objects - * + * * NOTE: Must be overwritten for tests */ async _process_expired() { - return process_expired(); + return TapeCloudUtils.process_expired(); + } + + /** + * finalizes the restore by setting the required EAs + * + * @param {nb.NativeFSContext} fs_context + * @param {string} entry_path + */ + async _finalize_restore(fs_context, entry_path) { + dbg.log2('TapeCloudGlacierBackend.restore._finalize_restore - entry:', entry_path); + + const entry = new NewlineReaderEntry(fs_context, entry_path); + let fh = null; + try { + fh = await entry.open(); + + const stat = await fh.stat(fs_context, { + xattr_get_keys: [ + GlacierBackend.XATTR_RESTORE_REQUEST, + ] + }); + + const days = Number(stat.xattr[GlacierBackend.XATTR_RESTORE_REQUEST]); + + // In case of invocation on the same file multiple times, + // this xattr will not be present hence `days` will be NaN + if (isNaN(days)) { + dbg.warn("TapeCloudGlacierBackend._finalize_restore: days is NaN - skipping restore for", entry_path); + return; + } + + const expires_on = GlacierBackend.generate_expiry( + new Date(), + days, + config.NSFS_GLACIER_EXPIRY_TIME_OF_DAY, + 
config.NSFS_GLACIER_EXPIRY_TZ, + ); + + // First set the expiry so that we don't lose the number of days in + // case of a partial failure. `replacexattr` first clears the xattrs + // and then proceeds to set the xattr which makes it highly prone to + // partial failure such that we lose the attribute forever and + // consequently never really process the restore request (until it + // is submitted again). + + await fh.replacexattr(fs_context, { + [GlacierBackend.XATTR_RESTORE_EXPIRY]: expires_on.toISOString(), + }); + + await fh.replacexattr(fs_context, undefined, GlacierBackend.XATTR_RESTORE_REQUEST); + } catch (error) { + dbg.error(`failed to process ${entry.path}`, error); + throw error; + } finally { + if (fh) await fh.close(fs_context); + } } } exports.TapeCloudGlacierBackend = TapeCloudGlacierBackend; +exports.TapeCloudUtils = TapeCloudUtils; diff --git a/src/test/unit_tests/test_nsfs_glacier_backend.js b/src/test/unit_tests/test_nsfs_glacier_backend.js index 2a061f8427..fe079bf754 100644 --- a/src/test/unit_tests/test_nsfs_glacier_backend.js +++ b/src/test/unit_tests/test_nsfs_glacier_backend.js @@ -1,7 +1,7 @@ /* Copyright (C) 2024 NooBaa */ 'use strict'; -const fs = require('fs'); +const { promises: fs } = require('fs'); const util = require('util'); const path = require('path'); const mocha = require('mocha'); @@ -14,11 +14,11 @@ const s3_utils = require('../../endpoint/s3/s3_utils'); const buffer_utils = require('../../util/buffer_utils'); const endpoint_stats_collector = require('../../sdk/endpoint_stats_collector'); const { NewlineReader } = require('../../util/file_reader'); -const { TapeCloudGlacierBackend } = require('../../sdk/nsfs_glacier_backend/tapecloud'); +const { TapeCloudGlacierBackend, TapeCloudUtils } = require('../../sdk/nsfs_glacier_backend/tapecloud'); const { PersistentLogger } = require('../../util/persistent_logger'); const { GlacierBackend } = require('../../sdk/nsfs_glacier_backend/backend'); +const nb_native = require('../../util/nb_native'); -const mkdtemp = util.promisify(fs.mkdtemp); const inspect = (x, max_arr = 5) => util.inspect(x, { colors: true, depth: null, maxArrayLength: max_arr }); function make_dummy_object_sdk() { @@ -58,7 +58,7 @@ mocha.describe('nsfs_glacier', async () => { glacier_ns._is_storage_class_supported = async () => true; mocha.before(async () => { - config.NSFS_GLACIER_LOGS_DIR = await mkdtemp(path.join(os.tmpdir(), 'nsfs-wal-')); + config.NSFS_GLACIER_LOGS_DIR = await fs.mkdtemp(path.join(os.tmpdir(), 'nsfs-wal-')); // Replace the logger by custom one @@ -90,8 +90,8 @@ mocha.describe('nsfs_glacier', async () => { const backend = new TapeCloudGlacierBackend(); // Patch backend for test - backend._migrate = async () => { /**noop */ }; - backend._recall = async () => { /**noop */ }; + backend._migrate = async () => true; + backend._recall = async () => true; backend._process_expired = async () => { /**noop*/ }; mocha.it('upload to GLACIER should work', async () => { @@ -168,6 +168,87 @@ mocha.describe('nsfs_glacier', async () => { assert(now <= md.restore_status.expiry_time.getTime()); }); + mocha.it('restore-object should not restore failed item', async () => { + const now = Date.now(); + const data = crypto.randomBytes(100); + const failed_restore_key = `${restore_key}_failured`; + const success_restore_key = `${restore_key}_success`; + + const failed_params = { + bucket: upload_bkt, + key: failed_restore_key, + storage_class: s3_utils.STORAGE_CLASS_GLACIER, + xattr, + days: 1, + source_stream: 
buffer_utils.buffer_to_read_stream(data) + }; + + const success_params = { + bucket: upload_bkt, + key: success_restore_key, + storage_class: s3_utils.STORAGE_CLASS_GLACIER, + xattr, + days: 1, + source_stream: buffer_utils.buffer_to_read_stream(data) + }; + + const failed_file_path = glacier_ns._get_file_path(failed_params); + const success_file_path = glacier_ns._get_file_path(success_params); + + const failure_backend = new TapeCloudGlacierBackend(); + failure_backend._migrate = async () => true; + failure_backend._process_expired = async () => { /**noop*/ }; + failure_backend._recall = async (_file, failure_recorder, success_recorder) => { + // This unintentionally also replicates duplicate entries in WAL + await failure_recorder(failed_file_path); + + // This unintentionally also replicates duplicate entries in WAL + await success_recorder(success_file_path); + + return false; + }; + + const upload_res_1 = await glacier_ns.upload_object(failed_params, dummy_object_sdk); + console.log('upload_object response', inspect(upload_res_1)); + + const upload_res_2 = await glacier_ns.upload_object(success_params, dummy_object_sdk); + console.log('upload_object response', inspect(upload_res_2)); + + const restore_res_1 = await glacier_ns.restore_object(failed_params, dummy_object_sdk); + assert(restore_res_1); + + const restore_res_2 = await glacier_ns.restore_object(success_params, dummy_object_sdk); + assert(restore_res_2); + + const fs_context = glacier_ns.prepare_fs_context(dummy_object_sdk); + + // Issue restore + await NamespaceFS.restore_wal._process(async file => { + await failure_backend.restore(fs_context, file, async () => { /*noop*/ }); + + // Don't delete the file + return false; + }); + + // Ensure success object is restored + const success_md = await glacier_ns.read_object_md(success_params, dummy_object_sdk); + + assert(!success_md.restore_status.ongoing); + + const expected_expiry = GlacierBackend.generate_expiry(new Date(), success_params.days, '', config.NSFS_GLACIER_EXPIRY_TZ); + assert(expected_expiry.getTime() >= success_md.restore_status.expiry_time.getTime()); + assert(now <= success_md.restore_status.expiry_time.getTime()); + + // Ensure failed object is NOT restored + const failure_stats = await nb_native().fs.stat( + fs_context, + failed_file_path, + ); + + assert(!failure_stats.xattr[GlacierBackend.XATTR_RESTORE_EXPIRY] || failure_stats.xattr[GlacierBackend.XATTR_RESTORE_EXPIRY] === ''); + assert(failure_stats.xattr[GlacierBackend.XATTR_RESTORE_REQUEST]); + }); + mocha.it('generate_expiry should round up the expiry', () => { const now = new Date(); const midnight = new Date(); @@ -227,4 +308,83 @@ mocha.describe('nsfs_glacier', async () => { assert(exp6.getUTCSeconds() === 0); }); }); + + mocha.describe('tapecloud_utils', () => { + const MOCK_TASK_SHOW_DATA = `Random irrelevant data to +Result Failure Code Failed time Node -- File name +Fail GLESM451W 2023/11/08T02:38:47 1 -- /ibm/gpfs/NoobaaTest/file.aaai +Fail GLESM451W 2023/11/08T02:38:47 1 -- /ibm/gpfs/NoobaaTest/file.aaaj +Fail GLESL401E 2023/11/08T02:38:44 1 -- /ibm/gpfs/NoobaaTest/noobaadata +Success - - - -- /ibm/gpfs/NoobaaTest/testingdata/file.aaaa +Success - - - -- /ibm/gpfs/NoobaaTest/testingdata/file.aaab +Success - - - -- /ibm/gpfs/NoobaaTest/testingdata/file.aaaj`; + + const MOCK_TASK_SHOW_SCRIPT = `#!/bin/bash +cat < { + config.NSFS_GLACIER_TAPECLOUD_BIN_DIR = await fs.mkdtemp(tapecloud_bin_temp); + + await fs.writeFile( + path.join(config.NSFS_GLACIER_TAPECLOUD_BIN_DIR, 
TapeCloudUtils.TASK_SHOW_SCRIPT), + MOCK_TASK_SHOW_SCRIPT, + ); + + await fs.chmod(path.join(config.NSFS_GLACIER_TAPECLOUD_BIN_DIR, TapeCloudUtils.TASK_SHOW_SCRIPT), 0o777); + }); + + mocha.it('record_task_status', async () => { + const expected_failed_records = [ + '/ibm/gpfs/NoobaaTest/file.aaai', + '/ibm/gpfs/NoobaaTest/file.aaaj', + '/ibm/gpfs/NoobaaTest/noobaadata', + ]; + const expected_success_records = [ + '/ibm/gpfs/NoobaaTest/testingdata/file.aaaa', + '/ibm/gpfs/NoobaaTest/testingdata/file.aaab', + '/ibm/gpfs/NoobaaTest/testingdata/file.aaaj', + ]; + + const failed_records = []; + const success_records = []; + + await TapeCloudUtils.record_task_status( + 0, + async record => { + failed_records.push(record); + }, + async record => { + success_records.push(record); + }, + ); + + assert.deepStrictEqual(failed_records, expected_failed_records); + assert.deepStrictEqual(success_records, expected_success_records); + + // Clear out the arrays + failed_records.length = 0; + success_records.length = 0; + + await TapeCloudUtils.record_task_status( + 0, + async record => { + failed_records.push(record); + } + ); + + assert.deepStrictEqual(failed_records, expected_failed_records); + assert.deepStrictEqual(success_records, []); + }); + + mocha.after(async () => { + config.NSFS_GLACIER_TAPECLOUD_BIN_DIR = init_tapedir_bin; + + await fs.rm(tapecloud_bin_temp, { recursive: true, force: true }); + }); + }); }); From 6e2ba6287a29ff4e56fc2a1c9597fbf46c9a68ff Mon Sep 17 00:00:00 2001 From: Utkarsh Srivastava Date: Tue, 28 May 2024 19:04:15 +0530 Subject: [PATCH 2/3] extend Ceph commit date to an year Signed-off-by: Utkarsh Srivastava --- src/test/system_tests/ceph_s3_tests/test_ceph_s3_deploy.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/system_tests/ceph_s3_tests/test_ceph_s3_deploy.sh b/src/test/system_tests/ceph_s3_tests/test_ceph_s3_deploy.sh index bb72b00011..70125033cf 100755 --- a/src/test/system_tests/ceph_s3_tests/test_ceph_s3_deploy.sh +++ b/src/test/system_tests/ceph_s3_tests/test_ceph_s3_deploy.sh @@ -38,7 +38,7 @@ commit_epoch=$(git show -s --format=%ci ${CEPH_TESTS_VERSION} | awk '{print $1}' commit_date=$(date -d ${commit_epoch} +%s) current_date=$(date +%s) -max_days="180" +max_days="360" if [ $((current_date-commit_date)) -gt $((3600*24*${max_days})) ] then echo "ceph tests were not updated for ${max_days} days, Exiting" From c8e1c7e910ac0cad5f2eb8006c31754e96143612 Mon Sep 17 00:00:00 2001 From: Utkarsh Srivastava Date: Tue, 21 May 2024 16:16:23 +0530 Subject: [PATCH 3/3] fix broken expiry calculation test Signed-off-by: Utkarsh Srivastava fix linting issue Signed-off-by: Utkarsh Srivastava (cherry picked from commit 755da5fb9f8ec080ef0c435d4f342367c1ac70e2) --- .../unit_tests/test_nsfs_glacier_backend.js | 72 ++++++++++--------- 1 file changed, 38 insertions(+), 34 deletions(-) diff --git a/src/test/unit_tests/test_nsfs_glacier_backend.js b/src/test/unit_tests/test_nsfs_glacier_backend.js index fe079bf754..21685f039e 100644 --- a/src/test/unit_tests/test_nsfs_glacier_backend.js +++ b/src/test/unit_tests/test_nsfs_glacier_backend.js @@ -37,6 +37,35 @@ function make_dummy_object_sdk() { }; } +/** + * @param {Date} date - the date to be asserted + * @param {Date} from - the date from where the offset is to be calculated + * @param {{ day_offset: number, hour?: number, min?: number, sec?: number }} expected + * @param {'UTC' | 'LOCAL'} [tz='LOCAL'] + */ +function assert_date(date, from, expected, tz = 'LOCAL') { + const that_if_not_this = (arg1, arg2) 
=> { + if (arg1 === undefined) return arg2; + return arg1; + }; + + if (tz === 'UTC') { + from.setUTCDate(from.getUTCDate() + expected.day_offset); + + assert(date.getUTCDate() === from.getUTCDate()); + assert(date.getUTCHours() === that_if_not_this(expected.hour, from.getUTCHours())); + assert(date.getUTCMinutes() === that_if_not_this(expected.min, from.getUTCMinutes())); + assert(date.getUTCSeconds() === that_if_not_this(expected.sec, from.getUTCSeconds())); + } else { + from.setDate(from.getDate() + expected.day_offset); + + assert(date.getDate() === from.getDate()); + assert(date.getHours() === that_if_not_this(expected.hour, from.getHours())); + assert(date.getMinutes() === that_if_not_this(expected.min, from.getMinutes())); + assert(date.getSeconds() === that_if_not_this(expected.sec, from.getSeconds())); + } +} + mocha.describe('nsfs_glacier', async () => { const src_bkt = 'src'; @@ -251,61 +280,36 @@ mocha.describe('nsfs_glacier', async () => { mocha.it('generate_expiry should round up the expiry', () => { const now = new Date(); - const midnight = new Date(); - midnight.setUTCHours(0, 0, 0, 0); + const pivot_time = new Date(now); const exp1 = GlacierBackend.generate_expiry(now, 1, '', 'UTC'); - assert(exp1.getUTCDate() === now.getUTCDate() + 1); - assert(exp1.getUTCHours() === now.getUTCHours()); - assert(exp1.getUTCMinutes() === now.getUTCMinutes()); - assert(exp1.getUTCSeconds() === now.getUTCSeconds()); + assert_date(exp1, now, { day_offset: 1 }, 'UTC'); const exp2 = GlacierBackend.generate_expiry(now, 10, '', 'UTC'); - assert(exp2.getUTCDate() === now.getUTCDate() + 10); - assert(exp2.getUTCHours() === now.getUTCHours()); - assert(exp2.getUTCMinutes() === now.getUTCMinutes()); - assert(exp2.getUTCSeconds() === now.getUTCSeconds()); - - const pivot_time = new Date(now); + assert_date(exp2, now, { day_offset: 10 }, 'UTC'); const exp3 = GlacierBackend.generate_expiry(now, 10, '02:05:00', 'UTC'); pivot_time.setUTCHours(2, 5, 0, 0); - if (now <= pivot_time) { - assert(exp3.getUTCDate() === now.getUTCDate() + 10); + assert_date(exp3, now, { day_offset: 10, hour: 2, min: 5, sec: 0 }, 'UTC'); } else { - assert(exp3.getUTCDate() === now.getUTCDate() + 10 + 1); + assert_date(exp3, now, { day_offset: 10 + 1, hour: 2, min: 5, sec: 0 }, 'UTC'); } - assert(exp3.getUTCHours() === 2); - assert(exp3.getUTCMinutes() === 5); - assert(exp3.getUTCSeconds() === 0); const exp4 = GlacierBackend.generate_expiry(now, 1, '02:05:00', 'LOCAL'); pivot_time.setHours(2, 5, 0, 0); - if (now <= pivot_time) { - assert(exp4.getDate() === now.getDate() + 1); + assert_date(exp4, now, { day_offset: 1, hour: 2, min: 5, sec: 0 }, 'LOCAL'); } else { - assert(exp4.getDate() === now.getDate() + 1 + 1); + assert_date(exp4, now, { day_offset: 1 + 1, hour: 2, min: 5, sec: 0 }, 'LOCAL'); } - assert(exp4.getHours() === 2); - assert(exp4.getMinutes() === 5); - assert(exp4.getSeconds() === 0); const exp5 = GlacierBackend.generate_expiry(now, 1, `${now.getHours()}:${now.getMinutes()}:${now.getSeconds()}`, 'LOCAL'); - - assert(exp5.getDate() === now.getDate() + 1); - assert(exp5.getHours() === now.getHours()); - assert(exp5.getMinutes() === now.getMinutes()); - assert(exp5.getSeconds() === now.getSeconds()); + assert_date(exp5, now, { day_offset: 1 }, 'LOCAL'); const some_date = new Date("2004-05-08"); const exp6 = GlacierBackend.generate_expiry(some_date, 1.5, `02:05:00`, 'UTC'); - - assert(exp6.getUTCDate() === some_date.getUTCDate() + 1 + 1); - assert(exp6.getUTCHours() === 2); - assert(exp6.getUTCMinutes() === 5); - 
assert(exp6.getUTCSeconds() === 0); + assert_date(exp6, some_date, { day_offset: 1 + 1, hour: 2, min: 5, sec: 0 }, 'UTC'); }); });
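
To close, a hypothetical extra test case in the same style, assuming it sits inside the same mocha.describe block as the cases above (it is not part of the patch); it exercises the rule the existing assertions already check, namely that a request arriving after the configured time of day rolls the expiry forward by one more day:

mocha.it('generate_expiry adds a day when the request is past the pivot time', () => {
    // 10:00 UTC is already past the 02:05 pivot, so 1 requested day becomes 1 + 1
    const some_date = new Date('2004-05-08T10:00:00Z');
    const exp = GlacierBackend.generate_expiry(some_date, 1, '02:05:00', 'UTC');
    assert_date(exp, some_date, { day_offset: 1 + 1, hour: 2, min: 5, sec: 0 }, 'UTC');
});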