Merge pull request #7966 from romayalon/romy-5.15-backports
5.15 | Backports
nimrod-becker authored Apr 10, 2024
2 parents d4917eb + e7931fe commit 7add82e
Showing 17 changed files with 736 additions and 47 deletions.
25 changes: 22 additions & 3 deletions config.js
@@ -751,9 +751,19 @@ config.NSFS_GLACIER_MIGRATE_INTERVAL = 15 * 60 * 1000;
// of `manage_nsfs glacier restore`
config.NSFS_GLACIER_RESTORE_INTERVAL = 15 * 60 * 1000;

// NSFS_GLACIER_EXPIRY_INTERVAL indicates the interval between runs
// of `manage_nsfs glacier expiry`
config.NSFS_GLACIER_EXPIRY_INTERVAL = 12 * 60 * 60 * 1000;
// NSFS_GLACIER_EXPIRY_RUN_TIME must be in the format hh:mm and specifies
// when NooBaa should allow the glacier expiry process to run
// NOTE: This time is interpreted in the timezone specified by
// NSFS_GLACIER_EXPIRY_TZ
config.NSFS_GLACIER_EXPIRY_RUN_TIME = '03:00';

// NSFS_GLACIER_EXPIRY_RUN_DELAY_LIMIT_MINS configures the delay
// tolerance in minutes.
//
// e.g. if the expiry run time is set to 03:00 and the tolerance is
// set to 2 mins then the expiry can trigger until 03:02 (unless it
// has already been triggered between 03:00 - 03:02).
config.NSFS_GLACIER_EXPIRY_RUN_DELAY_LIMIT_MINS = 2 * 60;
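// For example: with NSFS_GLACIER_EXPIRY_RUN_TIME = '03:00', the default delay
// limit above of 120 mins and NSFS_GLACIER_EXPIRY_TZ = 'LOCAL', the time-based
// trigger can fire anywhere between 03:00 and 05:00 local time, and at most
// once within that window (see is_desired_time in manage_nsfs_glacier.js).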

/** @type {'UTC' | 'LOCAL'} */
config.NSFS_GLACIER_EXPIRY_TZ = 'LOCAL';
@@ -812,6 +822,15 @@ config.NSFS_WHITELIST = [];
config.NSFS_HEALTH_ENDPOINT_RETRY_COUNT = 3;
config.NSFS_HEALTH_ENDPOINT_RETRY_DELAY = 10;


/** @type {'file' | 'executable'} */
config.NC_MASTER_KEYS_STORE_TYPE = 'file';
// unless overridden in config.json, the default is config_dir/master_keys.json
config.NC_MASTER_KEYS_FILE_LOCATION = '';
config.NC_MASTER_KEYS_GET_EXECUTABLE = '';
config.NC_MASTER_KEYS_PUT_EXECUTABLE = '';
config.NC_MASTER_KEYS_MANAGER_REFRESH_THRESHOLD = 5 * 60 * 1000; // 5 minutes

//Quota
config.QUOTA_LOW_THRESHOLD = 80;
config.QUOTA_MAX_OBJECTS = Number.MAX_SAFE_INTEGER;
68 changes: 68 additions & 0 deletions docs/dev_guide/NonContainerizedDeveloperCustomizations.md
@@ -337,6 +337,74 @@ Example:
3. systemctl restart noobaa_nsfs
```

## 19. Set Master Keys Store Type -
**Description -** This flag will set the type of the master keys store used by NooBaa.

**Configuration Key -** NC_MASTER_KEYS_STORE_TYPE

**Type -** string

**Default -** 'file'
**Steps -**
```
1. Open /path/to/config_dir/config.json file.
2. Set the config key -
Example:
"NC_MASTER_KEYS_STORE_TYPE": 'executable'
3. systemctl restart noobaa_nsfs
```
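
As a rough guide: with the default `'file'` type the master keys are kept at the file location described in section 20 below, while `'executable'` is expected to delegate reading and updating the keys to the GET and PUT scripts described in sections 21 and 22.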

## 20. Set Master Keys File Location -
**Description -** This flag will set the location of the master keys file used by NooBaa.

**Configuration Key -** NC_MASTER_KEYS_FILE_LOCATION

**Type -** string

**Default -** '/etc/noobaa.conf.d/master_keys.json'
**Steps -**
```
1. Open /path/to/config_dir/config.json file.
2. Set the config key -
Example:
"NC_MASTER_KEYS_FILE_LOCATION": '/private/tmp/master_keys.json'
3. systemctl restart noobaa_nsfs
```

## 21. Set Master Keys GET executable script -
**Description -** This flag will set the location of the executable script for reading the master keys file used by NooBaa.

**Configuration Key -** NC_MASTER_KEYS_GET_EXECUTABLE

**Type -** string

**Default -** undefined
**Steps -**
```
1. Open /path/to/config_dir/config.json file.
2. Set the config key -
Example:
"NC_MASTER_KEYS_GET_EXECUTABLE": '/private/tmp/get_master_keys.sh'
3. systemctl restart noobaa_nsfs
```
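
The exact calling convention of the GET executable is not spelled out here; the sketch below assumes NooBaa runs the configured script and reads the master keys JSON from its stdout (the actual contract is defined by `nc_master_key_manager`). The script name, the `MASTER_KEYS_PATH` environment variable and the file-based source are illustrative assumptions only - a real script could just as well fetch the keys from a vault or another secret store.

```
#!/usr/bin/env node
// Hypothetical get_master_keys.js - prints the master keys JSON to stdout,
// assuming that is what NooBaa expects from NC_MASTER_KEYS_GET_EXECUTABLE.
'use strict';
const fs = require('fs');

// MASTER_KEYS_PATH is an illustrative assumption, not a NooBaa variable
const KEYS_PATH = process.env.MASTER_KEYS_PATH || '/private/tmp/master_keys.json';

try {
    process.stdout.write(fs.readFileSync(KEYS_PATH, 'utf8'));
} catch (err) {
    // a non-zero exit code signals that the keys could not be fetched
    console.error('get_master_keys failed:', err.message);
    process.exit(1);
}
```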

## 22. Set Master Keys PUT executable script -
**Description -** This flag will set the location of the executable script for updating the master keys file used by NooBaa.

**Configuration Key -** NC_MASTER_KEYS_PUT_EXECUTABLE

**Type -** string

**Default -** undefined
**Steps -**
```
1. Open /path/to/config_dir/config.json file.
2. Set the config key -
Example:
"NC_MASTER_KEYS_PUT_EXECUTABLE": '/private/tmp/put_master_keys.sh'
3. systemctl restart noobaa_nsfs
```
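
Correspondingly, a PUT script might look like the sketch below; it assumes NooBaa pipes the updated master keys JSON to the script's stdin. As above, the path and environment variable are illustrative assumptions, and a real script could push the payload to a secret store instead of a local file.

```
#!/usr/bin/env node
// Hypothetical put_master_keys.js - persists the master keys JSON received on
// stdin, assuming that is how NC_MASTER_KEYS_PUT_EXECUTABLE is invoked.
'use strict';
const fs = require('fs');

// MASTER_KEYS_PATH is an illustrative assumption, not a NooBaa variable
const KEYS_PATH = process.env.MASTER_KEYS_PATH || '/private/tmp/master_keys.json';

let payload = '';
process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => { payload += chunk; });
process.stdin.on('end', () => {
    try {
        JSON.parse(payload); // sanity check: stdin must be valid JSON
        fs.writeFileSync(KEYS_PATH, payload, { mode: 0o600 });
    } catch (err) {
        console.error('put_master_keys failed:', err.message);
        process.exit(1);
    }
});
```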

## Config.json example
```
> cat /path/to/config_dir/config.json
40 changes: 27 additions & 13 deletions src/cmd/manage_nsfs.js
@@ -25,6 +25,7 @@ const { print_usage } = require('../manage_nsfs/manage_nsfs_help_utils');
const { TYPES, ACTIONS, VALID_OPTIONS, OPTION_TYPE, FROM_FILE, BOOLEAN_STRING_VALUES,
LIST_ACCOUNT_FILTERS, LIST_BUCKET_FILTERS, GLACIER_ACTIONS } = require('../manage_nsfs/manage_nsfs_constants');
const NoobaaEvent = require('../manage_nsfs/manage_nsfs_events_utils').NoobaaEvent;
const nc_mkm = require('../manage_nsfs/nc_master_key_manager').get_instance();

function throw_cli_error(error_code, detail, event_arg) {
const error_event = NSFS_CLI_ERROR_EVENT_MAP[error_code.code];
@@ -200,7 +201,7 @@ async function add_bucket(data) {
const fs_context = native_fs_utils.get_process_fs_context(config_root_backend);
const bucket_conf_path = get_config_file_path(buckets_dir_path, data.name);
const exists = await native_fs_utils.is_path_exists(fs_context, bucket_conf_path);
if (exists) throw_cli_error(ManageCLIError.BucketAlreadyExists, data.name, {bucket: data.name});
if (exists) throw_cli_error(ManageCLIError.BucketAlreadyExists, data.name, { bucket: data.name });
data._id = mongo_utils.mongoObjectId();
data.owner_account = account_id;
const data_json = JSON.stringify(data);
@@ -209,7 +210,7 @@ async function add_bucket(data) {
// for validating against the schema we need an object, hence we parse it back to object
nsfs_schema_utils.validate_bucket_schema(JSON.parse(data_json));
await native_fs_utils.create_config_file(fs_context, buckets_dir_path, bucket_conf_path, data_json);
write_stdout_response(ManageCLIResponse.BucketCreated, data_json, {bucket: data.name});
write_stdout_response(ManageCLIResponse.BucketCreated, data_json, { bucket: data.name });
}

/** verify_bucket_owner will check if the bucket_owner has an account
@@ -419,6 +420,7 @@ async function fetch_existing_account_data(target) {
get_config_file_path(accounts_dir_path, target.name) :
get_symlink_config_file_path(access_keys_dir_path, target.access_keys[0].access_key);
source = await get_config_data(account_path, true);
source.access_keys = await nc_mkm.decrypt_access_keys(source);
} catch (err) {
dbg.log1('NSFS Manage command: Could not find account', target, err);
if (err.code === 'ENOENT') {
@@ -452,17 +454,20 @@ async function add_account(data) {
throw_cli_error(err_code, event_arg, {account: event_arg});
}
data._id = mongo_utils.mongoObjectId();
data = JSON.stringify(data);
const encrypted_account = await nc_mkm.encrypt_access_keys(data);
data.master_key_id = encrypted_account.master_key_id;
const encrypted_data = JSON.stringify(encrypted_account);
// We take an object that was stringified
// (it unwraps the sensitive strings, converts creation_date to string and removes undefined parameters)
// for validating against the schema we need an object, hence we parse it back to object
nsfs_schema_utils.validate_account_schema(JSON.parse(data));
await native_fs_utils.create_config_file(fs_context, accounts_dir_path, account_config_path, data);
nsfs_schema_utils.validate_account_schema(JSON.parse(encrypted_data));
await native_fs_utils.create_config_file(fs_context, accounts_dir_path, account_config_path, encrypted_data);
await native_fs_utils._create_path(access_keys_dir_path, fs_context, config.BASE_MODE_CONFIG_DIR);
await nb_native().fs.symlink(fs_context, account_config_relative_path, account_config_access_key_path);
write_stdout_response(ManageCLIResponse.AccountCreated, data, {account: event_arg});
write_stdout_response(ManageCLIResponse.AccountCreated, data, { account: event_arg });
}


async function update_account(data) {
await validate_account_args(data, ACTIONS.UPDATE);

@@ -475,12 +480,14 @@

if (!update_name && !update_access_key) {
const account_config_path = get_config_file_path(accounts_dir_path, data.name);
data = JSON.stringify(data);
const encrypted_account = await nc_mkm.encrypt_access_keys(data);
data.master_key_id = encrypted_account.master_key_id;
const encrypted_data = JSON.stringify(encrypted_account);
// We take an object that was stringified
// (it unwraps the sensitive strings, converts creation_date to string and removes undefined parameters)
// for validating against the schema we need an object, hence we parse it back to object
nsfs_schema_utils.validate_account_schema(JSON.parse(data));
await native_fs_utils.update_config_file(fs_context, accounts_dir_path, account_config_path, data);
nsfs_schema_utils.validate_account_schema(JSON.parse(encrypted_data));
await native_fs_utils.update_config_file(fs_context, accounts_dir_path, account_config_path, encrypted_data);
write_stdout_response(ManageCLIResponse.AccountUpdated, data);
return;
}
@@ -499,16 +506,21 @@
const err_code = name_exists ? ManageCLIError.AccountNameAlreadyExists : ManageCLIError.AccountAccessKeyAlreadyExists;
throw_cli_error(err_code);
}
data = JSON.stringify(_.omit(data, ['new_name', 'new_access_key']));
data = _.omit(data, ['new_name', 'new_access_key']);
const encrypted_account = await nc_mkm.encrypt_access_keys(data);
data.master_key_id = encrypted_account.master_key_id;
const encrypted_data = JSON.stringify(encrypted_account);
data = JSON.stringify(data);

// We take an object that was stringified
// (it unwraps the sensitive strings, converts creation_date to string and removes undefined parameters)
// for validating against the schema we need an object, hence we parse it back to object
nsfs_schema_utils.validate_account_schema(JSON.parse(data));
nsfs_schema_utils.validate_account_schema(JSON.parse(encrypted_data));
if (update_name) {
await native_fs_utils.create_config_file(fs_context, accounts_dir_path, new_account_config_path, data);
await native_fs_utils.create_config_file(fs_context, accounts_dir_path, new_account_config_path, encrypted_data);
await native_fs_utils.delete_config_file(fs_context, accounts_dir_path, cur_account_config_path);
} else if (update_access_key) {
await native_fs_utils.update_config_file(fs_context, accounts_dir_path, cur_account_config_path, data);
await native_fs_utils.update_config_file(fs_context, accounts_dir_path, cur_account_config_path, encrypted_data);
}
// TODO: safe_unlink can be better but the current impl causing ELOOP - Too many levels of symbolic links
// need to find a better way for atomic unlinking of symbolic links
@@ -560,6 +572,7 @@ async function get_account_status(data, show_secrets) {
get_symlink_config_file_path(access_keys_dir_path, data.access_keys[0].access_key) :
get_config_file_path(accounts_dir_path, data.name);
const config_data = await get_config_data(account_path, show_secrets);
if (config_data.access_keys) config_data.access_keys = await nc_mkm.decrypt_access_keys(config_data);
write_stdout_response(ManageCLIResponse.AccountStatus, config_data);
} catch (err) {
if (_.isUndefined(data.name)) {
@@ -656,6 +669,7 @@ async function list_config_files(type, config_path, wide, show_secrets, filters)
if (wide || should_filter) {
const full_path = path.join(config_path, entry.name);
const data = await get_config_data(full_path, show_secrets || should_filter);
if (data.access_keys) data.access_keys = await nc_mkm.decrypt_access_keys(data);
if (should_filter && !filter_list_item(type, data, filters)) return undefined;
// remove secrets on !show_secrets && should filter
return wide ? _.omit(data, show_secrets ? [] : ['access_keys']) : { name: entry.name.slice(0, entry.name.indexOf('.json')) };
86 changes: 82 additions & 4 deletions src/manage_nsfs/manage_nsfs_glacier.js
@@ -69,12 +69,65 @@ async function process_expiry() {
const fs_context = native_fs_utils.get_process_fs_context();

await lock_and_run(fs_context, SCAN_LOCK, async () => {
if (!(await time_exceeded(fs_context, config.NSFS_GLACIER_EXPIRY_INTERVAL, GlacierBackend.EXPIRY_TIMESTAMP_FILE))) return;
const backend = getGlacierBackend();
if (
await backend.low_free_space() ||
await is_desired_time(
fs_context,
new Date(),
config.NSFS_GLACIER_EXPIRY_RUN_TIME,
config.NSFS_GLACIER_EXPIRY_RUN_DELAY_LIMIT_MINS,
GlacierBackend.EXPIRY_TIMESTAMP_FILE,
)
) {
await backend.expiry(fs_context);
await record_current_time(fs_context, GlacierBackend.EXPIRY_TIMESTAMP_FILE);
}
});
}

/**
 * is_desired_time returns true if the given time falls within the window
 * [desire, desire + delay_limit_mins] and the last run recorded in
 * timestamp_file is not already inside that window
* @param {nb.NativeFSContext} fs_context
* @param {Date} current
* @param {string} desire time in format 'hh:mm'
* @param {number} delay_limit_mins
* @param {string} timestamp_file
* @returns {Promise<boolean>}
*/
async function is_desired_time(fs_context, current, desire, delay_limit_mins, timestamp_file) {
const [desired_hour, desired_min] = desire.split(':').map(Number);
if (
isNaN(desired_hour) ||
isNaN(desired_min) ||
(desired_hour < 0 || desired_hour >= 24) ||
(desired_min < 0 || desired_min >= 60)
) {
throw new Error('invalid desired_time - must be hh:mm');
}

await getGlacierBackend().expiry(fs_context);
await record_current_time(fs_context, GlacierBackend.EXPIRY_TIMESTAMP_FILE);
});
const min_time = get_tz_date(desired_hour, desired_min, 0, config.NSFS_GLACIER_EXPIRY_TZ);
const max_time = get_tz_date(desired_hour, desired_min + delay_limit_mins, 0, config.NSFS_GLACIER_EXPIRY_TZ);

if (current >= min_time && current <= max_time) {
try {
const { data } = await nb_native().fs.readFile(fs_context, path.join(config.NSFS_GLACIER_LOGS_DIR, timestamp_file));
const lastrun = new Date(data.toString());

// Last run should NOT be in this window
if (lastrun >= min_time && lastrun <= max_time) return false;
} catch (error) {
if (error.code === 'ENOENT') return true;
console.error('failed to read last run timestamp:', error, 'timestamp_file:', timestamp_file);

throw error;
}

return true;
}

return false;
}

/**
@@ -134,6 +187,31 @@ async function run_glacier_operation(fs_context, log_namespace, cb) {
}
}

/**
* @param {number} hours
* @param {number} mins
* @param {number} secs
* @param {'UTC' | 'LOCAL'} tz
* @returns {Date}
*/
function get_tz_date(hours, mins, secs, tz) {
const date = new Date();

if (tz === 'UTC') {
date.setUTCHours(hours);
date.setUTCMinutes(mins);
date.setUTCSeconds(secs);
date.setUTCMilliseconds(0);
} else {
date.setHours(hours);
date.setMinutes(mins);
date.setSeconds(secs);
date.setMilliseconds(0);
}

return date;
}

/**
* lock_and_run acquires a flock and calls the given callback after
* acquiring the lock