Merge pull request #7962 from romayalon/romy-5.15-backports
5.15 | Backports
nimrod-becker authored Apr 10, 2024
2 parents fa5b1e5 + 7b93ca2 commit a1b4b8a
Showing 22 changed files with 742 additions and 397 deletions.
2 changes: 1 addition & 1 deletion .nvmrc
@@ -1 +1 @@
-20.9.0
+20.11.0
35 changes: 34 additions & 1 deletion config.js
@@ -607,6 +607,11 @@ config.WORM_ENABLED = false;
config.NAMESPACE_MONITOR_ENABLED = true;
config.NAMESPACE_MONITOR_DELAY = 3 * 60 * 1000;

+//////////////////////////////////
+// NAMESPACE MODE CALC //
+//////////////////////////////////
+
+config.NS_MAX_ALLOWED_IO_ERRORS = 9;
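
The new constant caps how many I/O errors the namespace monitor will tolerate before it flags a namespace resource. A minimal sketch of how such a threshold typically drives a mode calculation (the counter and the mode names here are illustrative, not NooBaa's actual internals):

    // Illustrative sketch: a namespace resource whose recent I/O error
    // count exceeds the threshold is reported as degraded.
    function calc_namespace_mode(io_errors_count) {
        return io_errors_count > config.NS_MAX_ALLOWED_IO_ERRORS ? 'IO_ERRORS' : 'OPTIMAL';
    }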

////////////////////////////////
// BUCKET REPLICATOR //
@@ -727,7 +732,7 @@ config.NSFS_EXIT_EVENTS_TIME_FRAME_MIN = 24 * 60; // per day
config.NSFS_MAX_EXIT_EVENTS_PER_TIME_FRAME = 10; // allow max 10 failed forks per day

config.NSFS_GLACIER_LOGS_DIR = '/var/run/noobaa-nsfs/wal';
-config.NSFS_GLACIER_LOGS_MAX_INTERVAL = 15 * 60 * 1000;
+config.NSFS_GLACIER_LOGS_POLL_INTERVAL = 10 * 1000;

// NSFS_GLACIER_ENABLED can override internal autodetection and will force
// the use of restore for all objects.
@@ -756,6 +761,34 @@ config.NSFS_GLACIER_EXPIRY_TZ = 'LOCAL';
// Format must be HH:MM:SS
config.NSFS_GLACIER_EXPIRY_TIME_OF_DAY = '00:00:00';

+config.NSFS_STATFS_CACHE_SIZE = 10000;
+config.NSFS_STATFS_CACHE_EXPIRY_MS = 1 * 1000;
+
+// NSFS_LOW_FREE_SPACE_CHECK_ENABLED, if set to true, will use the
+// below thresholds to determine if writes should be denied even
+// before we hit ENOSPC from the filesystem.
+config.NSFS_LOW_FREE_SPACE_CHECK_ENABLED = false;
+
+// NSFS_LOW_FREE_SPACE_MB controls how much free space, in
+// megabytes, NooBaa considers too low to perform `PUT`
+// operations safely.
+config.NSFS_LOW_FREE_SPACE_MB = 8 * 1024;
+
+// NSFS_LOW_FREE_SPACE_PERCENT controls what fraction of free
+// space NooBaa considers too low to perform `PUT`
+// operations safely.
+config.NSFS_LOW_FREE_SPACE_PERCENT = 0.08;
+
+// NSFS_LOW_FREE_SPACE_MB_UNLEASH controls how much free space,
+// in megabytes, NooBaa considers enough to resume `PUT`
+// operations safely.
+config.NSFS_LOW_FREE_SPACE_MB_UNLEASH = 10 * 1024;
+
+// NSFS_LOW_FREE_SPACE_PERCENT_UNLEASH controls what fraction of
+// free space NooBaa considers enough to resume `PUT`
+// operations safely.
+config.NSFS_LOW_FREE_SPACE_PERCENT_UNLEASH = 0.10;
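
Taken together, these knobs form a hysteresis band: writes are denied once free space falls below the low-space floor, and allowed again only after it climbs back above the higher "unleash" mark, which prevents flapping around a single threshold. A minimal sketch of that gating logic, assuming a statfs-style probe supplies the numbers (the `deny_writes` flag and `update_write_gate` helper are illustrative, not NooBaa's actual code):

    // Sketch: hysteresis gating of PUT operations on free space.
    // `free_mb` and `free_ratio` would come from a cached statfs()
    // result (see the NSFS_STATFS_CACHE_* settings above).
    let deny_writes = false;
    function update_write_gate(free_mb, free_ratio) {
        if (!config.NSFS_LOW_FREE_SPACE_CHECK_ENABLED) return;
        if (free_mb < config.NSFS_LOW_FREE_SPACE_MB ||
            free_ratio < config.NSFS_LOW_FREE_SPACE_PERCENT) {
            deny_writes = true; // below the floor: stop accepting PUTs
        } else if (free_mb > config.NSFS_LOW_FREE_SPACE_MB_UNLEASH &&
            free_ratio > config.NSFS_LOW_FREE_SPACE_PERCENT_UNLEASH) {
            deny_writes = false; // back above the band: resume PUTs
        }
    }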

////////////////////////////
// NSFS NON CONTAINERIZED //
////////////////////////////
20 changes: 20 additions & 0 deletions src/api/pool_api.js
@@ -377,6 +377,26 @@ module.exports = {
}
},

+    update_last_monitoring: {
+        doc: 'Update last namespace monitoring',
+        method: 'POST',
+        params: {
+            type: 'object',
+            required: ['namespace_resource_id', 'last_monitoring'],
+            properties: {
+                last_monitoring: {
+                    idate: true,
+                },
+                namespace_resource_id: {
+                    objectid: true
+                },
+            }
+        },
+        auth: {
+            system: 'admin'
+        }
+    },
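
For context, a caller would invoke the new RPC roughly like this (the `client` handle and the id source are illustrative; the parameter shapes follow the schema above, where `idate` is a millisecond timestamp and `objectid` a Mongo-style id string):

    // Sketch: updating the last-monitoring timestamp for a namespace resource.
    await client.pool.update_last_monitoring({
        namespace_resource_id: ns_resource_id, // an objectid string
        last_monitoring: Date.now(),           // idate: ms since epoch
    });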

    scale_hosts_pool: {
        doc: 'Change the pool\'s underlying host count',
        method: 'POST',
29 changes: 11 additions & 18 deletions src/endpoint/s3/ops/s3_get_bucket_policy_status.js
@@ -2,6 +2,7 @@
'use strict';

const S3Error = require('../s3_errors').S3Error;
+const _ = require('lodash');

/**
* https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html
@@ -16,27 +17,19 @@ async function get_bucket_policy_status(req) {
// TODO: implemented according to the current implementation of authorize_request_policy; should be updated when authorize_request_policy changes
// full public policy definition: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
function _is_policy_public(policy) {
-    for (const statement of policy.statement) {
-        let principal_wildcard = false;
-        let resource_wildcard = false;
-        if (statement.effect === 'deny') {
-            return false;
-        }
-        for (const principal of statement.principal) {
-            if (principal.unwrap() === '*') {
-                principal_wildcard = true;
-            }
-        }
-        for (const resource of statement.resource) {
-            if ((/[?*]/).test(resource)) {
-                resource_wildcard = true;
+    for (const statement of policy.Statement) {
+        if (statement.Effect === 'Allow' && statement.Principal) {
+            const statement_principal = statement.Principal.AWS ? statement.Principal.AWS : statement.Principal;
+            // although redundant, it's technically possible to have both a wildcard and a specific principal;
+            // in that case the wildcard principal overrides the specific one
+            for (const principal of _.flatten([statement_principal])) {
+                if (principal.unwrap() === '*') {
+                    return true;
+                }
            }
        }
-        if (!principal_wildcard || !resource_wildcard) {
-            return false;
-        }
    }
-    return true;
+    return false;
}
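
The rewritten check mirrors the AWS policy-status semantics more closely: a policy is public as soon as any `Allow` statement grants to the wildcard principal, and non-public otherwise (a `Deny` statement no longer short-circuits the whole policy). Illustrative inputs, mocking the SensitiveString-style principal wrappers that `unwrap()` implies:

    // -> true: an Allow statement grants to the wildcard principal
    _is_policy_public({ Statement: [{
        Effect: 'Allow',
        Principal: { AWS: [{ unwrap: () => '*' }] },
        Action: ['s3:GetObject'],
        Resource: ['arn:aws:s3:::demo/*'],
    }] });
    // -> false: only a specific, named principal is allowed
    _is_policy_public({ Statement: [{
        Effect: 'Allow',
        Principal: { AWS: [{ unwrap: () => 'alice' }] },
        Action: ['s3:GetObject'],
        Resource: ['arn:aws:s3:::demo/*'],
    }] });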

module.exports = {
2 changes: 1 addition & 1 deletion src/endpoint/s3/ops/s3_get_object_uploadId.js
@@ -37,7 +37,7 @@ async function get_object_uploadId(req) {
        UploadId: req.query.uploadId,
        Initiator: s3_utils.DEFAULT_S3_USER,
        Owner: s3_utils.DEFAULT_S3_USER,
-        StorageClass: s3_utils.STORAGE_CLASS_STANDARD,
+        StorageClass: s3_utils.parse_storage_class(reply.storage_class),
        MaxParts: max,
        PartNumberMarker: num_marker,
        IsTruncated: reply.is_truncated,
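The response now reports the upload's stored storage class instead of a hardcoded `STANDARD`. A plausible shape for such a parser — the actual `s3_utils.parse_storage_class` may differ — is:

    // Sketch: map a stored storage_class string onto a supported
    // S3 value, defaulting to STANDARD when none was recorded.
    function parse_storage_class(storage_class) {
        if (!storage_class) return 'STANDARD';
        if (storage_class === 'STANDARD') return 'STANDARD';
        if (storage_class === 'GLACIER') return 'GLACIER';
        throw new Error(`No such storage class ${storage_class}`);
    }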
5 changes: 0 additions & 5 deletions src/endpoint/s3/s3_errors.js
@@ -546,11 +546,6 @@ S3Error.InvalidObjectStorageClass = Object.freeze({
    message: 'Restore is not allowed for the object\'s current storage class.',
    http_code: 403,
});
-S3Error.StorageClassNotImplemented = Object.freeze({
-    code: 'NotImplemented',
-    message: 'This storage class is not implemented.',
-    http_code: 501,
-});

S3Error.RPC_ERRORS_TO_S3 = Object.freeze({
UNAUTHORIZED: S3Error.AccessDenied,
2 changes: 1 addition & 1 deletion src/endpoint/s3/s3_rest.js
Expand Up @@ -221,7 +221,7 @@ async function authorize_request_policy(req) {

    const is_anon = !(auth_token && auth_token.access_key);
    if (is_anon) {
-        authorize_anonymous_access(s3_policy, method, arn_path, req);
+        await authorize_anonymous_access(s3_policy, method, arn_path, req);
        return;
    }
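
The missing `await` meant a denial thrown by the async anonymous-access check was never waited on: the handler fell through to `return` as if access had been granted, and the rejection surfaced only as an unhandled promise rejection. In general terms:

    // Sketch: an async check that throws on denial must be awaited,
    // otherwise its rejection never reaches the caller's try/catch.
    async function check_anon_access(req) {
        if (!is_allowed(req)) throw new Error('AccessDenied'); // is_allowed is illustrative
    }
    // check_anon_access(req);       // bug: request proceeds, rejection unhandled
    // await check_anon_access(req); // fix: denial propagates as an S3 error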

39 changes: 22 additions & 17 deletions src/manage_nsfs/manage_nsfs_glacier.js
@@ -35,14 +35,7 @@ async function process_migrations() {
 * @param {import('../sdk/nsfs_glacier_backend/backend').GlacierBackend} backend
 */
async function run_glacier_migrations(fs_context, backend) {
-    // This WAL is getting opened only so that we can process all the prcess WAL entries
-    const wal = new PersistentLogger(
-        config.NSFS_GLACIER_LOGS_DIR,
-        GlacierBackend.MIGRATE_WAL_NAME,
-        { disable_rotate: true, locking: 'EXCLUSIVE' },
-    );
-
-    await wal.process_inactive(async file => backend.migrate(fs_context, file));
+    await run_glacier_operation(fs_context, GlacierBackend.MIGRATE_WAL_NAME, backend.migrate.bind(backend));
}

async function process_restores() {
@@ -69,14 +62,7 @@
* @param {import('../sdk/nsfs_glacier_backend/backend').GlacierBackend} backend
*/
async function run_glacier_restore(fs_context, backend) {
-    // This WAL is getting opened only so that we can process all the prcess WAL entries
-    const wal = new PersistentLogger(
-        config.NSFS_GLACIER_LOGS_DIR,
-        GlacierBackend.RESTORE_WAL_NAME,
-        { disable_rotate: true, locking: 'EXCLUSIVE' },
-    );
-
-    await wal.process_inactive(async file => backend.restore(fs_context, file));
+    await run_glacier_operation(fs_context, GlacierBackend.RESTORE_WAL_NAME, backend.restore.bind(backend));
}

async function process_expiry() {
@@ -106,7 +92,7 @@ async function time_exceeded(fs_context, interval, timestamp_file) {

if (lastrun.getTime() + interval < Date.now()) return true;
} catch (error) {
-        console.error('failed to read last run timestamp:', error);
+        console.error('failed to read last run timestamp:', error, 'timestamp_file:', timestamp_file);
if (error.code === 'ENOENT') return true;

throw error;
@@ -129,6 +115,25 @@ async function record_current_time(fs_context, timestamp_file) {
);
}

+/**
+ * run_glacier_operation takes a log_namespace and a callback and executes the
+ * callback on each log file in that namespace. It will also generate a failure
+ * log file and persist the failures in that log file.
+ * @param {nb.NativeFSContext} fs_context
+ * @param {string} log_namespace
+ * @param {Function} cb
+ */
+async function run_glacier_operation(fs_context, log_namespace, cb) {
+    const log = new PersistentLogger(config.NSFS_GLACIER_LOGS_DIR, log_namespace, { locking: 'EXCLUSIVE' });
+    try {
+        await log.process(async (entry, failure_recorder) => cb(fs_context, entry, failure_recorder));
+    } catch (error) {
+        console.error('failed to process log in namespace:', log_namespace);
+    } finally {
+        await log.close();
+    }
+}

/**
* lock_and_run acquires a flock and calls the given callback after
* acquiring the lock
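Both the migrate and restore flows now funnel through run_glacier_operation, which replaces the per-call `PersistentLogger` setup and, unlike the old `process_inactive` path, threads a `failure_recorder` into the callback so entries that fail can be persisted into a failure log and retried later. A hedged sketch of a compatible callback, with the signature inferred from this diff rather than from the backend sources:

    // Sketch: a callback usable with run_glacier_operation. `entry` is a
    // log file handed over by PersistentLogger.process; entries that fail
    // are handed to `failure_recorder` so they land in the failure log.
    async function example_glacier_op(fs_context, entry, failure_recorder) {
        try {
            // ... apply the glacier operation recorded in `entry` ...
        } catch (err) {
            await failure_recorder(entry); // keep the entry for the next run
        }
    }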
3 changes: 2 additions & 1 deletion src/sdk/bucketspace_fs.js
@@ -568,12 +568,13 @@ class BucketSpaceFS extends BucketSpaceSimpleFS {
        const bucket_config_path = this._get_bucket_config_path(name);
        const { data } = await nb_native().fs.readFile(this.fs_context, bucket_config_path);
        const bucket = JSON.parse(data.toString());
-        await bucket_policy_utils.validate_s3_policy(policy, bucket.name, async principal => this._get_account_by_name(principal));
        bucket.s3_policy = policy;
        const bucket_to_validate = _.omitBy(bucket, _.isUndefined);
        dbg.log2("put_bucket_policy: bucket properties before validate_bucket_schema",
            bucket_to_validate);
        nsfs_schema_utils.validate_bucket_schema(bucket_to_validate);
+        await bucket_policy_utils.validate_s3_policy(bucket.s3_policy, bucket.name, async principal =>
+            this._get_account_by_name(principal));
        const update_bucket = JSON.stringify(bucket);
        await nb_native().fs.writeFile(
            this.fs_context,
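The reorder validates the policy against existing accounts only after the merged bucket config has passed schema validation, and it validates the exact object that will be persisted (`bucket.s3_policy`) rather than the raw input. In outline:

    // Sketch of the new ordering in put_bucket_policy:
    // 1. read and parse the existing bucket config
    // 2. bucket.s3_policy = policy
    // 3. validate_bucket_schema(bucket_to_validate)   // whole-config shape
    // 4. validate_s3_policy(bucket.s3_policy, ...)    // principals resolve to accounts
    // 5. writeFile(bucket_config_path, ...)           // persist only after both pass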