diff --git a/config.js b/config.js index a73f1ae314..932870f3b1 100644 --- a/config.js +++ b/config.js @@ -188,8 +188,7 @@ config.MIRROR_WRITER_EMPTY_DELAY = 30000; config.MIRROR_WRITER_MARKER_STORE_PERIOD = 10 * 60000; // store markers every 10 min config.TIER_TTF_WORKER_ENABLED = true; -config.TIER_TTF_WORKER_BATCH_SIZE = 50; -config.TIER_TTF_WORKER_BATCH_DELAY = 50; +config.TIER_TTF_WORKER_BATCH_DELAY = 500; config.TIER_TTF_WORKER_EMPTY_DELAY = 30000; config.TIER_SPILLBACK_WORKER_ENABLED = true; diff --git a/frontend/src/app/reducers/host-parts-reducer.js b/frontend/src/app/reducers/host-parts-reducer.js index c3ac2af2ee..69705adc30 100644 --- a/frontend/src/app/reducers/host-parts-reducer.js +++ b/frontend/src/app/reducers/host-parts-reducer.js @@ -82,15 +82,13 @@ function onFailFetchHostObjects(state, { payload }) { function onFetchCloudResourceObjects(state, { payload }) { return onFetchHostObjects( - state, - { payload: _resourceQueryToHostQuery(payload) } + state, { payload: _resourceQueryToHostQuery(payload) } ); } function onCompleteFetchCloudResourceObjects(state, { payload }) { return onCompleteFetchHostObjects( - state, - { + state, { payload: { query: _resourceQueryToHostQuery(payload.query), response: payload.response @@ -101,8 +99,7 @@ function onCompleteFetchCloudResourceObjects(state, { payload }) { function onFailCloudResourceObjects(state, { payload }) { return onFailFetchHostObjects( - state, - { + state, { payload: { query: _resourceQueryToHostQuery(payload.query), error: payload.error diff --git a/package-lock.json b/package-lock.json index 5d70b5d164..c1ab5c3ed6 100644 --- a/package-lock.json +++ b/package-lock.json @@ -3474,9 +3474,9 @@ } }, "lodash": { - "version": "4.17.11", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz", - "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==" + "version": "4.17.14", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.14.tgz", + "integrity": "sha512-mmKYbW3GLuJeX+iGP+Y7Gp1AiGHGbXHCOh/jZmrawMmsE7MS4znI3RL2FsjbqOyMayHInjOeykW7PEajUk1/xw==" }, "lodash.flattendeep": { "version": "4.4.0", @@ -4037,9 +4037,9 @@ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, "multer": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/multer/-/multer-1.4.1.tgz", - "integrity": "sha512-zzOLNRxzszwd+61JFuAo0fxdQfvku12aNJgnla0AQ+hHxFmfc/B7jBVuPr5Rmvu46Jze/iJrFpSOsD7afO8SDw==", + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/multer/-/multer-1.4.2.tgz", + "integrity": "sha512-xY8pX7V+ybyUpbYMxtjM9KAiD9ixtg5/JkeKUTD6xilfDv0vzzOFcCp4Ljb1UU3tSOM3VTZtKo63OmzOrGi3Cg==", "requires": { "append-field": "^1.0.0", "busboy": "^0.2.11", @@ -5641,9 +5641,9 @@ "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" }, "ssh2": { - "version": "0.8.4", - "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-0.8.4.tgz", - "integrity": "sha512-qztb9t4b34wJSiWVpeTMVVN/5KCuBoyctBc2BcSe/Uq4NRnF0gB16Iu5p72ILhdYATcMNwB5WppzPIEs/3wB8Q==", + "version": "0.8.5", + "resolved": "https://registry.npmjs.org/ssh2/-/ssh2-0.8.5.tgz", + "integrity": "sha512-TkvzxSYYUSQ8jb//HbHnJVui4fVEW7yu/zwBxwro/QaK2EGYtwB+8gdEChwHHuj142c5+250poMC74aJiwApPw==", "requires": { "ssh2-streams": "~0.4.4" } @@ -6031,9 +6031,9 @@ "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=" }, "typescript": { - "version": "3.5.1", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.5.1.tgz", - "integrity": 
"sha512-64HkdiRv1yYZsSe4xC1WVgamNigVYjlssIoaH2HcZF0+ijsk5YK2g0G34w9wJkze8+5ow4STd22AynfO6ZYYLw==" + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.5.3.tgz", + "integrity": "sha512-ACzBtm/PhXBDId6a6sDJfroT2pOWt/oOnk4/dElG5G33ZL776N3Y6/6bKZJBFpd+b05F3Ct9qDjMeJmRWtE2/g==" }, "uglify-js": { "version": "3.6.0", diff --git a/package.json b/package.json index 3d742cd0d6..4189437965 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "noobaa-core", - "version": "5.0.0", + "version": "5.0.1", "private": true, "license": "SEE LICENSE IN LICENSE", "description": "", @@ -66,6 +66,8 @@ "ajv": "6.10.0", "aws-sdk": "2.488.0", "azure-storage": "2.10.3", + "babel-plugin-transform-runtime": "6.23.0", + "babel-preset-es2015": "6.24.1", "bcrypt": "3.0.6", "big-integer": "1.6.44", "bindings": "1.5.0", @@ -88,7 +90,7 @@ "ip": "1.1.5", "jsonwebtoken": "8.5.1", "linux-blockutils": "0.1.0", - "lodash": "4.17.11", + "lodash": "4.17.14", "mime": "2.4.4", "minimist": "1.2.0", "mkdirp": "0.5.1", @@ -97,7 +99,7 @@ "mongodb": "3.2.7", "mongodb-uri": "0.9.7", "morgan": "1.9.1", - "multer": "1.4.1", + "multer": "1.4.2", "nan": "2.14.0", "ncp": "2.0.0", "net-ping": "1.2.3", @@ -110,13 +112,14 @@ "request": "2.88.0", "rimraf": "2.6.3", "rotating-file-stream": "1.4.2", + "seedrandom": "3.0.1", "serve-favicon": "2.5.0", "setimmediate": "1.0.5", "sinon": "7.3.2", - "ssh2": "0.8.4", + "ssh2": "0.8.5", "ts-node": "8.3.0", "tslint": "5.16.0", - "typescript": "3.5.1", + "typescript": "3.5.3", "utf-8-validate": "5.0.2", "uuid": "3.3.2", "why-is-node-running": "2.1.0", @@ -124,10 +127,7 @@ "xml2js": "0.4.19", "yaml": "1.6.0", "yauzl": "2.10.0", - "yazl": "2.5.1", - "seedrandom": "3.0.1", - "babel-plugin-transform-runtime": "6.23.0", - "babel-preset-es2015": "6.24.1" + "yazl": "2.5.1" }, "devDependencies": { "mocha": "6.1.4", diff --git a/src/agent/block_store_services/block_store_fs.js b/src/agent/block_store_services/block_store_fs.js index d994b5cba4..bf28c2a379 100644 --- a/src/agent/block_store_services/block_store_fs.js +++ b/src/agent/block_store_services/block_store_fs.js @@ -11,7 +11,6 @@ const fs_utils = require('../../util/fs_utils'); const os_utils = require('../../util/os_utils'); const config = require('../../../config.js'); const string_utils = require('../../util/string_utils'); -const promise_utils = require('../../util/promise_utils'); const BlockStoreBase = require('./block_store_base').BlockStoreBase; const { RpcError } = require('../../rpc'); @@ -42,7 +41,6 @@ class BlockStoreFs extends BlockStoreBase { return P.map(dir_list, dir => fs_utils.create_path(dir), { concurrency: 10 }) - .then(() => this._upgrade_to_blocks_tree()) .then(() => fs.statAsync(this.usage_path) .catch(ignore_not_found) ) @@ -263,98 +261,6 @@ class BlockStoreFs extends BlockStoreBase { return path.join(this.blocks_path_root, block_dir, file); } - _upgrade_to_blocks_tree() { - return fs.statAsync(this.old_blocks_path) - .catch(err => { - // when it doesn't exist it means we don't need to upgrade - // on any other error, we ignore as we don't really expect - // any error we have anything to do about it - if (err.code !== 'ENOENT') { - dbg.log0('_upgrade_to_blocks_tree:', - 'Old blocks dir failed to stat, ignoring', - this.old_blocks_path, err); - } - }) - .then(stat => { - if (!stat) return; - if (stat.size > 64 * 1024 * 1024) { - dbg.warn('_upgrade_to_blocks_tree:', - 'Old blocks dir is huge and might crash the process', - 'spawning upgrade_agent_to_blocks_tree.py to the rescue', - 
this.old_blocks_path, stat); - // spawning the python script to iterativly move - // the large blocks to blocks tree. - // the output of the script will be redirected to our stdout - // though this will not be logged through our debug module, - // but still collected in diagnostics. - return promise_utils.spawn('python', [ - 'src/agent/upgrade_agent_to_blocks_tree.py', - '--wet', - this.root_path - ]); - } - if (stat.size > 8 * 1024 * 1024) { - dbg.warn('_upgrade_to_blocks_tree:', - 'Old blocks dir is pretty big and might take longer to read', - this.old_blocks_path, stat); - } - return this._move_to_blocks_tree(); - }); - } - - _move_to_blocks_tree() { - let num_move_errors = 0; - dbg.log0('_upgrade_to_blocks_tree: reading', this.old_blocks_path); - return fs.readdirAsync(this.old_blocks_path) - .then(files => { - dbg.log2('found', files.length, 'files to move. files:', files); - return P.map(files, file => { - let file_split = file.split('.'); - let new_path = this._get_block_other_path(file); - if (file_split.length === 2) { - let block_id = file_split[0]; - let suffix = file_split[1]; - if (suffix === 'data') { - new_path = this._get_block_data_path(block_id); - } else if (suffix === 'meta') { - new_path = this._get_block_meta_path(block_id); - } - } - let old_path = path.join(this.old_blocks_path, file); - return fs.renameAsync(old_path, new_path) - .catch(err => { - // we log the error here and count, but do not throw - // to try and move all the rest of the files. - num_move_errors += 1; - dbg.error('_upgrade_to_blocks_tree:', - 'failed moving', old_path, '->', new_path, err); - }); - }, { - // limit the number of promises to use for moving blocks - // - set arbitrarily for now - concurrency: 10 - }); - }) - .then(() => fs.rmdirAsync(this.old_blocks_path)) - .then(() => { - // since we also successfuly deleted the old blocks dir - // it must mean there are no leftovers in anycase. - // so even if we counted num_move_errors, it might have been - // due to parallel operations with another process, - // so we ignore it. - if (num_move_errors) { - dbg.log0('_upgrade_to_blocks_tree: finished', this.old_blocks_path, - 'eventhough we had num_move_errors', num_move_errors); - } - dbg.log0('_upgrade_to_blocks_tree: done', this.old_blocks_path); - }) - .catch(err => { - dbg.error('_upgrade_to_blocks_tree: failed', - this.old_blocks_path, 'num_move_errors', num_move_errors, - err.stack || err); - }); - } - } function ignore_not_found(err) { diff --git a/src/agent/upgrade_agent_to_blocks_tree.py b/src/agent/upgrade_agent_to_blocks_tree.py deleted file mode 100644 index a02a18bd8d..0000000000 --- a/src/agent/upgrade_agent_to_blocks_tree.py +++ /dev/null @@ -1,96 +0,0 @@ -# This script helps to upgrade agent blocks structure to new tree blocks -# The reason it's written in python is that node.js does not allow to -# readdir iteratively and fails on OutOfMemory. 
-# See pending issues: -# https://github.com/libuv/libuv/pull/416 -# https://github.com/nodejs/node/issues/583 - -import os, sys - -wet = False -verbose = False - -def upgrade_node_dir(node_dir): - blocks_path = os.path.join(node_dir, 'blocks') - if not os.path.isdir(blocks_path): - print '*** No blocks dir found in:', blocks_path - return - - blocks_tree_path = os.path.join(node_dir, 'blocks_tree') - print 'Creating tree dirs under:', blocks_tree_path - if not os.path.isdir(blocks_tree_path): - if wet: os.mkdir(blocks_tree_path) - blocks_tree_other_path = os.path.join(blocks_tree_path, 'other.blocks') - if not os.path.isdir(blocks_tree_other_path): - if wet: os.mkdir(blocks_tree_other_path) - for i in xrange(0, 0x1000): - tree_path = os.path.join(blocks_tree_path, ('%03x' % i) + '.blocks') - if not os.path.isdir(tree_path): - if wet: os.mkdir(tree_path) - - print 'Moving blocks to:', blocks_tree_path - count = 0 - for f in os.listdir(blocks_path): - sp = f.split('.') - tree_path = blocks_tree_other_path - try: - if len(sp) == 2 and (sp[1] == 'data' or sp[1] == 'meta'): - i = int(sp[0], 16) % 0x1000 - tree_path = os.path.join(blocks_tree_path, ('%03x' % i) + '.blocks') - except: - # When the file name is not a hex id we expect a ValueError - # and will use the tree_path of 'other.blocks' - pass - if verbose: print 'Moving block:', f, '->', tree_path - if wet: os.rename(os.path.join(blocks_path, f), os.path.join(tree_path, f)) - count += 1 - if count % 1000 == 0: - print 'Count:', count - - print 'Moved', count, 'blocks to:', blocks_path - print 'Now removing blocks dir:', blocks_path - try: - if wet: os.rmdir(blocks_path) - except Exception as ex: - print '*** Removing blocks dir failed:', blocks_path, ex - - print 'Finished with:', blocks_path - - -def upgrade_agent_storage(agent_storage): - if not os.path.isdir(agent_storage): - print '*** No agent_storage dir in:', agent_storage - return - for node in os.listdir(agent_storage): - upgrade_node_dir(os.path.join(agent_storage, node)) - print 'Done with:', agent_storage - - -def main(): - global wet - global verbose - node_dir = None - - for arg in sys.argv[1:]: - if arg == '--wet': - wet = True - elif arg in ('--verbose', '-v'): - verbose = True - elif os.path.isdir(arg): - node_dir = arg - else: - print '*** Ignoring unknown argument:', arg - - if node_dir: - upgrade_node_dir(node_dir) - return - - # this path is only here for running "manually" on linux - # NOTE: this doesn't handle windows agents - # NOTE: this doesn't handle multidrive agents - if sys.platform.startswith('linux') : - upgrade_agent_storage('/usr/local/noobaa/agent_storage/') - return - -if __name__ == "__main__": - main() diff --git a/src/api/api.js b/src/api/api.js index 6ca6c1b7aa..00b3491f88 100644 --- a/src/api/api.js +++ b/src/api/api.js @@ -29,7 +29,6 @@ api_schema.register_api(require('./tiering_policy_api')); api_schema.register_api(require('./pool_api')); api_schema.register_api(require('./cluster_server_api')); api_schema.register_api(require('./cluster_internal_api')); -api_schema.register_api(require('./upgrade_api')); api_schema.register_api(require('./server_inter_process_api')); api_schema.register_api(require('./hosted_agents_api')); api_schema.register_api(require('./frontend_notifications_api')); @@ -64,7 +63,6 @@ class APIClient { this.pool = undefined; this.cluster_server = undefined; this.cluster_internal = undefined; - this.upgrade = undefined; this.server_inter_process = undefined; this.hosted_agents = undefined; this.frontend_notifications = 
undefined; diff --git a/src/api/cluster_internal_api.js b/src/api/cluster_internal_api.js index d2326b9a73..844a31b387 100644 --- a/src/api/cluster_internal_api.js +++ b/src/api/cluster_internal_api.js @@ -272,15 +272,6 @@ module.exports = { } }, - // used for backward compatability with basic_server_ops upgrade during tests - // TODO: remove after we don't use versions with upgrade in cluster_server - upgrade_cluster: { - method: 'POST', - auth: { - system: 'admin', - } - }, - }, definitions: { diff --git a/src/api/stats_api.js b/src/api/stats_api.js index 8847236de4..fc9d2ceca1 100644 --- a/src/api/stats_api.js +++ b/src/api/stats_api.js @@ -522,11 +522,24 @@ module.exports = { type: 'array', items: { type: 'object', - required: ['name', 'capacity', 'reduction_ratio', 'savings', 'total_usage', 'buckets_stats', 'usage_by_project', 'usage_by_bucket_class'], + required: [ + 'name', + 'address', + 'capacity', + 'reduction_ratio', + 'savings', + 'total_usage', + 'buckets_stats', + 'usage_by_project', + 'usage_by_bucket_class', + ], properties: { name: { type: 'string' }, + address: { + type: 'string' + }, capacity: { type: 'number' }, diff --git a/src/api/upgrade_api.js b/src/api/upgrade_api.js deleted file mode 100644 index 4322e64826..0000000000 --- a/src/api/upgrade_api.js +++ /dev/null @@ -1,108 +0,0 @@ -/* Copyright (C) 2016 NooBaa */ -'use strict'; - -/** - * - * UPGRADE API - * - * UPGRADE - * - */ -module.exports = { - - id: 'upgrade_api', - - methods: { - - upgrade_cluster: { - method: 'POST', - auth: { - system: 'admin', - } - }, - - member_pre_upgrade: { - method: 'POST', - params: { - type: 'object', - required: ['filepath', 'mongo_upgrade'], - properties: { - filepath: { - type: 'string' - }, - mongo_upgrade: { - type: 'boolean' - }, - stage: { - type: 'string', - enum: [ - 'UPGRADE_STAGE', - 'UPLOAD_STAGE', - 'RETEST_STAGE' - ] - } - } - }, - auth: { - system: false, - } - }, - - cluster_pre_upgrade: { - method: 'POST', - params: { - type: 'object', - properties: { - filepath: { - type: 'string' - } - } - }, - auth: { - system: false, - } - }, - - do_upgrade: { - method: 'POST', - params: { - type: 'object', - properties: { - filepath: { - type: 'string' - } - } - }, - auth: { - system: false, - } - }, - - get_upgrade_status: { - doc: 'get the status of cluster upgrade', - method: 'GET', - reply: { - type: 'object', - required: ['in_process'], - properties: { - in_process: { - type: 'boolean' - }, - } - }, - auth: { - system: false - } - }, - - reset_upgrade_package_status: { - doc: 'reset upgrade package status on a new upload', - method: 'POST', - auth: { - system: 'admin', - } - }, - - }, - -}; diff --git a/src/deploy/NVA_build/builder.Dockerfile b/src/deploy/NVA_build/builder.Dockerfile index 5eac2345ff..a12b90c289 100644 --- a/src/deploy/NVA_build/builder.Dockerfile +++ b/src/deploy/NVA_build/builder.Dockerfile @@ -14,10 +14,10 @@ RUN yum install -y -q wget unzip which vim centos-release-scl && \ yum clean all RUN source /opt/rh/devtoolset-7/enable && \ version="1.3.0" && \ - wget -q http://www.tortall.net/projects/yasm/releases/yasm-${version}.tar.gz && \ + wget -q -O yasm-1.3.0.tar.gz https://github.com/yasm/yasm/archive/v${version}.tar.gz && \ tar -xf yasm-${version}.tar.gz && \ pushd yasm-${version} && \ - ./configure && \ + ./autogen.sh && \ make && \ make install && \ popd && \ diff --git a/src/deploy/NVA_build/mongo_setup_users.js b/src/deploy/NVA_build/mongo_setup_users.js deleted file mode 100644 index a8d5763d2e..0000000000 --- 
a/src/deploy/NVA_build/mongo_setup_users.js +++ /dev/null @@ -1,45 +0,0 @@ -/* Copyright (C) 2016 NooBaa */ -/* eslint-env mongo */ -'use strict'; - -print('\nChecking mongodb users ...'); -var nbcoreDb = db.getSiblingDB('nbcore'); -var pwd = 'roonoobaa'; - -// try to authenticate with nbadmin. if succesful nothing to do -var res = db.auth('nbadmin', pwd); -if (res !== 1) { - print('\nusers are not set. creating users ...'); - var adminUser = { - user: 'nbadmin', - pwd: pwd, - roles: [{ - role: "root", - db: "admin" - }] - }; - db.createUser(adminUser); - db.auth('nbadmin', pwd); - var nbcoreUser = { - user: 'nbsrv', - pwd: pwd, - roles: [{ - role: "readWrite", - db: "nbcore" - }] - }; - nbcoreDb.createUser(nbcoreUser); - - //Temporary until we will handle admin correctly for both databases - var coretestDb = db.getSiblingDB('coretest'); - var coretestDbUser = { - user: 'nbsrv', - pwd: pwd, - roles: [{ - role: "root", - db: "admin" - }] - }; - coretestDb.createUser(coretestDbUser); - -} diff --git a/src/deploy/NVA_build/named.conf b/src/deploy/NVA_build/named.conf deleted file mode 100644 index b994beca38..0000000000 --- a/src/deploy/NVA_build/named.conf +++ /dev/null @@ -1,59 +0,0 @@ -// -// named.conf -// -// Provided by Red Hat bind package to configure the ISC BIND named(8) DNS -// server as a caching only nameserver (as a localhost DNS resolver only). -// -// See /usr/share/doc/bind*/sample/ for example named configuration files. -// -// See the BIND Administrators Reference Manual (ARM) for details about the -// configuration located in /usr/share/doc/bind-{version}/Bv9ARM.html - -options { - listen-on port 53 { 127.0.0.1; }; - listen-on-v6 port 53 { ::1; }; - directory "/var/named"; - dump-file "/var/named/data/cache_dump.db"; - statistics-file "/var/named/data/named_stats.txt"; - memstatistics-file "/var/named/data/named_mem_stats.txt"; - allow-query { localhost; }; - allow-query-cache { localhost; }; - /* - - If you are building an AUTHORITATIVE DNS server, do NOT enable recursion. - - If you are building a RECURSIVE (caching) DNS server, you need to enable - recursion. - - If your recursive DNS server has a public IP address, you MUST enable access - control to limit queries to your legitimate users. Failing to do so will - cause your server to become part of large scale DNS amplification - attacks. Implementing BCP38 within your network would greatly - reduce such attack surface - */ - recursion yes; - include "/etc/noobaa_configured_dns.conf"; - dnssec-enable yes; - // Danny - I removed dnssec-validation since it produced dns errors when there is a clock skew - dnssec-validation no; - - /* Path to ISC DLV key */ - bindkeys-file "/etc/named.iscdlv.key"; - - managed-keys-directory "/var/named/dynamic"; - - pid-file "/run/named/named.pid"; - session-keyfile "/run/named/session.key"; -}; - -logging { - channel default_debug { - file "data/named.run"; - severity dynamic; - }; -}; - -zone "." 
IN { - type hint; - file "named.ca"; -}; - -include "/etc/named.rfc1912.zones"; -include "/etc/named.root.key"; diff --git a/src/deploy/NVA_build/noobaa_core.yaml b/src/deploy/NVA_build/noobaa_core.yaml index 5ac6c96b55..614acf21eb 100644 --- a/src/deploy/NVA_build/noobaa_core.yaml +++ b/src/deploy/NVA_build/noobaa_core.yaml @@ -128,7 +128,7 @@ spec: tcpSocket: port: 6001 timeoutSeconds: 5 - image: noobaa/noobaa-core:5.0.0 + image: noobaa/noobaa-core:5.0.1 imagePullPolicy: IfNotPresent resources: # https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ @@ -213,7 +213,7 @@ spec: name: mongo-datadir initContainers: - name: init-mongo - image: noobaa/noobaa-core:5.0.0 + image: noobaa/noobaa-core:5.0.1 # command: ['/noobaa_init_files/kube_pv_chown', 'server'] command: ['/noobaa_init_files/noobaa_init.sh', 'init_mongo'] volumeMounts: diff --git a/src/deploy/NVA_build/noobaa_deploy_k8s.sh b/src/deploy/NVA_build/noobaa_deploy_k8s.sh index 0f09105022..0fc4a49040 100755 --- a/src/deploy/NVA_build/noobaa_deploy_k8s.sh +++ b/src/deploy/NVA_build/noobaa_deploy_k8s.sh @@ -8,7 +8,7 @@ EMAIL="admin@noobaa.io" PASSWD="" SYS_NAME=noobaa NAMESPACE=$(kubectl config get-contexts | grep "\*" | awk '{print $5}') -NOOBAA_CORE_YAML=https://raw.githubusercontent.com/noobaa/noobaa-core/4.0/src/deploy/NVA_build/noobaa_core.yaml +NOOBAA_CORE_YAML=https://raw.githubusercontent.com/noobaa/noobaa-core/5.0/src/deploy/NVA_build/noobaa_core.yaml CREDS_SECRET_NAME=noobaa-create-sys-creds NOOBAA_SECRETS_NAME=noobaa-secrets ACCESS_KEY="" diff --git a/src/deploy/NVA_build/two_step_upgrade_checkups_spawn.sh b/src/deploy/NVA_build/two_step_upgrade_checkups_spawn.sh deleted file mode 100644 index 362a6929d7..0000000000 --- a/src/deploy/NVA_build/two_step_upgrade_checkups_spawn.sh +++ /dev/null @@ -1,8 +0,0 @@ -EXTRACTION_PATH="/tmp/test/" -NEW_TMP_ROOT="/tmp/test/noobaa-core/"; -NEW_UPGRADE_UTILS="/tmp/test/noobaa-core/src/upgrade/upgrade_utils.js --new_pre_upgrade"; -NODEVER=$(cat /tmp/test/noobaa-core/.nvmrc) -mkdir -p /tmp/v${NODEVER} -cp -f ${NEW_TMP_ROOT}build/public/node-v${NODEVER}-linux-x64.tar.xz /tmp/ -tar -xJf /tmp/node-v${NODEVER}-linux-x64.tar.xz -C /tmp/v${NODEVER} --strip-components 1 -/tmp/v${NODEVER}/bin/node ${NEW_UPGRADE_UTILS} \ No newline at end of file diff --git a/src/deploy/NVA_build/upgrade.sh b/src/deploy/NVA_build/upgrade.sh deleted file mode 100755 index b4ae29f4bb..0000000000 --- a/src/deploy/NVA_build/upgrade.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -# We should not support upgrade from 1.* version to 2.* -# Since 2.* versions don't use the upgrade.sh we can feel comfortable to throw here -exit 1 diff --git a/src/deploy/mongo_upgrade/mongo_upgrade_mark_completed.js b/src/deploy/mongo_upgrade/mongo_upgrade_mark_completed.js deleted file mode 100644 index 836c5d94b7..0000000000 --- a/src/deploy/mongo_upgrade/mongo_upgrade_mark_completed.js +++ /dev/null @@ -1,57 +0,0 @@ -/* Copyright (C) 2016 NooBaa */ -/* eslint-env mongo */ -/* global setVerboseShell */ -'use strict'; - -// the following params are set from outside the script -// using mongo --eval 'var param_ip="..."' and we only declare them here for completeness -var param_secret; -var version; -// var param_bcrypt_secret; -// var param_client_subject; - -update_version_change(); -mongo_upgrade_mark_completed(); -mongo_flush_changes(); - -function mongo_flush_changes() { - print('\nmongo_flush_changes start...'); - const admin_db = db.getSiblingDB('admin'); - admin_db.runCommand({ fsync: 1 }); - 
print('\nmongo_flush_changes end...'); -} - -function update_version_change() { - print('\nMONGO UPDATE VERSION CHANGE - START ...'); - setVerboseShell(true); - db.system_history.insert({ - time_stamp: new Date(), - history_type: 'VERSION', - version_snapshot: version || 'Unknown' - }); - print('\nMONGO UPDATE VERSION CHANGE - DONE.'); -} - -function mongo_upgrade_mark_completed() { - print('\nMONGO UPGRADE MARK COMPLETED - START ...'); - setVerboseShell(true); - - db.systems.update({}, { - $set: { - "last_upgrade.timestamp": Date.now() - } - }); - - // mark upgrade status of this server as completed - db.clusters.update({ - owner_secret: param_secret - }, { - $set: { - upgrade: { - status: 'COMPLETED', - } - } - }); - - print('\nMONGO UPGRADE MARK COMPLETED - DONE.'); -} diff --git a/src/deploy/mongo_upgrade/mongo_upgrade_wait_for_master.js b/src/deploy/mongo_upgrade/mongo_upgrade_wait_for_master.js deleted file mode 100644 index bcc692bd24..0000000000 --- a/src/deploy/mongo_upgrade/mongo_upgrade_wait_for_master.js +++ /dev/null @@ -1,74 +0,0 @@ -/* Copyright (C) 2016 NooBaa */ -/* eslint-env mongo */ -/* global setVerboseShell, sleep */ -'use strict'; - - -var param_secret; - -mongo_upgrade_wait_for_master(); - -function mongo_upgrade_wait_for_master() { - print('\nMONGO UPGRADE WAIT FOR MASTER - START ...'); - setVerboseShell(true); - sync_cluster_upgrade(); - print('\nMONGO UPGRADE 19 - DONE.'); -} - -function sync_cluster_upgrade() { - // find if this server should perform mongo upgrade - var is_mongo_upgrade = db.clusters.find({ - owner_secret: param_secret - }).toArray()[0].upgrade ? db.clusters.find({ - owner_secret: param_secret - }).toArray()[0].upgrade.mongo_upgrade : true; - - // if this server shouldn't run mongo_upgrade, set status to DB_READY, - // to indicate that this server is upgraded and with mongo running. - // then wait for master to complete upgrade - if (!is_mongo_upgrade) { - db.clusters.update({ - owner_secret: param_secret - }, { - $set: { - "upgrade.stage": "DB_READY" - } - }); - var max_iterations = 400; // ~1 hour (multiplied with the 10 seconds sleep) - var i = 0; - while (i < max_iterations) { - print('waiting for master to complete mongo upgrade...'); - i += 1; - try { - var master_status = db.clusters.find({ - "upgrade.mongo_upgrade": true - }).toArray()[0] ? db.clusters.find({ - "upgrade.mongo_upgrade": true - }).toArray()[0].upgrade.status : 'COMPLETED'; - if (master_status === 'COMPLETED') { - print('\nmaster completed mongo_upgrade - finishing upgrade of this server'); - mark_completed(); - quit(); - } - } catch (err) { - print(err); - } - sleep(10000); - } - print('\nERROR: master did not finish mongo_upgrade in time!!! finishing upgrade of this server'); - quit(); - } -} - -function mark_completed() { - // mark upgrade status of this server as completed - db.clusters.update({ - owner_secret: param_secret - }, { - $set: { - upgrade: { - status: 'COMPLETED' - } - } - }); -} diff --git a/src/sdk/map_client.js b/src/sdk/map_client.js index 726dbb097f..a8b61181b0 100644 --- a/src/sdk/map_client.js +++ b/src/sdk/map_client.js @@ -143,7 +143,7 @@ class MapClient { async put_mapping() { // TODO should we filter out chunk.had_errors from put mapping? 
await this.rpc_client.object.put_mapping({ - chunks: this.chunks.map(chunk => chunk.to_api()), + chunks: this.chunks.filter(chunk => !chunk.had_errors).map(chunk => chunk.to_api()), move_to_tier: this.move_to_tier && this.move_to_tier._id, }); } diff --git a/src/server/analytic_services/prometheus_reporting.js b/src/server/analytic_services/prometheus_reporting.js index 7d1cdc1c0a..ef5f41ec0f 100644 --- a/src/server/analytic_services/prometheus_reporting.js +++ b/src/server/analytic_services/prometheus_reporting.js @@ -15,7 +15,7 @@ const METRIC_RECORDS = Object.freeze([{ configuration: { name: get_metric_name('cloud_types'), help: 'Cloud Resource Types in the System', - labelNames: ['type', 'count'] + labelNames: ['type'] } }, { metric_type: 'Gauge', @@ -23,7 +23,7 @@ const METRIC_RECORDS = Object.freeze([{ configuration: { name: get_metric_name('projects_capacity_usage'), help: 'Projects Capacity Usage', - labelNames: ['project', 'count'] + labelNames: ['project'] } }, { metric_type: 'Gauge', @@ -39,7 +39,7 @@ const METRIC_RECORDS = Object.freeze([{ configuration: { name: get_metric_name('bucket_class_capacity_usage'), help: 'Bucket Class Capacity Usage', - labelNames: ['bucket_class', 'count'] + labelNames: ['bucket_class'] } }, { metric_type: 'Gauge', @@ -47,7 +47,7 @@ const METRIC_RECORDS = Object.freeze([{ configuration: { name: get_metric_name('unhealthy_cloud_types'), help: 'Unhealthy Cloud Resource Types in the System', - labelNames: ['type', 'count'] + labelNames: ['type'] } }, { metric_type: 'Gauge', @@ -55,7 +55,7 @@ const METRIC_RECORDS = Object.freeze([{ configuration: { name: get_metric_name('object_histo'), help: 'Object Sizes Histogram Across the System', - labelNames: ['size', 'avg', 'count'] + labelNames: ['size', 'avg'] } }, { metric_type: 'Gauge', @@ -63,7 +63,7 @@ const METRIC_RECORDS = Object.freeze([{ configuration: { name: get_metric_name('providers_bandwidth'), help: 'Providers bandwidth usage', - labelNames: ['type', 'io_size', 'size'] + labelNames: ['type', 'write_size', 'read_size'] } }, { metric_type: 'Gauge', @@ -71,7 +71,7 @@ const METRIC_RECORDS = Object.freeze([{ configuration: { name: get_metric_name('providers_ops'), help: 'Providers number of operations', - labelNames: ['type', 'io_ops', 'number'] + labelNames: ['type', 'write_num', 'read_num'] } }, { metric_type: 'Gauge', @@ -95,7 +95,7 @@ const METRIC_RECORDS = Object.freeze([{ configuration: { name: get_metric_name('system_info'), help: 'System info', - labelNames: ['system_name'] + labelNames: ['system_name', 'system_address'] } }, { metric_type: 'Gauge', @@ -280,16 +280,16 @@ class PrometheusReporting { set_bucket_class_capacity_usage(usage_info) { if (!this.enabled()) return; this._metrics.bucket_class_capacity_usage.reset(); - for (let [key, value] of Object.entries(usage_info)) { - this._metrics.bucket_class_capacity_usage.set({ bucket_class: key }, value); + for (let [bucket_class, value] of Object.entries(usage_info)) { + this._metrics.bucket_class_capacity_usage.set({ bucket_class }, value); } } set_projects_capacity_usage(usage_info) { if (!this.enabled()) return; this._metrics.projects_capacity_usage.reset(); - for (let [key, value] of Object.entries(usage_info)) { - this._metrics.projects_capacity_usage.set({ project: key }, value); + for (let [project, value] of Object.entries(usage_info)) { + this._metrics.projects_capacity_usage.set({ project }, value); } } @@ -315,18 +315,25 @@ class PrometheusReporting { set_providers_bandwidth(type, write_size, read_size) { if (!this.enabled()) 
return; - this._metrics.providers_bandwidth.set({ type: type, io_size: 'write_size' }, write_size); - this._metrics.providers_bandwidth.set({ type: type, io_size: 'read_size' }, read_size); + const { hashMap } = this._metrics.providers_bandwidth; + const hashKey = Object.keys(hashMap).find(key => key.includes(`type:${type}`)); + const record = hashMap[hashKey]; + if (record) delete this._metrics.providers_bandwidth.hashMap[hashKey]; + this._metrics.providers_bandwidth.set({ type, write_size, read_size }, Date.now()); } set_providers_ops(type, write_num, read_num) { if (!this.enabled()) return; - this._metrics.providers_ops.set({ type: type, io_ops: 'write_ops' }, write_num); - this._metrics.providers_ops.set({ type: type, io_ops: 'read_ops' }, read_num); + const { hashMap } = this._metrics.providers_ops; + const hashKey = Object.keys(hashMap).find(key => key.includes(`type:${type}`)); + const record = hashMap[hashKey]; + if (record) delete this._metrics.providers_ops.hashMap[hashKey]; + this._metrics.providers_ops.set({ type, write_num, read_num }, Date.now()); } set_object_savings(savings) { if (!this.enabled()) return; + this._metrics.object_savings.reset(); const { logical_size, physical_size } = savings; this._metrics.object_savings.set({ logical_size, physical_size }, logical_size - physical_size); } @@ -342,19 +349,46 @@ class PrometheusReporting { set_system_info(info) { if (!this.enabled()) return; - this._metrics.system_info.set({ system_name: info.name }, 0); + this._metrics.system_info.reset(); + this._metrics.system_info.set({ system_name: info.name, system_address: info.address }, Date.now()); } update_providers_bandwidth(type, write_size, read_size) { if (!this.enabled()) return; - this._metrics.providers_bandwidth.inc({ type: type, io_size: 'write_size' }, write_size, new Date()); - this._metrics.providers_bandwidth.inc({ type: type, io_size: 'read_size' }, read_size, new Date()); + const { hashMap } = this._metrics.providers_bandwidth; + const hashKey = Object.keys(hashMap).find(key => key.includes(`type:${type}`)); + const record = hashMap[hashKey]; + let updated_labels = { + type, + write_size: (write_size || 0), + read_size: (read_size || 0), + }; + if (record) { + const { labels } = record; + updated_labels.write_size += (labels.write_size || 0); + updated_labels.read_size += (labels.read_size || 0); + delete this._metrics.providers_bandwidth.hashMap[hashKey]; + } + this._metrics.providers_bandwidth.set(updated_labels, Date.now()); } update_providers_ops(type, write_num, read_num) { if (!this.enabled()) return; - this._metrics.providers_ops.inc({ type: type, io_ops: 'write_ops' }, write_num, new Date()); - this._metrics.providers_ops.inc({ type: type, io_ops: 'read_ops' }, read_num, new Date()); + const { hashMap } = this._metrics.providers_ops; + const hashKey = Object.keys(hashMap).find(key => key.includes(`type:${type}`)); + const record = hashMap[hashKey]; + let updated_labels = { + type, + write_num: (write_num || 0), + read_num: (read_num || 0), + }; + if (record) { + const { labels } = record; + updated_labels.write_num += (labels.write_num || 0); + updated_labels.read_num += (labels.read_num || 0); + delete this._metrics.providers_ops.hashMap[hashKey]; + } + this._metrics.providers_ops.set(updated_labels, Date.now()); } } diff --git a/src/server/bg_services/md_aggregator.js b/src/server/bg_services/md_aggregator.js index 9398ce8cb1..1167643db5 100644 --- a/src/server/bg_services/md_aggregator.js +++ b/src/server/bg_services/md_aggregator.js @@ -32,18 +32,49 @@ 
async function run_md_aggregator(md_store, system_store, target_now, delay) { const system = system_store.data.systems[0]; if (!system || system_utils.system_in_maintenance(system._id)) return; + const global_last_update = system.global_last_update; + let has_more = true; let update_range = true; let range = {}; + const md_local_store = { + data: { + buckets: _.clone(system_store.data.buckets), + pools: _.clone(system_store.data.pools), + } + }; + while (has_more) { - if (update_range) range = await find_next_range({ target_now, system_store }); - const changes = range && await range_md_aggregator({ md_store, system_store, range }); + if (update_range) { + range = await find_next_range({ + target_now, + system_store: md_local_store, + global_last_update, + original_system_store: system_store, + }); + } + const changes = range && await range_md_aggregator({ + md_store, + system_store: md_local_store, + range, + global_last_update + }); if (changes) { const update = _.omit(changes, 'more_updates'); await system_store.make_changes({ update }); - await P.delay(delay); update_range = !changes.more_updates; + if (update_range) { + await system_store.make_changes({ + update: { + systems: [{ + _id: system._id, + global_last_update: range.till_time, + }] + } + }); + } + await P.delay(delay); } else { has_more = false; } @@ -56,13 +87,15 @@ async function run_md_aggregator(md_store, system_store, target_now, delay) { function find_minimal_range({ target_now, system_store, + global_last_update }) { let from_time = target_now; let till_time = target_now; let should_reset_all = false; _.forEach(system_store.data.buckets, bucket => { - const last_update = _.get(bucket, 'storage_stats.last_update') || config.NOOBAA_EPOCH; + const bucket_last_update = _.get(bucket, 'storage_stats.last_update') || config.NOOBAA_EPOCH; + const last_update = global_last_update > bucket_last_update ? global_last_update : bucket_last_update; if (last_update > target_now) { dbg.error('find_next_range: time skew detected for bucket', bucket.name, 'last_update', last_update, @@ -78,7 +111,8 @@ function find_minimal_range({ } }); _.forEach(system_store.data.pools, pool => { - const last_update = _.get(pool, 'storage_stats.last_update') || config.NOOBAA_EPOCH; + const pool_last_update = _.get(pool, 'storage_stats.last_update') || config.NOOBAA_EPOCH; + const last_update = global_last_update > pool_last_update ? global_last_update : pool_last_update; if (last_update > target_now) { dbg.error('find_next_range: time skew detected for pool', pool.name, 'last_update', last_update, @@ -100,25 +134,32 @@ function find_minimal_range({ function find_next_range({ target_now, system_store, + global_last_update, + original_system_store, }) { let { from_time, till_time, should_reset_all } = find_minimal_range({ target_now, system_store, + global_last_update }); + // printing the range and the buckets/pools relative info dbg.log0('find_next_range:', 'from_time', from_time, 'till_time*', till_time - from_time, - 'target_now*', target_now - from_time + 'target_now*', target_now - from_time, + 'global_last_update', global_last_update, ); _.forEach(system_store.data.buckets, bucket => { - const last_update = _.get(bucket, 'storage_stats.last_update') || config.NOOBAA_EPOCH; + const bucket_last_update = _.get(bucket, 'storage_stats.last_update') || config.NOOBAA_EPOCH; + const last_update = bucket_last_update > global_last_update ? 
bucket_last_update : global_last_update; dbg.log1('find_next_range: bucket', bucket.name, 'last_update*', last_update - from_time ); }); _.forEach(system_store.data.pools, pool => { - const last_update = _.get(pool, 'storage_stats.last_update') || config.NOOBAA_EPOCH; + const pool_last_update = _.get(pool, 'storage_stats.last_update') || config.NOOBAA_EPOCH; + const last_update = pool_last_update > global_last_update ? pool_last_update : global_last_update; dbg.log1('find_next_range: pool', pool.name, 'last_update*', last_update - from_time ); @@ -132,8 +173,12 @@ function find_next_range({ ); // Assigning NOOBAA_EPOCH so we will gather all data again till the new time // This means that we will be eventually consistent - return system_store.make_changes({ + return original_system_store.make_changes({ update: { + systems: [{ + _id: original_system_store.data.systems[0]._id, + global_last_update: config.NOOBAA_EPOCH, + }], buckets: _.map(system_store.data.buckets, bucket => ({ _id: bucket._id, storage_stats: { @@ -193,8 +238,8 @@ function range_md_aggregator({ const till_time = range.till_time; let more_updates = false; - const filtered_buckets = _.filter(system_store.data.buckets, bucket => bucket.storage_stats.last_update === from_time); - const filtered_pools = _.filter(system_store.data.pools, pool => pool.storage_stats.last_update === from_time); + const filtered_buckets = _.filter(system_store.data.buckets, bucket => bucket.storage_stats.last_update <= from_time); + const filtered_pools = _.filter(system_store.data.pools, pool => pool.storage_stats.last_update <= from_time); if (filtered_buckets.length > config.MD_AGGREGATOR_BATCH || filtered_pools.length > config.MD_AGGREGATOR_BATCH) { more_updates = true; } @@ -228,6 +273,7 @@ function range_md_aggregator({ ); const buckets_updates = _.map(buckets, bucket => { + let dont_change_last_update = false; const new_storage_stats = calculate_new_bucket({ bucket, existing_chunks_aggregate, @@ -237,30 +283,41 @@ function range_md_aggregator({ existing_blocks_aggregate, deleted_blocks_aggregate }); + if (_.isEqual(_.omit(bucket.storage_stats, 'last_update'), new_storage_stats)) { + dont_change_last_update = true; + } new_storage_stats.last_update = till_time; + bucket.storage_stats = new_storage_stats; return { _id: bucket._id, storage_stats: new_storage_stats, + dont_change_last_update }; }); const pools_updates = _.map(pools, pool => { + let dont_change_last_update = false; const new_storage_stats = calculate_new_pool({ pool, existing_blocks_aggregate, deleted_blocks_aggregate }); + if (_.isEqual(_.omit(pool.storage_stats, 'last_update'), new_storage_stats)) { + dont_change_last_update = true; + } new_storage_stats.last_update = till_time; + pool.storage_stats = new_storage_stats; return { _id: pool._id, storage_stats: new_storage_stats, + dont_change_last_update, }; }); return { buckets: buckets_updates, pools: pools_updates, - more_updates + more_updates, }; }); } diff --git a/src/server/bg_services/tier_ttf_worker.js b/src/server/bg_services/tier_ttf_worker.js index 11ae400310..2d4cb13e57 100644 --- a/src/server/bg_services/tier_ttf_worker.js +++ b/src/server/bg_services/tier_ttf_worker.js @@ -6,12 +6,11 @@ const _ = require('lodash'); const config = require('../../../config'); const dbg = require('../../util/debug_module')(__filename); const system_store = require('../system_services/system_store').get_instance(); -const UsageReportStore = require('../analytic_services/usage_report_store').UsageReportStore; +const usage_aggregator = 
require('./usage_aggregator'); const MDStore = require('../object_services/md_store').MDStore; const system_utils = require('../utils/system_utils'); const size_utils = require('../../util/size_utils'); const auth_server = require('../common_services/auth_server'); -const nodes_client = require('../node_services/nodes_client'); const node_allocator = require('../node_services/node_allocator'); const mapper = require('../object_services/mapper'); @@ -20,6 +19,7 @@ class TieringTTFWorker { this.name = name; this.client = client; this.initialized = false; + this.last_run = 'force'; } _can_run() { @@ -41,6 +41,7 @@ class TieringTTFWorker { const multi_tiered_buckets = this._get_multi_tiered_buckets(); if (!multi_tiered_buckets || !multi_tiered_buckets.length) { dbg.log0('no buckets with more than one tier. nothing to do'); + this.last_run = 'force'; return config.TIER_TTF_WORKER_EMPTY_DELAY; } @@ -50,7 +51,7 @@ class TieringTTFWorker { } _get_multi_tiered_buckets() { - return system_store.data.buckets.filter(bucket => bucket.tiering.tiers.length > 2); + return system_store.data.buckets.filter(bucket => bucket.tiering.tiers.length > 1); } async _rebuild_need_to_move_chunks(buckets) { @@ -63,28 +64,35 @@ class TieringTTFWorker { const now = Date.now(); for (const bucket of buckets) { - await node_allocator.refresh_tiering_alloc(bucket.tiering); + await node_allocator.refresh_tiering_alloc(bucket.tiering, this.last_run); const tiering_status = node_allocator.get_tiering_status(bucket.tiering); const selected_tier = mapper.select_tier_for_write(bucket.tiering, tiering_status); - - const storage = await nodes_client.instance().aggregate_data_free_by_tier([String(selected_tier._id)], - selected_tier.system._id); - const tier_storage_free = size_utils.json_to_bigint(storage[String(selected_tier._id)][0].free); - const reports = await UsageReportStore.instance().get_usage_reports({ + const tier_storage_free = size_utils.json_to_bigint(size_utils.reduce_minimum( + 'free', tiering_status[String(selected_tier._id)].mirrors_storage.map(storage => (storage.free || 0)) + )); + const valid = _.values(tiering_status[String(selected_tier._id)].pools).every(pool => pool.valid_for_allocation); + const reports = await usage_aggregator.get_bandwidth_report({ + bucket: bucket._id, since: now - (1000 * 60 * 60), till: now, - bucket: bucket._id, + time_range: 'hour' }); const report = reports[0]; - bucket.TTF = report && report.write_bytes ? tier_storage_free.divide(size_utils.json_to_bigint(report.write_bytes).divide(60)) : + const time = valid && report ? Math.floor((now - report.timestamp) / 1000 / 60) : 60; + bucket.TTF = valid && report && report.write_bytes ? 
tier_storage_free
+                .divide(size_utils.json_to_bigint(report.write_bytes).divide(time)) :
                 // how much time in minutes will it take to fill (avg by last report)
                 MAX_TTF; // time to fill in minutes
-            bucket.tier = selected_tier._id;
+            dbg.log1('TTF bucket', bucket.name, 'storage_free', tier_storage_free, 'report', report, 'TTF:', bucket.TTF);
         }
         const sorted_buckets = buckets.filter(bucket => bucket.TTF.lesser(TOO_BIG_TTF)).sort(compare_buckets_by_TTF);
         let chunks_to_rebuild = 0;
-        if (_.isEmpty(sorted_buckets)) return config.TIER_TTF_WORKER_EMPTY_DELAY;
+        if (_.isEmpty(sorted_buckets)) {
+            this.last_run = 'force';
+            return config.TIER_TTF_WORKER_EMPTY_DELAY;
+        }
         for (const bucket of sorted_buckets) {
-            switch (bucket.TTF.value) {
+            const bucket_ttf = bucket.TTF.toJSNumber();
+            switch (bucket_ttf) {
                 case 0:
                     chunks_to_rebuild = 30;
                     break;
@@ -112,18 +120,25 @@
                 default:
                     chunks_to_rebuild = 1;
             }
-            if (!chunks_to_rebuild) return config.TIER_TTF_WORKER_EMPTY_DELAY;
-            const chunk_ids = await MDStore.instance().find_oldest_tier_chunk_ids(bucket.tier, chunks_to_rebuild, 1);
-            await node_allocator.refresh_tiering_alloc(bucket.tiering);
+            if (!chunks_to_rebuild) continue;
             const tiering_status = node_allocator.get_tiering_status(bucket.tiering);
-            const next_tier = mapper.select_tier_for_write(bucket.tiering, tiering_status, bucket.tier);
+            const previous_tier = mapper.select_tier_for_write(bucket.tiering, tiering_status);
+            const next_tier_order = this.find_tier_order_in_tiering(bucket, previous_tier) + 1;
+            const chunk_ids = await MDStore.instance().find_oldest_tier_chunk_ids(previous_tier._id, chunks_to_rebuild, 1);
+            if (!chunk_ids.length) continue;
+            const next_tier = mapper.select_tier_for_write(bucket.tiering, tiering_status, next_tier_order);
             if (!next_tier) continue;
-            console.log(`TieringTTFWorker: Moving the following ${chunks_to_rebuild} chunks to next tier ${next_tier._id}`, chunk_ids);
+            console.log(`TieringTTFWorker: Moving the following ${chunks_to_rebuild} chunks from tier ${previous_tier._id} to next tier ${next_tier._id}`, chunk_ids);
             await this._build_chunks(chunk_ids, next_tier._id);
         }
+        this.last_run = undefined;
         return config.TIER_TTF_WORKER_BATCH_DELAY;
     }

+    find_tier_order_in_tiering(bucket, tier) {
+        return bucket.tiering.tiers.find(t => String(t.tier._id) === String(tier._id)).order;
+    }
+
     async _build_chunks(chunk_ids, next_tier) {
         return this.client.scrubber.build_chunks({ chunk_ids, tier: next_tier }, {
             auth_token: auth_server.make_auth_token({
diff --git a/src/server/object_services/map_builder.js b/src/server/object_services/map_builder.js
index 5644ce52fe..98d658f291 100644
--- a/src/server/object_services/map_builder.js
+++ b/src/server/object_services/map_builder.js
@@ -52,6 +52,9 @@ class MapBuilder {

         await builder_lock.surround_keys(_.map(this.chunk_ids, String), async () => {

+            if (this.move_to_tier) {
+                await MDStore.instance().update_chunks_by_ids(this.chunk_ids, { tier: this.move_to_tier._id });
+            }
             // we run the build twice. 
first time to perform all allocation, second time to perform deletions await this.run_build(this.chunk_ids); diff --git a/src/server/object_services/map_db_types.js b/src/server/object_services/map_db_types.js index 6f74a5f86a..c270760894 100644 --- a/src/server/object_services/map_db_types.js +++ b/src/server/object_services/map_db_types.js @@ -67,7 +67,7 @@ class ChunkDB { /** @returns {nb.ChunkConfig} */ get chunk_config() { return system_store.data.get_by_id(this.chunk_db.chunk_config); } - + /** @returns {FragDB[]} */ get frags() { if (!this.__frags) this.__frags = this.chunk_db.frags.map(frag_db => new_frag_db(frag_db, this)); return this.__frags; diff --git a/src/server/object_services/map_reader.js b/src/server/object_services/map_reader.js index 5a8bd012d7..99a71e4880 100644 --- a/src/server/object_services/map_reader.js +++ b/src/server/object_services/map_reader.js @@ -4,14 +4,16 @@ /** @typedef {typeof import('../../sdk/nb')} nb */ const _ = require('lodash'); -// const util = require('util'); +const util = require('util'); -// const dbg = require('../../util/debug_module')(__filename); +const dbg = require('../../util/debug_module')(__filename); const config = require('../../../config.js'); const MDStore = require('./md_store').MDStore; const map_server = require('./map_server'); const mongo_utils = require('../../util/mongo_utils'); const { ChunkDB } = require('./map_db_types'); +const server_rpc = require('../server_rpc'); +const auth_server = require('../common_services/auth_server'); /** * @@ -44,7 +46,10 @@ async function read_object_mapping(obj, start, end, location_info) { end_gt: rng.start, }); // console.log('TODO GGG read_object_mapping', parts); - const chunks = await read_parts_mapping(parts); + let chunks = await read_parts_mapping(parts, location_info); + if (await update_chunks_on_read(chunks, location_info)) { + chunks = await read_parts_mapping(parts, location_info); + } return chunks; } @@ -89,11 +94,13 @@ async function read_node_mapping(node_ids, skip, limit) { /** * * @param {nb.PartSchemaDB[]} parts + * @param {nb.LocationInfo} [location_info] * @returns {Promise} */ -async function read_parts_mapping(parts) { +async function read_parts_mapping(parts, location_info) { const chunks_db = await MDStore.instance().find_chunks_by_ids(mongo_utils.uniq_ids(parts, 'chunk')); - await MDStore.instance().load_blocks_for_chunks(chunks_db); + const sorter = location_info ? 
_block_sorter_local(location_info) : _block_sorter_basic; + await MDStore.instance().load_blocks_for_chunks(chunks_db, sorter); const chunks_db_by_id = _.keyBy(chunks_db, '_id'); const chunks = parts.map(part => { const chunk = new ChunkDB({ ...chunks_db_by_id[part.chunk.toHexString()], parts: [part] }); @@ -104,59 +111,48 @@ async function read_parts_mapping(parts) { } -// /** -// * @param {nb.Chunk[]} chunks -// * @param {nb.LocationInfo} [location_info] -// */ -// async function update_chunks_on_read(chunks, location_info) { -// const chunks = _.map(parts, 'chunk'); -// const tiering_status_by_bucket_id = {}; - -// for (const chunk of chunks) { -// map_server.populate_chunk(chunk); -// } - -// await _load_chunk_mappings(chunks, tiering_status_by_bucket_id); - -// const chunks_to_scrub = []; -// try { -// const bucket = system_store.data.get_by_id(chunks[0].bucket); -// const tiering_status = tiering_status_by_bucket_id[bucket._id]; -// const selected_tier = mapper.select_tier_for_write(bucket.tiering, tiering_status); -// for (const chunk of chunks) { -// map_server.populate_chunk(chunk); -// if (!chunk.tier._id || !_.isEqual(chunk.tier._id, selected_tier._id)) { -// dbg.log0('Chunk with low tier will be sent for rebuilding', chunk); -// chunks_to_scrub.push(chunk); -// } else if (location_info) { -// const chunk_info = mapper.get_chunk_info(chunk); -// const mapping = mapper.map_chunk(chunk_info, chunk.tier, bucket.tiering, tiering_status, location_info); -// if (mapper.should_rebuild_chunk_to_local_mirror(mapping, location_info)) { -// dbg.log2('Chunk with following mapping will be sent for rebuilding', chunk, mapping); -// chunks_to_scrub.push(chunk); -// } -// } -// } -// if (chunks_to_scrub.length) { -// dbg.log1('Chunks wasn\'t found in local pool - the following will be rebuilt:', util.inspect(chunks_to_scrub)); -// await server_rpc.client.scrubber.build_chunks({ -// chunk_ids: _.map(chunks_to_scrub, '_id'), -// tier: selected_tier._id, -// }, { -// auth_token: auth_server.make_auth_token({ -// system_id: chunks_to_scrub[0].system, -// role: 'admin' -// }) -// }); -// } -// } catch (err) { -// dbg.warn('Chunks failed to rebuilt - skipping'); -// } -// if (chunks_to_scrub.length) { -// // mismatch type... 
-// await MDStore.instance().load_blocks_for_chunks(chunks);
-// }
-// }
+/**
+ * @param {nb.Chunk[]} chunks
+ * @param {nb.LocationInfo} [location_info]
+ */
+async function update_chunks_on_read(chunks, location_info) {
+    const chunks_to_scrub = [];
+    try {
+        const bucket = chunks[0].bucket;
+        const selected_tier = await map_server.select_tier_for_write(bucket);
+        for (const chunk of chunks) {
+            if ((!chunk.tier._id || !_.isEqual(chunk.tier._id, selected_tier._id)) &&
+                map_server.enough_room_in_tier(selected_tier, bucket)) {
+                dbg.log0('Chunk with low tier will be sent for rebuilding', chunk._id);
+                chunks_to_scrub.push(chunk);
+            } else if (location_info) {
+                if (chunk.tier.data_placement !== 'MIRROR') return;
+                const mirror = await map_server.select_mirror_for_write(chunk.tier, bucket.tiering, location_info);
+                if (mirror.spread_pools.find(pool => (location_info.region && location_info.region === pool.region) ||
+                        location_info.pool_id === String(pool._id))) {
+                    dbg.log2('Chunk with following mapping will be sent for rebuilding', chunk);
+                    chunks_to_scrub.push(chunk);
+                }
+            }
+        }
+        if (chunks_to_scrub.length) {
+            const chunk_ids = _.map(chunks_to_scrub, '_id');
+            dbg.log1('Chunks weren\'t found in local pool/upper tier - the following will be rebuilt:', util.inspect(chunks_to_scrub));
+            await server_rpc.client.scrubber.build_chunks({
+                chunk_ids,
+                tier: selected_tier._id,
+            }, {
+                auth_token: auth_server.make_auth_token({
+                    system_id: bucket.system._id,
+                    role: 'admin'
+                })
+            });
+        }
+    } catch (err) {
+        dbg.warn('Chunks failed to rebuild - skipping');
+    }
+    return chunks_to_scrub.length;
+}

 // sanitizing start & end: we want them to be integers, positive, up to obj.size.
 function sanitize_object_range(obj, start, end) {
@@ -184,6 +180,50 @@
     };
 }

+/**
+ * sorting function for blocks - readable blocks first, then most recent heartbeat first
+ * @param {nb.Block} block1
+ * @param {nb.Block} block2
+ */
+function _block_sorter_basic(block1, block2) {
+    const node1 = block1.node;
+    const node2 = block2.node;
+    if (node2.readable && !node1.readable) return 1;
+    if (node1.readable && !node2.readable) return -1;
+    return node2.heartbeat - node1.heartbeat;
+}
+
+/**
+ * locality sorting function for blocks
+ * @param {nb.LocationInfo} location_info
+ */
+function _block_sorter_local(location_info) {
+    return sort_func;
+    /**
+     * locality sorting function for blocks
+     * @param {nb.Block} block1
+     * @param {nb.Block} block2
+     */
+    function sort_func(block1, block2) {
+        const node1 = block1.node;
+        const node2 = block2.node;
+        const { node_id, host_id, pool_id, region } = location_info;
+        if (node2.readable && !node1.readable) return 1;
+        if (node1.readable && !node2.readable) return -1;
+        if (String(node2._id) === node_id && String(node1._id) !== node_id) return 1;
+        if (String(node1._id) === node_id && String(node2._id) !== node_id) return -1;
+        if (node2.host_id === host_id && node1.host_id !== host_id) return 1;
+        if (node1.host_id === host_id && node2.host_id !== host_id) return -1;
+        if (String(block2.pool) === pool_id && String(block1.pool) !== pool_id) return 1;
+        if (String(block1.pool) === pool_id && String(block2.pool) !== pool_id) return -1;
+        if (region) {
+            if (block2.pool.region === region && block1.pool.region !== region) return 1;
+            if (block1.pool.region === region && block2.pool.region !== region) return -1;
+        }
+        return node2.heartbeat - node1.heartbeat;
+    }
+}
+
 // EXPORTS
 exports.read_object_mapping = read_object_mapping; 
exports.read_object_mapping_admin = read_object_mapping_admin; diff --git a/src/server/object_services/map_server.js b/src/server/object_services/map_server.js index d2d7d4753c..57fcf48c5a 100644 --- a/src/server/object_services/map_server.js +++ b/src/server/object_services/map_server.js @@ -5,7 +5,6 @@ const _ = require('lodash'); const assert = require('assert'); -// const util = require('util'); const P = require('../../util/promise'); const dbg = require('../../util/debug_module')(__filename); @@ -127,7 +126,7 @@ class GetMapping { await P.map(uniq_tiers, tier => ensure_room_in_tier(tier, bucket)); await P.delay(config.ALLOCATE_RETRY_DELAY_MS); // TODO Decide if we want to update the chunks mappings when looping - // await this.prepare_chunks_group(chunks, bucket); + // await _prepare_chunks_group({ chunks, move_to_tier: this.move_to_tier, location_info: this.location_info }); } } })); @@ -363,7 +362,6 @@ class PutMapping { MDStore.instance().insert_chunks(this.new_chunks), MDStore.instance().insert_parts(this.new_parts), map_deleter.delete_blocks(this.delete_blocks), - this.move_to_tier && MDStore.instance().update_chunks_by_ids(this.update_chunk_ids, { tier: this.move_to_tier._id }), // TODO // (upload_size > obj.upload_size) && MDStore.instance().update_object_by_id(obj._id, { upload_size: upload_size }) @@ -375,6 +373,7 @@ class PutMapping { /** * @param {nb.Bucket} bucket + * @returns {Promise} */ async function select_tier_for_write(bucket) { const tiering = bucket.tiering; @@ -383,6 +382,18 @@ async function select_tier_for_write(bucket) { return mapper.select_tier_for_write(tiering, tiering_status); } +/** + * @param {nb.Tier} tier + * @param {nb.Tiering} tiering + * @param {nb.LocationInfo} location_info + * @returns {Promise} + */ +async function select_mirror_for_write(tier, tiering, location_info) { + await node_allocator.refresh_tiering_alloc(tiering); + const tiering_status = node_allocator.get_tiering_status(tiering); + return mapper.select_mirror_for_write(tier, tiering, tiering_status, location_info); +} + /** * @param {nb.ID} tier_id @@ -468,17 +479,17 @@ function enough_room_in_tier(tier, bucket) { const tier_status = tiering_status[tier_id_str]; const tier_in_tiering = _.find(tiering.tiers, t => String(t.tier._id) === tier_id_str); if (!tier_in_tiering || !tier_status) throw new Error(`Can't find current tier in bucket`); - const available_to_upload = size_utils.json_to_bigint(size_utils.reduce_maximum( + const available_to_upload = size_utils.json_to_bigint(size_utils.reduce_minimum( 'free', tier_status.mirrors_storage.map(storage => (storage.free || 0)) )); if (available_to_upload && available_to_upload.greater(config.ENOUGH_ROOM_IN_TIER_THRESHOLD)) { - dbg.log0('make_room_in_tier: has enough room', tier.name, available_to_upload.toJSNumber(), '>', config.ENOUGH_ROOM_IN_TIER_THRESHOLD); + dbg.log0('enough_room_in_tier: has enough room', tier.name, available_to_upload.toJSNumber(), '>', config.ENOUGH_ROOM_IN_TIER_THRESHOLD); map_reporter.add_event(`has_enough_room(${tier.name})`, available_to_upload.toJSNumber(), 0); return true; } else { - dbg.log0(`make_room_in_tier: not enough room ${tier.name}:`, + dbg.log0(`enough_room_in_tier: not enough room ${tier.name}:`, `${available_to_upload.toJSNumber()} < ${config.ENOUGH_ROOM_IN_TIER_THRESHOLD} should move chunks to next tier`); - map_reporter.add_event(`not_enough_room(${tier.name})`, available_to_upload.toJSNumber(), 0); + map_reporter.add_event(`enough_room_in_tier: not_enough_room(${tier.name})`, 
available_to_upload.toJSNumber(), 0); return false; } } @@ -523,9 +534,13 @@ async function _prepare_chunks_group({ chunks, move_to_tier, location_info }) { for (const chunk of chunks) { chunk.is_accessible = false; + chunk.is_building_blocks = false; + chunk.is_building_frags = false; let num_accessible_frags = 0; for (const frag of chunk.frags) { frag.is_accessible = false; + frag.is_building_blocks = false; + frag.allocations = []; for (const block of frag.blocks) { if (!block.node || !block.node._id) { dbg.warn('ORPHAN BLOCK (ignoring)', block); @@ -601,6 +616,8 @@ async function prepare_blocks_from_db(blocks) { exports.GetMapping = GetMapping; exports.PutMapping = PutMapping; exports.select_tier_for_write = select_tier_for_write; +exports.select_mirror_for_write = select_mirror_for_write; +exports.enough_room_in_tier = enough_room_in_tier; exports.make_room_in_tier = make_room_in_tier; exports.prepare_chunks = prepare_chunks; exports.prepare_blocks = prepare_blocks; diff --git a/src/server/object_services/mapper.js b/src/server/object_services/mapper.js index 1e4a4746c5..e434c9253d 100644 --- a/src/server/object_services/mapper.js +++ b/src/server/object_services/mapper.js @@ -417,6 +417,7 @@ function find_local_pool(pools, location_info) { // EXPORTS exports.select_tier_for_write = select_tier_for_write; +exports.select_mirror_for_write = select_mirror_for_write; exports.map_chunk = map_chunk; exports.is_chunk_good_for_dedup = is_chunk_good_for_dedup; diff --git a/src/server/object_services/md_store.js b/src/server/object_services/md_store.js index c7525f8a1e..48649c8bec 100644 --- a/src/server/object_services/md_store.js +++ b/src/server/object_services/md_store.js @@ -926,10 +926,6 @@ class MDStore { $set: { deleted: delete_date }, - $rename: { - // obj: 'obj_del', - num: 'num_del', - } }); } @@ -941,10 +937,6 @@ class MDStore { $set: { deleted: delete_date }, - $rename: { - // obj: 'obj_del', - num: 'num_del', - } }); } @@ -1163,11 +1155,6 @@ class MDStore { $set: { deleted: delete_date }, - $rename: { - // obj: 'obj_del', - start: 'start_del', - // chunk: 'chunk_del', - } }); } @@ -1179,11 +1166,6 @@ class MDStore { $set: { deleted: delete_date }, - $rename: { - // obj: 'obj_del', - start: 'start_del', - // chunk: 'chunk_del', - } }); } @@ -1547,9 +1529,10 @@ class MDStore { /** * @param {nb.ChunkSchemaDB[]} chunks + * @param {?(a: any, b: any) => number} [sorter] * @return {Promise} */ - async load_blocks_for_chunks(chunks) { + async load_blocks_for_chunks(chunks, sorter) { if (!chunks || !chunks.length) return; const blocks = await this._blocks.col().find({ chunk: { $in: mongo_utils.uniq_ids(chunks, '_id') }, @@ -1560,7 +1543,8 @@ class MDStore { for (const chunk of chunks) { const blocks_by_frag = _.groupBy(blocks_by_chunk[chunk._id.toHexString()], 'frag'); for (const frag of chunk.frags) { - frag.blocks = blocks_by_frag[frag._id.toHexString()]; + const frag_blocks = blocks_by_frag[frag._id.toHexString()]; + frag.blocks = sorter ? 
frag_blocks.sort(sorter) : frag_blocks;
            }
        }
    }
@@ -1634,10 +1618,6 @@ class MDStore {
             $set: {
                 deleted: delete_date
             },
-            // $rename: {
-            //     chunk: 'chunk_del',
-            //     node: 'node_del',
-            // }
         });
     }
 }
diff --git a/src/server/object_services/schemas/object_multipart_indexes.js b/src/server/object_services/schemas/object_multipart_indexes.js
index d9eb242957..0dec08abc8 100644
--- a/src/server/object_services/schemas/object_multipart_indexes.js
+++ b/src/server/object_services/schemas/object_multipart_indexes.js
@@ -10,6 +10,7 @@ module.exports = [{
         unique: false,
         partialFilterExpression: {
             obj: { $exists: true },
+            deleted: null,
         }
     }
 }];
diff --git a/src/server/object_services/schemas/object_part_indexes.js b/src/server/object_services/schemas/object_part_indexes.js
index cd85066b32..4bc8cf9454 100644
--- a/src/server/object_services/schemas/object_part_indexes.js
+++ b/src/server/object_services/schemas/object_part_indexes.js
@@ -15,6 +15,7 @@ module.exports = [{
         unique: false,
         partialFilterExpression: {
             obj: { $exists: true },
+            deleted: null,
         }
     }
 },
diff --git a/src/server/server_rpc.js b/src/server/server_rpc.js
index e20ca969f9..9d688e9d16 100644
--- a/src/server/server_rpc.js
+++ b/src/server/server_rpc.js
@@ -85,8 +85,6 @@ class ServerRpc {
             require('./system_services/cluster_server'), options);
         rpc.register_service(schema.cluster_internal_api,
             require('./system_services/cluster_server'), options);
-        rpc.register_service(schema.upgrade_api,
-            require('./system_services/upgrade_server'), options);
         rpc.register_service(schema.stats_api,
             require('./system_services/stats_aggregator'), options);
         rpc.register_service(schema.events_api,
diff --git a/src/server/system_services/bucket_server.js b/src/server/system_services/bucket_server.js
index 9ceac18a57..1df9727e13 100644
--- a/src/server/system_services/bucket_server.js
+++ b/src/server/system_services/bucket_server.js
@@ -57,7 +57,9 @@ function new_bucket_defaults(name, system_id, tiering_policy_id, tag) {
         objects_size: 0,
         objects_count: 0,
         objects_hist: [],
-        last_update: now - (2 * config.MD_GRACE_IN_MILLISECONDS)
+        // a new bucket's creation date is rounded down to config.MD_AGGREGATOR_INTERVAL (30 seconds)
+        last_update: (Math.floor(now / config.MD_AGGREGATOR_INTERVAL) * config.MD_AGGREGATOR_INTERVAL) -
+            (2 * config.MD_GRACE_IN_MILLISECONDS),
     },
     lambda_triggers: [],
     versioning: 'DISABLED'
@@ -1102,9 +1104,12 @@ function get_bucket_info({
     };
     const metrics = _calc_metrics({ bucket, nodes_aggregate_pool, hosts_aggregate_pool, tiering_pools_status, info });
+    const bucket_last_update = _.get(bucket, 'storage_stats.last_update') || config.NOOBAA_EPOCH;
+    const system_last_update = _.get(bucket, 'system.global_last_update') || config.NOOBAA_EPOCH;
+    const last_update = Math.max(system_last_update, bucket_last_update);
     info.usage_by_pool = {
         pools: {},
-        last_update: _.get(bucket, 'storage_stats.last_update')
+        last_update,
     };
     info.usage_by_pool.pools = [];
@@ -1255,6 +1260,10 @@ function _calc_metrics({
     const bucket_total = bucket_free.plus(bucket_used)
         .plus(bucket_used_other);
+    const bucket_last_update = _.get(bucket, 'storage_stats.last_update') || config.NOOBAA_EPOCH;
+    const system_last_update = _.get(bucket, 'system.global_last_update') || config.NOOBAA_EPOCH;
+    const last_update = Math.max(system_last_update, bucket_last_update);
+
     info.storage = {
         values: size_utils.to_bigint_storage({
             used: bucket_used,
@@ -1262,7 +1271,7 @@ function _calc_metrics({
             total: bucket_total,
             free: bucket_free,
         }),
-        last_update: _.get(bucket,
'storage_stats.last_update') + last_update }; const actual_free = size_utils.json_to_bigint(_.get(info, 'tiering.data.free') || 0); @@ -1291,7 +1300,7 @@ function _calc_metrics({ size_reduced: bucket_chunks_capacity, free: actual_free, available_for_upload, - last_update: _.get(bucket, 'storage_stats.last_update') + last_update, }); return { diff --git a/src/server/system_services/cluster_server.js b/src/server/system_services/cluster_server.js index a340237e63..5046f1700b 100644 --- a/src/server/system_services/cluster_server.js +++ b/src/server/system_services/cluster_server.js @@ -24,7 +24,6 @@ const Dispatcher = require('../notifications/dispatcher'); const system_store = require('./system_store').get_instance(); const promise_utils = require('../../util/promise_utils'); const { RpcError, RPC_BUFFERS } = require('../../rpc'); -const upgrade_server = require('./upgrade_server'); const cutils = require('../utils/clustering_utils'); let add_member_in_process = false; @@ -73,9 +72,6 @@ function new_cluster_info(params) { }], }], config_servers: [], - upgrade: { - status: 'COMPLETED', - }, }; return cluster; }) @@ -1501,4 +1497,3 @@ exports.set_hostname_internal = set_hostname_internal; exports.get_version = get_version; exports.get_secret = get_secret; exports.update_member_of_cluster = update_member_of_cluster; -exports.upgrade_cluster = upgrade_server.upgrade_cluster; diff --git a/src/server/system_services/schemas/cluster_schema.js b/src/server/system_services/schemas/cluster_schema.js index d4480130b6..83d0f0304b 100644 --- a/src/server/system_services/schemas/cluster_schema.js +++ b/src/server/system_services/schemas/cluster_schema.js @@ -1,8 +1,6 @@ /* Copyright (C) 2016 NooBaa */ 'use strict'; -const SensitiveString = require('../../../util/sensitive_string'); - module.exports = { id: 'cluster_schema', type: 'object', @@ -93,64 +91,6 @@ module.exports = { idate: true }, - //Upgrade proccess - upgrade: { - type: 'object', - properties: { - path: { - type: 'string' - }, - mongo_upgrade: { - type: 'boolean' - }, - status: { - type: 'string', - enum: [ - 'PENDING', - 'FAILED', - 'CAN_UPGRADE', - 'UPGRADING', - 'COMPLETED', - 'PRE_UPGRADE_PENDING', - 'PRE_UPGRADE_READY', - 'UPGRADE_FAILED' - ] - }, - stage: { - type: 'string', - enum: [ - 'COPY_NEW_CODE', - 'DB_READY', - 'UPGRADE_ABORTED', - 'UPGRADE_PLATFORM', - 'UPGRADE_MONGODB_VER', - 'UPGRADE_MONGODB_SCHEMAS', - 'UPDATE_SERVICES', - 'CLEANUP', - 'UPGRADE_COMPLETED', - ] - }, - error: { - type: 'string' - }, - report_info: { - type: 'string' - }, - initiator_email: { - wrapper: SensitiveString, - }, - tested_date: { - idate: true - }, - staged_package: { - type: 'string' - }, - package_uploaded: { - idate: true - } - }, - }, - heartbeat: { type: 'object', required: ['version', 'time'], diff --git a/src/server/system_services/schemas/system_schema.js b/src/server/system_services/schemas/system_schema.js index fc3d883381..85c9929741 100644 --- a/src/server/system_services/schemas/system_schema.js +++ b/src/server/system_services/schemas/system_schema.js @@ -212,7 +212,11 @@ module.exports = { } } } - }, - } + } + }, + + global_last_update: { + idate: true + }, } }; diff --git a/src/server/system_services/stats_aggregator.js b/src/server/system_services/stats_aggregator.js index 77c6ea661d..d01d16ada5 100644 --- a/src/server/system_services/stats_aggregator.js +++ b/src/server/system_services/stats_aggregator.js @@ -31,6 +31,7 @@ const prom_report = require('../analytic_services/prometheus_reporting').Prometh const HistoryDataStore = 
require('../analytic_services/history_data_store').HistoryDataStore; const { google } = require('googleapis'); const google_storage = google.storage('v1'); +const addr_utils = require('../../util/addr_utils'); const ops_aggregation = {}; @@ -146,6 +147,7 @@ const PARTIAL_SINGLE_ACCOUNT_DEFAULTS = { const PARTIAL_SINGLE_SYS_DEFAULTS = { name: '', + address: '', capacity: 0, reduction_ratio: 0, savings: { @@ -370,8 +372,16 @@ async function get_partial_systems_stats(req) { const total_usage = size_utils.bigint_to_bytes(storage.used); const capacity = 100 - Math.floor(((free_bytes / total_bytes) || 1) * 100); + const { system_address } = system; + const https_port = process.env.SSL_PORT || 5443; + const address = DEV_MODE ? `https://localhost:${https_port}` : addr_utils.get_base_address(system_address, { + hint: 'EXTERNAL', + protocol: 'https' + }).toString(); + return _.defaults({ name: system.name, + address, capacity, reduction_ratio, savings, @@ -832,6 +842,7 @@ function partial_cycle_parse_prometheus_metrics(payload) { reduction_ratio, savings, name, + address, usage_by_bucket_class, usage_by_project, total_usage, @@ -851,7 +862,7 @@ function partial_cycle_parse_prometheus_metrics(payload) { prom_report.instance().set_num_unhealthy_pools(unhealthy_pool_count); prom_report.instance().set_num_pools(pool_count); prom_report.instance().set_unhealthy_cloud_types(cloud_pool_stats); - prom_report.instance().set_system_info({ name }); + prom_report.instance().set_system_info({ name, address }); prom_report.instance().set_num_buckets(buckets); prom_report.instance().set_num_objects(objects_in_buckets); prom_report.instance().set_num_unhealthy_buckets(unhealthy_buckets); diff --git a/src/server/system_services/system_server.js b/src/server/system_services/system_server.js index ccb2172770..00bdb1c8e6 100644 --- a/src/server/system_services/system_server.js +++ b/src/server/system_services/system_server.js @@ -453,16 +453,6 @@ function read_system(req) { buckets_stats }) => { const cluster_info = cutil.get_cluster_info(rs_status); - for (const shard of cluster_info.shards) { - for (const server of shard.servers) { - const error = { - message: server.upgrade.error, - report_info: server.upgrade.report_info - }; - server.upgrade.error = error; - delete server.upgrade.report_info; - } - } const objects_sys = { count: size_utils.BigInteger.zero, size: size_utils.BigInteger.zero, diff --git a/src/server/system_services/system_store.js b/src/server/system_services/system_store.js index 6c84c43e1f..5f75d5200b 100644 --- a/src/server/system_services/system_store.js +++ b/src/server/system_services/system_store.js @@ -27,6 +27,7 @@ const size_utils = require('../../util/size_utils'); const os_utils = require('../../util/os_utils'); const mongo_utils = require('../../util/mongo_utils'); const mongo_client = require('../../util/mongo_client'); +const config = require('../../../config'); const { RpcError } = require('../../rpc'); const COLLECTIONS = [{ @@ -410,6 +411,7 @@ class SystemStore extends EventEmitter { super(); // // TODO: This is currently used as a cache, maybe will be moved in the future // this.valid_for_alloc_by_tier = {}; + this.last_update_time = config.NOOBAA_EPOCH; this.is_standalone = options.standalone; this.is_cluster_master = false; this.is_finished_initial_load = false; @@ -438,6 +440,11 @@ class SystemStore extends EventEmitter { .catch(_.noop); } + clean_system_store() { + this.old_db_data = undefined; + this.last_update_time = config.NOOBAA_EPOCH; + } + async refresh() { let 
load_time = 0; if (this.data) { @@ -463,7 +470,7 @@ class SystemStore extends EventEmitter { let new_data = new SystemStoreData(); let millistamp = time_utils.millistamp(); await this._register_for_changes(); - await this._read_data_from_db(new_data); + await this._read_new_data_from_db(new_data); const secret = await os_utils.read_server_secret(); this._server_secret = secret; dbg.log1('SystemStore: fetch took', time_utils.millitook(millistamp)); @@ -471,10 +478,11 @@ class SystemStore extends EventEmitter { dbg.log1('SystemStore: fetch data', util.inspect(new_data, { depth: 4 })); + this.old_db_data = this.old_db_data ? this._update_data_from_new(this.old_db_data, new_data) : new_data; + this.data = _.cloneDeep(this.old_db_data); millistamp = time_utils.millistamp(); - new_data.rebuild(); + this.data.rebuild(); dbg.log1('SystemStore: rebuild took', time_utils.millitook(millistamp)); - this.data = new_data; this.emit('load'); this.is_finished_initial_load = true; return this.data; @@ -485,6 +493,15 @@ class SystemStore extends EventEmitter { }); } + _update_data_from_new(data, new_data) { + COLLECTIONS.forEach(col => { + const old_data = data[col.name]; + const res = _.unionBy(new_data[col.name], old_data, doc => doc._id.toString()); + new_data[col.name] = res.filter(doc => !doc.deleted); + }); + return new_data; + } + async _register_for_changes() { if (this.is_standalone) { @@ -523,6 +540,32 @@ class SystemStore extends EventEmitter { )); } + _read_new_data_from_db(target) { + const now = Date.now(); + let newly_updated_query = { + last_update: { + $gte: this.last_update_time, + } + }; + return mongo_client.instance().connect() + .then(() => P.map(COLLECTIONS, + col => mongo_client.instance().collection(col.name) + .find(newly_updated_query, { + projection: { last_update: 0 } + }) + .toArray() + .then(res => { + for (const item of res) { + this._check_schema(col, item, 'warn'); + } + target[col.name] = res; + }) + )) + .then(() => { + this.last_update_time = now; + }); + } + _check_schema(col, item, warn) { return mongo_client.instance().validate(col.name, item, warn); } @@ -610,7 +653,8 @@ class SystemStore extends EventEmitter { */ async make_changes(changes) { const bulk_per_collection = {}; - const now = new Date(); + const now = Date.now(); + let any_news = false; dbg.log0('SystemStore.make_changes:', util.inspect(changes, { depth: 5 })); @@ -636,6 +680,8 @@ class SystemStore extends EventEmitter { _.each(list, item => { this._check_schema(col, item); data.check_indexes(col, item); + item.last_update = now; + any_news = true; get_bulk(name).insert(item); }); }); @@ -643,7 +689,8 @@ class SystemStore extends EventEmitter { const col = get_collection(name); _.each(list, item => { data.check_indexes(col, item); - let updates = _.omit(item, '_id', '$find'); + let dont_change_last_update = Boolean(item.dont_change_last_update); + let updates = _.omit(item, '_id', '$find', 'dont_change_last_update'); let finds = item.$find || _.pick(item, '_id'); if (_.isEmpty(updates)) return; let keys = _.keys(updates); @@ -671,6 +718,11 @@ class SystemStore extends EventEmitter { // if (updates.$set) { // this._check_schema(col, updates.$set, 'warn'); // } + if (!dont_change_last_update) { + if (!updates.$set) updates.$set = {}; + updates.$set.last_update = now; + any_news = true; + } get_bulk(name) .find(finds) .updateOne(updates); @@ -679,13 +731,15 @@ class SystemStore extends EventEmitter { _.each(changes.remove, (list, name) => { get_collection(name); _.each(list, id => { + any_news = true; 
get_bulk(name) .find({ _id: id }) .updateOne({ $set: { - deleted: now + deleted: now, + last_update: now, } }); }); @@ -707,15 +761,17 @@ class SystemStore extends EventEmitter { bulk => bulk.length && bulk.execute({ j: true }) )); - if (this.is_standalone) { - await this.load(); - } else { - // notify all the cluster (including myself) to reload - await server_rpc.client.redirector.publish_to_cluster({ - method_api: 'server_inter_process_api', - method_name: 'load_system_store', - target: '' - }); + if (any_news) { + if (this.is_standalone) { + await this.load(); + } else { + // notify all the cluster (including myself) to reload + await server_rpc.client.redirector.publish_to_cluster({ + method_api: 'server_inter_process_api', + method_name: 'load_system_store', + target: '' + }); + } } } diff --git a/src/server/system_services/upgrade_server.js b/src/server/system_services/upgrade_server.js deleted file mode 100644 index 89cfae8a02..0000000000 --- a/src/server/system_services/upgrade_server.js +++ /dev/null @@ -1,498 +0,0 @@ -/* Copyright (C) 2016 NooBaa */ -/* eslint max-lines: ['error', 2500] */ -'use strict'; - -const _ = require('lodash'); -const fs = require('fs'); -const url = require('url'); -const request = require('request'); - -const P = require('../../util/promise'); -const dbg = require('../../util/debug_module')(__filename); -const cutil = require('../utils/clustering_utils'); -const MongoCtrl = require('../utils/mongo_ctrl'); -const server_rpc = require('../server_rpc'); -const Dispatcher = require('../notifications/dispatcher'); -const system_store = require('./system_store').get_instance(); -const promise_utils = require('../../util/promise_utils'); - -const upgrade_utils = require('../../upgrade/upgrade_utils'); - -// TODO: maybe we need to change it to use upgrade status in DB. -// currently use this memory only flag to indicate if upgrade is still in process -let upgrade_in_process = false; - - -const DEFAULT_MAKE_CHANGES_RETRIES = { - max_retries: 10, - delay: 5000 -}; - -async function member_pre_upgrade(req) { - dbg.log0('UPGRADE: received upgrade package:, ', req.rpc_params.filepath, req.rpc_params.mongo_upgrade ? - 'this server should preform mongo_upgrade' : - 'this server should not preform mongo_upgrade'); - let server = system_store.get_local_cluster_info(); //Update path in DB - dbg.log0('update upgrade for server: ', cutil.get_cluster_info().owner_address); - let upgrade = _.omitBy({ - path: req.rpc_params.filepath, - mongo_upgrade: req.rpc_params.mongo_upgrade, - status: req.rpc_params.stage === 'UPGRADE_STAGE' ? 'PRE_UPGRADE_PENDING' : 'PENDING', - package_uploaded: req.rpc_params.stage === 'UPLOAD_STAGE' ? Date.now() : server.upgrade.package_uploaded, - staged_package: req.rpc_params.stage === 'UPLOAD_STAGE' ? 
'UNKNOWN' : server.upgrade.staged_package - }, _.isUndefined); - - dbg.log0('UPGRADE:', 'updating cluster for server._id', server._id, 'with upgrade =', upgrade); - - - await system_store.make_changes_with_retries({ - update: { - clusters: [{ - _id: server._id, - upgrade: upgrade - }] - } - }, DEFAULT_MAKE_CHANGES_RETRIES); - await Dispatcher.instance().publish_fe_notifications({ secret: system_store.get_server_secret() }, 'change_upgrade_status'); - - try { - const res = await upgrade_utils.pre_upgrade({ - upgrade_path: upgrade.path, - extract_package: req.rpc_params.stage === 'UPLOAD_STAGE' - }); - - //Update result of pre_upgrade and message in DB - if (res.result) { - dbg.log0('UPGRADE:', 'get res.result =', res.result, ' setting status to CAN_UPGRADE'); - upgrade.status = req.rpc_params.stage === 'UPGRADE_STAGE' ? 'PRE_UPGRADE_READY' : 'CAN_UPGRADE'; - } else { - dbg.log0('UPGRADE:', 'get res.result =', res.result, ' setting status to FAILED'); - upgrade.status = 'FAILED'; - dbg.error('UPGRADE HAD ERROR: ', res.error); - // TODO: Change that shit to more suitable error handler - upgrade.error = res.error; - upgrade.report_info = res.report_info; - - if (req.rpc_params.stage === 'UPGRADE_STAGE') { - upgrade_in_process = false; - } - } - - if (req.rpc_params.stage === 'UPLOAD_STAGE') { - upgrade.staged_package = res.staged_package || 'UNKNOWN'; - } - upgrade.tested_date = res.tested_date; - - } catch (err) { - dbg.error('failed to run pre_upgrade'); - upgrade.status = 'FAILED'; - upgrade.error = upgrade_utils.pre_upgrade_failure_error; - } - - - dbg.log0('UPGRADE:', 'updating cluster again for server._id', server._id, 'with upgrade =', upgrade); - await system_store.make_changes_with_retries({ - update: { - clusters: [{ - _id: server._id, - upgrade: _.omitBy(upgrade, _.isUndefined) - }] - } - }, DEFAULT_MAKE_CHANGES_RETRIES); - - await Dispatcher.instance().publish_fe_notifications({ secret: system_store.get_server_secret() }, 'change_upgrade_status'); -} - -async function do_upgrade(req) { - await system_store.load(); - - dbg.log0('UPGRADE:', 'got do_upgrade'); - let server = system_store.get_local_cluster_info(); - if (server.upgrade.status !== 'UPGRADING') { - dbg.error('Not in upgrade state:', ' State Is: ', server.upgrade.status || 'NO_STATUS'); - throw new Error(`Not in upgrade state: ${server.upgrade.error ? server.upgrade.error : ''} State Is: ${server.upgrade.status || "NO_STATUS"}`); - } - if (server.upgrade.path === '') { - dbg.error('No package path supplied'); - throw new Error('No package path supplied'); - } - let filepath = server.upgrade.path; - //Async as the server will soon be restarted anyway - const on_err = err => { - dbg.error('upgrade scripted failed. Aborting upgrade:', err); - upgrade_in_process = false; - _handle_cluster_upgrade_failure(err, server.owner_address); - }; - upgrade_utils.do_upgrade(filepath, server.is_clusterized, on_err); -} - -function get_upgrade_status(req) { - const server = system_store.get_local_cluster_info(); - const upgrade_stage = _.get(server, 'upgrade.stage'); - if (upgrade_stage === 'UPGRADE_ABORTED') { - dbg.warn('UPGRADE: upgrade was aborted by upgrade manager. 
resetting upgrade_in_process to false'); - upgrade_in_process = false; - } - return { in_process: upgrade_in_process }; -} - -function cluster_pre_upgrade(req) { - dbg.log0('cluster_pre_upgrade:', cutil.pretty_topology(cutil.get_topology())); - // get all cluster members other than the master - const cinfo = system_store.get_local_cluster_info(); - const upgrade_path = req.rpc_params.filepath || _get_upgrade_path(); - - return P.resolve() - .then(() => { - if (!upgrade_path) { - throw new Error('cluster_pre_upgrade: must include path'); - } - const secondary_members = cutil.get_all_cluster_members().filter(ip => ip !== cinfo.owner_address); - dbg.log0('cluster_pre_upgrade:', 'secondaries =', secondary_members); - - return P.fcall(() => { - if (cinfo.is_clusterized) { - return MongoCtrl.is_master() - .then(res => res.ismaster); - } - return true; - }) - .then(is_master => { - if (!is_master) { - throw new Error('cluster_pre_upgrade: upgrade must be done on master node'); - } - - dbg.log0('cluster_pre_upgrade:', 'calling member_pre_upgrade'); - server_rpc.client.upgrade.member_pre_upgrade({ - filepath: upgrade_path, - mongo_upgrade: false, - stage: req.rpc_params.filepath ? 'UPLOAD_STAGE' : 'RETEST_STAGE' - }) - .catch(err => { - dbg.error('cluster_pre_upgrade:', 'pre_upgrade failed on master - aborting upgrade', err); - throw err; - }); - }) - .then(() => { - // upload package to secondaries - dbg.log0('cluster_pre_upgrade:', 'uploading package to all cluster members'); - // upload package to cluster members - return P.all(secondary_members.map(ip => _upload_package(upgrade_path, ip) - .catch(err => { - dbg.error('upgrade_cluster failed uploading package', err); - return _handle_cluster_upgrade_failure(new Error('DISTRIBUTION_FAILED'), ip); - }) - )); - }); - }) - .return(); -} - -async function upgrade_cluster(req) { - dbg.log0('UPGRADE got request to upgrade the cluster:', cutil.pretty_topology(cutil.get_topology())); - // get all cluster members other than the master - let cinfo = system_store.get_local_cluster_info(); - if (cinfo.upgrade.status !== 'CAN_UPGRADE' && cinfo.upgrade.status !== 'UPGRADE_FAILED') { - throw new Error(`Not in upgrade state: ${cinfo.upgrade.error ? cinfo.upgrade.error : ''}`); - } - const upgrade_path = _get_upgrade_path(); - // upgrade can only be called from master. throw error otherwise - upgrade_in_process = true; - - const is_master = cinfo.is_clusterized ? 
await MongoCtrl.is_master() : true; - if (!is_master) { - throw new Error('UPGRADE upgrade must be done on master node'); - } - - const updates = system_store.data.clusters.map(cluster => ({ - _id: cluster._id, - $set: { - "upgrade.initiator_email": req.account.email, - // This is a patch that was done in order so the FE won't wait for tests prior to upgrade - // We set this once again inside the member_pre_upgrade - "upgrade.status": 'PRE_UPGRADE_PENDING' - }, - $unset: { - "upgrade.error": 1 - } - })); - await system_store.make_changes_with_retries({ - update: { - clusters: updates - } - }, DEFAULT_MAKE_CHANGES_RETRIES); - - // Notice that we do not await here on purpose so the FE won't wait for the completion of the tests - _test_and_upgrade_in_background(cinfo, upgrade_path, req); -} - -function reset_upgrade_package_status(req) { - const updates = system_store.data.clusters.map(cluster => ({ - _id: cluster._id, - $set: { - 'upgrade.status': 'COMPLETED' - } - })); - Dispatcher.instance().activity({ - event: 'conf.upload_package', - level: 'info', - system: req.system._id, - actor: req.account && req.account._id, - }); - - return system_store.make_changes_with_retries({ - update: { - clusters: updates - } - }, DEFAULT_MAKE_CHANGES_RETRIES) - .then(() => Dispatcher.instance().publish_fe_notifications({ secret: system_store.get_server_secret() }, 'change_upgrade_status')); -} - -async function _test_and_upgrade_in_background(cinfo, upgrade_path, req) { - - const all_members = cutil.get_all_cluster_members(); - const secondary_members = all_members.filter(ip => ip !== cinfo.owner_address); - dbg.log0('UPGRADE:', 'secondaries =', secondary_members); - - - dbg.log0('UPGRADE:', 'testing package in all cluster members'); - const member_tests = all_members.map(async ip => { - const is_master = ip === cinfo.owner_address; - if (is_master) { - try { - dbg.log0('UPGRADE:', 'calling member_pre_upgrade'); - await server_rpc.client.upgrade.member_pre_upgrade({ - filepath: upgrade_path, - mongo_upgrade: true, - stage: 'UPGRADE_STAGE' - }); - } catch (err) { - - dbg.error('UPGRADE:', 'pre_upgrade failed on master - aborting upgrade', err); - throw err; - } - } else { - try { - await server_rpc.client.upgrade.member_pre_upgrade({ - filepath: system_store.data.clusters - .find(cluster => (String(cluster.owner_address) === String(ip))) - .upgrade.path, - mongo_upgrade: false, - stage: 'UPGRADE_STAGE' - }, { - address: server_rpc.get_base_address(ip), - }); - } catch (err) { - dbg.error('upgrade_cluster failed uploading package', err); - await _handle_cluster_upgrade_failure(new Error('DISTRIBUTION_FAILED'), ip); - } - } - - }); - - try { - // wait for all members to run tests - await P.all(member_tests); - //wait for all members to reach PRE_UPGRADE_READY. 
if one failed fail the upgrade - dbg.log0('UPGRADE:', 'waiting for secondaries to reach PRE_UPGRADE_READY'); - for (const ip of all_members) { - dbg.log0('UPGRADE:', 'waiting for server', ip, 'to reach PRE_UPGRADE_READY'); - await _wait_for_upgrade_state(ip, 'PRE_UPGRADE_READY'); - } - - Dispatcher.instance().activity({ - event: 'conf.system_upgrade_started', - level: 'info', - system: req.system._id, - actor: req.account && req.account._id, - }); - - //Update all clusters upgrade section with the new status and clear the error if exists - const cluster_updates = system_store.data.clusters.map(cluster => ({ - _id: cluster._id, - $set: { - "upgrade.status": 'UPGRADING' - }, - $unset: { - "upgrade.error": true - } - })); - //set last upgrade initiator under system - const system_updates = [{ - _id: req.system._id, - $set: { - // The timestamp is a patch so the UI won't fail on schema validation - // It will be updated once again in mongo_upgrade_mark_completed by design - "last_upgrade.timestamp": Date.now(), - "last_upgrade.initiator": (req.account && req.account.email) || '' - } - }]; - await system_store.make_changes_with_retries({ - update: { - clusters: cluster_updates, - systems: system_updates - } - }, DEFAULT_MAKE_CHANGES_RETRIES); - - await Dispatcher.instance().publish_fe_notifications({}, 'change_upgrade_status'); - - _handle_upgrade_stage({ secondary_members, upgrade_path }); - - } catch (err) { - dbg.error('Got error when running upgrade in background', err); - // TODO: we should try to fix everything and restore system. - // check if rollback is necessary or even possible - } -} - -function _handle_cluster_upgrade_failure(err, ip) { - return P.resolve() - .then(() => { - dbg.error('_handle_cluster_upgrade_failure: got error on cluster upgrade', err); - upgrade_in_process = false; - const fe_notif_params = {}; - const change = { - "upgrade.status": 'UPGRADE_FAILED', - "upgrade.error": 'Upgrade has failed with an interal error.' - }; - let updates; - if (ip) { - updates = [{ - _id: system_store.data.clusters.find(cluster => { - if (String(cluster.owner_address) === String(ip)) { - fe_notif_params.secret = cluster.owner_secret; - return true; - } - return false; - })._id, - $set: change - }]; - } else { - updates = system_store.data.clusters.map(cluster => ({ - _id: cluster._id, - $set: change - })); - } - return system_store.make_changes_with_retries({ - update: { - clusters: updates - } - }, DEFAULT_MAKE_CHANGES_RETRIES) - .then(() => Dispatcher.instance().publish_fe_notifications(fe_notif_params, 'change_upgrade_status')) - .finally(() => { - throw err; - }); - }); -} - -function _handle_upgrade_stage(params) { - // We do not return on purpose! - P.each(params.secondary_members, ip => { - dbg.log0('UPGRADE:', 'sending do_upgrade to server', ip, 'and and waiting for DB_READY state'); - return server_rpc.client.upgrade.do_upgrade({}, { - address: server_rpc.get_base_address(ip) - }) - .then(() => _wait_for_upgrade_stage(ip, 'DB_READY')) - .then(() => Dispatcher.instance().publish_fe_notifications({ - secret: system_store.data.clusters.find(cluster => (String(cluster.owner_address) === String(ip))).owner_secret - }, 'change_upgrade_status')) - .catch(err => { - dbg.error('UPGRADE:', 'got error on upgrade of server', ip, 'aborting upgrade process', err); - return _handle_cluster_upgrade_failure(err, ip); - }); - }) - // after all secondaries are upgraded it is safe to upgrade the primary. 
- // secondaries should wait (in upgrade.js) for primary to complete upgrade and perform mongo_upgrade - .then(() => { - dbg.log0('UPGRADE:', 'calling do_upgrade on master'); - return server_rpc.client.upgrade.do_upgrade({ - filepath: params.upgrade_path - }); - }) - .then(() => Dispatcher.instance().publish_fe_notifications({ secret: system_store.get_server_secret() }, 'change_upgrade_status')) - .catch(err => { - dbg.error('_handle_upgrade_stage got error on cluster upgrade', err); - upgrade_in_process = false; - }); -} - -function _wait_for_upgrade_state(ip, state) { - let max_retries = 60; - const upgrade_retry_delay = 10 * 1000; // the delay between testing upgrade status - return promise_utils.retry(max_retries, upgrade_retry_delay, () => system_store.load() - .then(() => { - dbg.log0('UPGRADE:', 'wating for', ip, 'to reach', state); - let status = cutil.get_member_upgrade_status(ip); - dbg.log0('UPGRADE:', 'got status:', status); - if (status !== state) { - dbg.error('UPGRADE:', 'timedout waiting for ' + ip + ' to reach ' + state); - if (status === 'FAILED') max_retries = 0; - throw new Error('timedout waiting for ' + ip + ' to reach ' + state); - } - }) - ); -} - -function _wait_for_upgrade_stage(ip, stage_req) { - // member upgrade can take some time. keep retrying for an hour - let max_retries = 360; - const upgrade_retry_delay = 10 * 1000; // the delay between testing upgrade status - return promise_utils.retry(max_retries, upgrade_retry_delay, () => system_store.load() - .then(() => { - dbg.log0('UPGRADE:', 'wating for', ip, 'to reach', stage_req); - let stage = cutil.get_member_upgrade_stage(ip); - dbg.log0('UPGRADE:', 'got stage:', stage); - if (stage !== stage_req) { - dbg.error('UPGRADE:', 'timedout waiting for ' + ip + ' to reach ' + stage_req); - // if (stage === 'FAILED') max_retries = 0; - throw new Error('timedout waiting for ' + ip + ' to reach ' + stage_req); - } - }) - ); -} - -function _upload_package(pkg_path, ip) { - var formData = { - upgrade_file: { - value: fs.createReadStream(pkg_path), - options: { - filename: 'noobaa.tar.gz', - contentType: 'application/x-gzip' - } - } - }; - let target = url.format({ - protocol: 'http', - slashes: true, - hostname: ip, - port: process.env.PORT, - pathname: 'upload_package' - }); - return P.ninvoke(request, 'post', { - url: target, - formData: formData, - rejectUnauthorized: false, - }) - .then((httpResponse, body) => { - console.log('Upload package successful:', body); - }); -} - -function _get_upgrade_path() { - let server = system_store.get_local_cluster_info(); - // if (server.upgrade.status !== 'CAN_UPGRADE') { - // throw new Error('Not in upgrade state:', server.upgrade.error ? 
server.upgrade.error : ''); - // } - if (server.upgrade.path === '') { - throw new Error('No package path supplied'); - } - return server.upgrade.path; -} - -// EXPORTS -exports.member_pre_upgrade = member_pre_upgrade; -exports.do_upgrade = do_upgrade; -exports.upgrade_cluster = upgrade_cluster; -exports.get_upgrade_status = get_upgrade_status; -exports.cluster_pre_upgrade = cluster_pre_upgrade; -exports.reset_upgrade_package_status = reset_upgrade_package_status; diff --git a/src/server/utils/clustering_utils.js b/src/server/utils/clustering_utils.js index 8e68d04d81..e0a1ff1a1c 100644 --- a/src/server/utils/clustering_utils.js +++ b/src/server/utils/clustering_utils.js @@ -189,7 +189,6 @@ function get_cluster_info(rs_status) { storage: storage, cpus: cpus, location: location, - upgrade: _.omit(cinfo.upgrade, 'stage_changed_date'), debug: _.omitBy({ level: cinfo.debug_level, time_left: debug_time @@ -286,41 +285,7 @@ function get_potential_masters() { return masters; } -function get_member_upgrade_status(ip) { - dbg.log0('UPGRADE:', 'get upgrade status for ip', ip); - let server_entry = system_store.data.clusters.find(server => server.owner_address === ip); - dbg.log0('UPGRADE:', 'found server:', server_entry); - if (!server_entry || !server_entry.upgrade) return 'NOT_READY'; - return server_entry.upgrade.status; -} - -function get_member_upgrade_stage(ip) { - dbg.log0('UPGRADE:', 'get upgrade stage for ip', ip); - let server_entry = system_store.data.clusters.find(server => server.owner_address === ip); - dbg.log0('UPGRADE:', 'found server:', server_entry); - if (!server_entry || !server_entry.upgrade) return; - return server_entry.upgrade.stage; -} - -function can_upload_package_in_cluster() { - const NOT_ALLOW_TO_UPLOAD_IN_MODES = [ - 'PENDING', - 'UPGRADING', - 'PRE_UPGRADE_PENDING', - 'PRE_UPGRADE_READY' - ]; - if (_.some(system_store.data.clusters, server => _.includes(NOT_ALLOW_TO_UPLOAD_IN_MODES, server.status))) { - return false; - } - return true; -} - - -function get_local_upgrade_path() { - let local_info = system_store.get_local_cluster_info(); - return local_info && local_info.upgrade; -} function send_master_update(is_master, master_address) { let system = system_store.data.systems[0]; @@ -381,12 +346,8 @@ exports.pretty_topology = pretty_topology; exports.rs_array_changes = rs_array_changes; exports.find_shard_index = find_shard_index; exports.get_cluster_info = get_cluster_info; -exports.get_member_upgrade_status = get_member_upgrade_status; exports.get_potential_masters = get_potential_masters; exports.send_master_update = send_master_update; exports.get_min_requirements = get_min_requirements; -exports.get_local_upgrade_path = get_local_upgrade_path; -exports.get_member_upgrade_stage = get_member_upgrade_stage; -exports.can_upload_package_in_cluster = can_upload_package_in_cluster; exports.check_if_clusterized = check_if_clusterized; exports.check_if_master = check_if_master; diff --git a/src/server/web_server.js b/src/server/web_server.js index c824e16585..3b59cece72 100755 --- a/src/server/web_server.js +++ b/src/server/web_server.js @@ -34,7 +34,6 @@ const SupervisorCtl = require('./utils/supervisor_ctrl'); const cutil = require('./utils/clustering_utils'); const system_server = require('./system_services/system_server'); const account_server = require('./system_services/account_server'); -const auth_server = require('./common_services/auth_server'); const addr_utils = require('../util/addr_utils'); const kube_utils = require('../util/kube_utils'); @@ -65,8 +64,6 @@ 
const https_port = process.env.SSL_PORT || 5443; process.env.PORT = http_port; process.env.SSL_PORT = https_port; -let webserver_started = 0; - system_store.once('load', async () => { await account_server.ensure_support_account(); if (process.env.CREATE_SYS_NAME && process.env.CREATE_SYS_EMAIL && @@ -94,10 +91,6 @@ async function start_web_server() { const https_server = https.createServer({ ...ssl_cert, honorCipherOrder: true }, app); server_rpc.rpc.register_ws_transport(https_server); await P.ninvoke(https_server, 'listen', https_port); - - dbg.log('Web Server Started, ports: http', http_port, 'https', https_port); - webserver_started = Date.now(); - } catch (err) { dbg.error('Web Server FAILED TO START', err.stack || err); process.exit(1); @@ -188,97 +181,6 @@ app.post('/upload_certificate', } ); -app.post('/upload_package', function(req, res, next) { - if (!system_store.is_finished_initial_load) res.status(503).end(); - const system = system_store.data.systems[0]; - if (!system) { - dbg.log0(`/upload_package without system returning error`); - res.status(503).end(); - } - - const curr_server = system_store.get_local_cluster_info(); - dbg.log0('/upload_package returning', system); - const NOT_ALLOW_TO_UPLOAD_IN_MODES = [ - 'PENDING', - 'UPGRADING', - 'PRE_UPGRADE_PENDING', - 'PRE_UPGRADE_READY' - ]; - if (_.includes(NOT_ALLOW_TO_UPLOAD_IN_MODES, curr_server.upgrade.status)) { - res.status(503).end(); - } - next(); - }, - multer({ - storage: multer.diskStorage({ - destination: function(req, file, cb) { - cb(null, '/tmp'); - }, - filename: function(req, file, cb) { - dbg.log0('UPGRADE upload', file); - cb(null, 'nb_upgrade_' + Date.now() + '_' + file.originalname); - } - }) - }) - .single('upgrade_file'), - function(req, res) { - var upgrade_file = req.file; - server_rpc.client.upgrade.member_pre_upgrade({ - filepath: upgrade_file.path, - mongo_upgrade: false, - stage: 'UPLOAD_STAGE' - }); //Async - res.end('Upgrade file uploaded successfully'); - }); - -app.post('/upgrade', function(req, res, next) { - if (!system_store.is_finished_initial_load) res.status(503).end(); - const system = system_store.data.systems[0]; - if (!system) { - dbg.log0(`/upgrade without system returning error`); - res.status(503).end(); - } - - const can_upload = cutil.can_upload_package_in_cluster(); - dbg.log0('/upgrade returning', system, can_upload); - - if (!can_upload) { - res.status(503).end(); - } - req.system = system; - next(); - }, - function(req, res, next) { - server_rpc.client.upgrade.reset_upgrade_package_status({}, { - address: 'http://127.0.0.1:' + http_port, - auth_token: auth_server.make_auth_token({ - system_id: req.system._id, - role: 'admin', - account_id: req.system.owner._id, - }) - }); - next(); - }, - multer({ - storage: multer.diskStorage({ - destination: function(req, file, cb) { - cb(null, '/tmp'); - }, - filename: function(req, file, cb) { - dbg.log0('UPGRADE upload', file); - cb(null, 'nb_upgrade_' + Date.now() + '_' + file.originalname); - } - }) - }) - .single('upgrade_file'), - function(req, res) { - var upgrade_file = req.file; - server_rpc.client.upgrade.cluster_pre_upgrade({ - filepath: upgrade_file.path, - }); //Async - res.end('Upgrade file uploaded successfully'); - }); - if (prom_reports.instance().enabled()) { app.get('/metrics', function(req, res) { @@ -353,62 +255,17 @@ app.get('/get_log_level', function(req, res) { }); }); -function getVersion(route) { - return P.resolve() - .then(() => { - const registered = server_rpc.is_service_registered('system_api.read_system'); - 
let started; - if (system_store.data.systems.length === 0 || cutil.check_if_clusterized()) { - // if no system or not clusterized then no need to wait - started = true; - } else { - // if in a cluster then after upgrade the user should be redirected to the new master - // give the new master 10 seconds to start completely before ending the upgrade - const WEBSERVER_START_TIME = 10 * 1000; - started = webserver_started && (Date.now() - webserver_started) > WEBSERVER_START_TIME; - } - - return (server_rpc.client.upgrade.get_upgrade_status()) - .then(status => { - if (started && registered && !status.in_process && system_store.is_finished_initial_load) { - const system = system_store.data.systems[0]; - if (!system) { - dbg.log0(`${route} without system returning ${pkg.version}, service registered and upgrade is not in progress`); - return { - status: 200, - version: pkg.version, - }; - } - return server_rpc.client.system.read_system({}, { - address: 'ws://127.0.0.1:' + http_port, - auth_token: auth_server.make_auth_token({ - system_id: system._id, - role: 'admin', - account_id: system.owner._id, - }) - }) - .then(sys => { - dbg.log0(`${route} returning ${pkg.version}, service registered and upgrade is not in progress`); - return { - status: 200, - version: pkg.version - }; - }) - .catch(err => { - dbg.log0(`${route} caught ${err} on read system, returning 503`); - return { status: 503 }; - }); - } else { - dbg.log0(`${route} returning 404, started(${started}), service_registered(${registered})`, - `, status.in_process(${status.in_process}), system_store.is_finished_initial_load(${system_store.is_finished_initial_load})`); - return { status: 404 }; - } - }) - .catch(err => { - dbg.error(`got error when checking upgrade status. returning 503`, err); - return { status: 503 }; - }); - }); +async function getVersion(route) { + const registered = server_rpc.is_service_registered('system_api.read_system'); + if (registered && system_store.is_finished_initial_load) { + return { + status: 200, + version: pkg.version + }; + } else { + dbg.log0(`${route} returning 404, service_registered(${registered}), system_store loaded(${system_store.is_finished_initial_load})`); + return { status: 404 }; + } } // An oauth authorize endpoint that forwards to the OAuth authorization server. 
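// Sketch of how the simplified getVersion above is typically wired - the route
// path and response handling here are illustrative assumptions, not taken from
// this change. The endpoint answers 404 until the RPC service is registered and
// system_store finishes its initial load, then 200 with the package version:
//
//     app.get('/version', (req, res) => getVersion(req.url)
//         .then(({ status, version }) => (version ? res.status(status).send(version) : res.status(status).end())));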
diff --git a/src/test/framework/flow.js b/src/test/framework/flow.js deleted file mode 100644 index 453b290d3a..0000000000 --- a/src/test/framework/flow.js +++ /dev/null @@ -1,156 +0,0 @@ -/* Copyright (C) 2016 NooBaa */ -'use strict'; - -var steps = [ - //Upgrade moved externally to be run from the jenkins prior to the framework run - { - //Restore DB to defaults, upgrade tests fills the system with all kinds of configurations - name: 'Restore DB Defaults', - common: 'restore_db_defaults', - }, { - name: 'Clean Server for Run', - common: 'clean_server_for_run', - }, { - //Run FE Unit tests - ignore_failure: true, - name: 'FE Unit Tests', - action: 'gulp', - params: [{ - arg: '--cwd' - }, { - arg: 'frontend' - }, { - arg: 'test' - }], - }, { - //Run BE Unit tests - blocking: true, - name: 'BE Unit Tests', - action: 'npm', - params: [{ - arg: 'run', - }, { - arg: 'mocha', // TODO: restore mocha:coverage - // arg: 'mocha:coverage', - }], - env: { - npm_package_config_covdir: './report/cov/mocha', - PATH: process.env.PATH, - DEV_MODE: 'true' - }, - }, { - //Restore DB to defaults - name: 'Restore DB Defaults', - common: 'restore_db_defaults', - }, { - //Test Data Placement according to policy - name: 'Data Placement Test', - action: 'node', - params: [{ - arg: './src/test/system_tests/test_bucket_placement' - }], - }, { - //Restore DB to defaults - name: 'Restore DB Defaults', - common: 'restore_db_defaults', - }, { - //Test Data Rebuild and Eviction - name: 'Rebuild and Eviction Test', - action: 'node', - params: [{ - arg: './src/test/system_tests/test_build_chunks' - }], - }, { - //Restore DB to defaults - name: 'Restore DB Defaults', - common: 'restore_db_defaults', - }, { - name: 'Node Failure Test', - action: 'node', - params: [{ - arg: './src/test/system_tests/test_node_failure' - }], - }, { - //Restore DB to defaults - name: 'Restore DB Defaults', - common: 'restore_db_defaults', - }, { - name: 'Bucket Access Test', - action: 'node', - params: [{ - arg: './src/test/system_tests/test_bucket_access', - }], - }, { - //Restore DB to defaults - name: 'Restore DB Defaults', - common: 'restore_db_defaults', - }, { - //Test Ceph S3 - name: 'Ceph S3 Test', - action: 'node', - params: [{ - arg: './src/test/system_tests/test_ceph_s3' - }], - }, { - //Restore DB to defaults - name: 'Restore DB Defaults', - common: 'restore_db_defaults', - }, { - //Test Cloud Pools - action: 'node', - name: 'Cloud Pools Test', - ignore_failure: true, - params: [{ - arg: './src/test/system_tests/test_cloud_pools' - }], - }, { - //Restore DB to defaults - name: 'Restore DB Defaults', - common: 'restore_db_defaults', - }, { - //Bucket Lambda Triggers Test - name: 'Bucket Lambda Triggers Test', - action: 'node', - params: [{ - arg: './src/test/system_tests/test_bucket_lambda_triggers' - }], - }, { - //Restore DB to defaults - name: 'Restore DB Defaults', - common: 'restore_db_defaults', - }, { - //Bucket Lambda Triggers Test - name: 'Blob API Test', - action: 'node', - params: [{ - arg: './src/test/system_tests/test_blob_api' - }], - }, { - //Restore DB to defaults - name: 'Restore DB Defaults', - common: 'restore_db_defaults', - }, { - //Test MD Aggregator - name: 'MD Aggregator Test', - action: 'node', - params: [{ - arg: './src/test/system_tests/test_md_aggregator' - }], - } -]; - -module.exports = steps; - -/*Example Step: -{ - name: 'Some Name', - action: 'node src/some_file.js' OR common 'function name' OR lib_test '/src/sometest' - params: if exists, array of args - [ - arg: 'value', - input_arg: argument name 
of arg which was sent to the runner - ] - blocking: if true, will stop execution on failure of this step - ignore_failure: if true the test will only report fail\pass without failing the run -} -*/ diff --git a/src/test/framework/prepare_and_run_tests.sh b/src/test/framework/prepare_and_run_tests.sh deleted file mode 100755 index f2e6db4698..0000000000 --- a/src/test/framework/prepare_and_run_tests.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -source /root/.bashrc -cd /root/node_modules/noobaa-core -echo "AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" >> /data/.env -echo "AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY" >> /data/.env -echo 'ENDPOINT_BLOB_ENABLED=true' >> /data/.env -echo 'DEV_MODE=true' >> /data/.env -echo "AZURE_STORAGE_CONNECTION_STRING=$AZURE_STORAGE_CONNECTION_STRING" >> /data/.env -echo "TEST_RUN_NAME=$TEST_RUN_NAME" >> /data/.env - -# install dependencies -yum install -y git -npm install \ - gulp \ - mocha \ - istanbul-reports \ - istanbul-lib-hook \ - istanbul-lib-report \ - istanbul-lib-coverage \ - istanbul-lib-instrument - -/usr/local/bin/node src/test/framework/runner.js --GIT_VERSION 1 -rc=$? -if [ ${rc} -ne 0 ] -then - /usr/local/bin/node src/test/framework/send_logs.js - exit $rc -fi \ No newline at end of file diff --git a/src/test/framework/remote_runner.js b/src/test/framework/remote_runner.js deleted file mode 100644 index 7bb9c490a9..0000000000 --- a/src/test/framework/remote_runner.js +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (C) 2016 NooBaa */ -'use strict'; - -const argv = require('minimist')(process.argv, { string: ['server_secret'] }); - -require('../../util/dotenv').load(); -const ssh = require('../utils/ssh_functions'); - -const { - server_name, - server_ip, - server_secret -} = argv; - - -const ENV_VARS = [ - `AWS_ACCESS_KEY_ID=${process.env.AWS_ACCESS_KEY_ID}`, - `AWS_SECRET_ACCESS_KEY=${process.env.AWS_SECRET_ACCESS_KEY}`, - `AZURE_STORAGE_CONNECTION_STRING='${process.env.AZURE_STORAGE_CONNECTION_STRING}'`, - `TEST_RUN_NAME='${process.env.TEST_RUN_NAME}'` -]; - -function main() { - console.log(`running runner tests on ${server_name}, IP ${server_ip} Secret ${server_secret}`); - let ssh_client; - return ssh.ssh_connect({ - host: server_ip, - username: 'noobaaroot', - password: server_secret, - keepaliveInterval: 5000, - }) - .then(client => { - ssh_client = client; - const command = `sudo bash -c "${ENV_VARS.join(' ')} /root/node_modules/noobaa-core/src/test/framework/prepare_and_run_tests.sh"`; - console.log(`executing command on remote server (${server_ip}): ${command}`); - return ssh.ssh_exec(ssh_client, command); - }) - .then(() => { - console.log('SUCCESSFUL TESTS'); - process.exit(0); - }, - err => { - console.error('Remote Runner FAILED:', err.message); - process.exit(1); - } - ); -} - - - -if (require.main === module) { - main(); -} diff --git a/src/test/framework/runner.js b/src/test/framework/runner.js deleted file mode 100644 index 98f70c65c3..0000000000 --- a/src/test/framework/runner.js +++ /dev/null @@ -1,490 +0,0 @@ -/* Copyright (C) 2016 NooBaa */ -'use strict'; - -require('../../util/dotenv').load(); - -const _ = require('lodash'); -const fs = require('fs'); -const path = require('path'); -const argv = require('minimist')(process.argv); -const request = require('request'); - -const P = require('../../util/promise'); -const api = require('../../api'); -const dbg = require('../../util/debug_module')(__filename); -const ops = require('../utils/basic_server_ops'); -const fs_utils = require('../../util/fs_utils'); -const promise_utils = 
require('../../util/promise_utils');
-const { CoverageReport } = require('../../util/coverage_utils');
-
-const COVERAGE_DIR = './report/cov';
-const REPORT_PATH = COVERAGE_DIR + '/regression_report.log';
-const DEFAULT_TEST_TIMEOUT = 20 * 60 * 1000; // 20 minutes timeout for a test
-
-const IS_MAC = process.platform === 'darwin';
-
-class TestRunner {
-    constructor(args) {
-        this._version = args.GIT_VERSION;
-        this.server_ip = args.server_ip || '127.0.0.1';
-        this._argv = args;
-        this._error = false;
-        this.tests_results = [];
-        if (args.FLOW_FILE) {
-            this._steps = require(args.FLOW_FILE); // eslint-disable-line global-require
-        } else {
-            this._steps = require(process.cwd() + '/src/test/framework/flow.js'); // eslint-disable-line global-require
-        }
-    }
-
-    /**************************
-     * Common Functionality
-     **************************/
-    wait_for_server_to_start(max_seconds_to_wait, port) {
-        const self = this;
-        var isNotListening = true;
-        var MAX_RETRIES = max_seconds_to_wait;
-        var wait_counter = 1;
-        //wait up to max_seconds_to_wait seconds
-        console.log('waiting for server to start (1)');
-        return promise_utils.pwhile(function() {
-            return isNotListening;
-        }, function() {
-            return P.ninvoke(request, 'get', {
-                url: `http://${self.server_ip}:` + port,
-                rejectUnauthorized: false,
-            })
-            .then(function() {
-                console.log('server started after ' + wait_counter + ' seconds');
-                isNotListening = false;
-            })
-            .catch(function(err) {
-                console.log('waiting for server to start(2)');
-                wait_counter += 1;
-                if (wait_counter >= MAX_RETRIES) {
-                    console.error('Too many retries after restart server', err);
-                    throw new Error('Too many retries');
-                }
-                return P.delay(1000);
-            });
-            //one more delay for reconnection of other processes
-        })
-        .delay(2000)
-        .return();
-    }
-
-    restore_db_defaults() {
-        var self = this;
-        return promise_utils.exec('mongo nbcore /root/node_modules/noobaa-core/src/test/system_tests/mongodb_defaults.js')
-            .catch(function(err) {
-                console.log('failed on mongodb_defaults', err);
-                throw new Error('Failed on mongodb reset');
-            })
-            .then(function() {
-                return promise_utils.exec('supervisorctl restart webserver bg_workers s3rver hosted_agents');
-            })
-            .then(function() {
-                return self.wait_for_server_to_start(30, 8080);
-            })
-            .then(function() {
-                return self.wait_for_server_to_start(30, 6001);
-            })
-            .delay(5000) //Workaround for agents sending HBs and re-registering to the server
-            .catch(function(err) {
-                console.log('Failed restarting webserver', err);
-                throw new Error('Failed restarting webserver');
-            });
-    }
-
-    clean_server_for_run() {
-        if (IS_MAC) return;
-        const logs_regexp = /noobaa\.log\...\.gz/;
-        const logs_path = '/log/';
-        return P.resolve(fs.readdirAsync(logs_path))
-            .then(files => {
-                const num_files = files.length;
-                let i = 0;
-                return promise_utils.pwhile(function() {
-                    return i < num_files;
-                }, function() {
-                    i += 1;
-                    if (logs_regexp.test(files[i - 1])) {
-                        return fs.unlinkAsync(path.join(logs_path, files[i - 1]));
-                    }
-                    return P.resolve();
-                });
-            });
-    }
-
-    /**************************
-     * Flow Control
-     **************************/
-    init_run() {
-        var self = this;
-        //Clean previous run results
-        console.log('Clearing previous test run results');
-        return P.resolve()
-            .then(() => fs_utils.create_fresh_path(COVERAGE_DIR)
-                .catch(function(err) {
-                    console.error('Failed cleaning ', COVERAGE_DIR, 'from previous run results', err);
-                    throw new Error('Failed cleaning dir');
-                }))
-            .then(() => promise_utils.exec('rm -rf /root/node_modules/noobaa-core/coverage/*')
-                .catch(function(err) {
-                    console.error('Failed cleaning istanbul data from previous run results', err);
-                    throw new Error('Failed cleaning istanbul data');
-                }))
-            .then(() => self._restart_services(false)) // TODO: restore to 'true' to run coverage
-            .then(function() {
-                fs.appendFileSync(REPORT_PATH, 'Init Test Run for version ' + self._version + '\n');
-            });
-    }
-
-    complete_run() {
-        //Take coverage output and report and pack them
-        var self = this;
-        var dst = '/tmp/res_' + this._version + '.tgz';
-        // return this._write_coverage() // TODO: restore _write_coverage
-        return P.resolve()
-            .catch(function(err) {
-                console.error('Failed writing coverage for test runs', err);
-                throw new Error('Failed writing coverage for test runs');
-            })
-            .then(function() {
-                return promise_utils.exec('tar --warning=no-file-changed -zcvf ' + dst + ' ' + COVERAGE_DIR + '/*');
-            })
-            .catch(function(err) {
-                console.error('Failed archiving test runs', err);
-                throw new Error('Failed archiving test runs');
-            })
-            .then(function() {
-                return self._restart_services(false);
-            })
-            .then(function() {
-                return self.wait_for_server_to_start(30, 6001);
-            })
-            .then(function() {
-                return self.wait_for_server_to_start(30, 8080);
-            })
-            .then(function() {
-                console.log('Uploading results file');
-                //Save package on current NooBaa system
-                return ops.upload_file(self.server_ip, dst, 'first.bucket', 'report_' + self._version + '.tgz');
-            })
-            .catch(function(err) {
-                console.log('Failed restarting webserver', err);
-                throw new Error('Failed restarting webserver');
-            });
-    }
-
-    run_tests() {
-        var self = this;
-        return P.each(self._steps, function(current_step) {
-            return P.resolve(self._print_current_step(current_step))
-                .then(function(step_res) {
-                    return P.resolve(self._run_current_step(current_step, step_res));
-                })
-                .then(function(step_res) {
-                    fs.appendFileSync(REPORT_PATH, step_res + '\n');
-
-                })
-                .catch(function(error) {
-                    fs.appendFileSync(REPORT_PATH, 'Stopping tests with error ' + error + ' ' + error.stack + ' ' + error.message);
-                    throw new Error(error);
-                });
-        })
-        .then(function() {
-            console.log('All steps done');
-            fs.appendFileSync(REPORT_PATH, 'All steps done\n');
-
-        })
-        .catch(function(error) {
-            fs.appendFileSync(REPORT_PATH, 'Stopping tests with error\n' + error);
-            throw new Error(error);
-        });
-    }
-
-    print_conclusion() {
-        for (const res of this.tests_results) {
-            if (res.success) {
-                console.log(`===PASSED=== ${res.name} passed`);
-            } else if (res.ignored) {
-                console.warn(`===FAILED-IGNORED=== ${res.name} failed - Result ignored`);
-            } else {
-                console.error(`===FAILED=== ${res.name} failed!`);
-            }
-        }
-    }
-
-    _print_current_step(current_step) {
-        var step_res;
-        var title;
-        return P.fcall(function() {
-            if (current_step.common) {
-                title = 'Performing ' + current_step.name;
-                step_res = current_step.name;
-            } else if (current_step.action) {
-                title = 'Running Action ' + current_step.name;
-                step_res = current_step.name;
-            } else if (current_step.lib_test) {
-                title = 'Running Library Test ' + current_step.name;
-                step_res = current_step.name;
-            } else {
-                title = 'Running Unnamed ' + current_step.action;
-                step_res = current_step.action;
-            }
-            fs.appendFileSync(REPORT_PATH, title + '\n');
-            return step_res;
-        });
-    }
-
-    _run_current_step(current_step, step_res) {
-        var self = this;
-        if (!current_step.action &&
-            !current_step.common &&
-            !current_step.lib_test) {
-            step_res = '        No Action Defined!!!';
-            return;
-        }
-        if (current_step.common) {
-            console.log('running', step_res);
-            dbg.set_log_to_file(path.join(process.cwd(), COVERAGE_DIR, current_step.name.replace(/ /g, '_')) + '.log');
-            var ts = new Date();
-            return P.resolve()
-                .then(() => self[current_step.common].apply(self))
-                .timeout(current_step.timeout || DEFAULT_TEST_TIMEOUT)
-                .finally(() => dbg.set_log_to_file())
-                .then(function() {
-                    return step_res + ' - Successful common step ( took ' +
-                        ((new Date() - ts) / 1000) + 's )';
-                    //return step_res;
-                })
-                .catch(function(err) {
-                    console.error('Failure while running ' + step_res + ' with error ' + err);
-                    throw new Error(err);
-                });
-        } else {
-            console.log('--------------------------------- ' + step_res + ' ---------------------------------');
-            if (current_step.action) {
-                return self._run_action(current_step, step_res);
-            } else if (current_step.lib_test) {
-                return self._run_lib_test(current_step, step_res);
-            } else {
-                throw new Error('Undefined step');
-            }
-        }
-    }
-
-    _run_action(current_step, step_res) {
-        var ts = new Date();
-        //Build execution context from action and arguments
-        var command = current_step.action;
-        var args = _.compact(_.map(current_step.params, p => {
-            if (p.arg) {
-                return p.arg;
-            } else if (p.input_arg) {
-                if (this._argv[p.input_arg]) {
-                    return this._argv[p.input_arg];
-                } else {
-                    fs.appendFileSync(REPORT_PATH, 'No argument received for ' + p.input_arg + '\n');
-                }
-            }
-        }));
-        var options = _.pick(current_step, 'env');
-        return promise_utils.spawn(command, args, options, false, false, current_step.timeout || DEFAULT_TEST_TIMEOUT)
-            .then(res => {
-                this.tests_results.push({ name: current_step.name, success: true });
-                step_res = '        ' + step_res + ' - Successful running action ( took ' +
-                    ((new Date() - ts) / 1000) + 's )';
-                console.log(step_res);
-                return step_res;
-            })
-            .catch(err => {
-                const result = { name: current_step.name, success: false, ignored: true };
-                if (!current_step.ignore_failure) {
-                    this._error = true;
-                    result.ignored = false;
-                }
-                this.tests_results.push(result);
-                if (current_step.blocking) {
-                    fs.appendFileSync(REPORT_PATH, step_res + ' ' + err + '\n');
-                    throw new Error('Blocking action failed');
-                } else {
-                    step_res = '------------------------------\n' +
-                        '        ' + step_res + ' - Failed with \n' +
-                        err +
-                        '\n------------------------------ ' +
-                        '( took ' + ((new Date() - ts) / 1000) + 's )';
-                }
-                console.error(step_res, 'Failed action with', err.message);
-                return step_res;
-            });
-    }
-
-    _run_lib_test(current_step, step_res) {
-        var ts = new Date();
-        // Used in order to log inside a file instead of console prints
-        var test = require(process.cwd() + current_step.lib_test); // eslint-disable-line global-require
-        return P.resolve(test.run_test())
-            .timeout(current_step.timeout || DEFAULT_TEST_TIMEOUT)
-            .then(res => {
-                this.tests_results.push({ name: current_step.name, success: true });
-                step_res = '        ' + step_res + ' - Successful ( took ' +
-                    ((new Date() - ts) / 1000) + 's )';
-                console.log(step_res);
-                return step_res;
-            })
-            .catch(res => {
-                const result = { name: current_step.name, success: false, ignored: true };
-                if (!current_step.ignore_failure) {
-                    this._error = true;
-                    result.ignored = false;
-                }
-                this.tests_results.push(result);
-                if (current_step.blocking) {
-                    fs.appendFileSync(REPORT_PATH, step_res + ' ' + res + '\n');
-                    throw new Error('Blocking libtest failed');
-                } else {
-                    step_res = '        ' + step_res + ' - Failed with \n' +
-                        '------------------------------\n' +
-                        res +
-                        '------------------------------ ' 
+ - '( took ' + ((new Date() - ts) / 1000) + 's )'; - } - console.error('Failed lib test with', res); - return step_res; - }); - } - - _write_coverage() { - const report = new CoverageReport(); - //Get all collectors data - const rpc = api.new_rpc(); - const client = rpc.new_client(); - return client.create_auth_token({ - email: 'demo@noobaa.com', - password: 'DeMo1', - system: 'demo' - }) - .then(() => client.redirector.publish_to_cluster({ - method_api: 'debug_api', - method_name: 'get_coverage_data', - target: '' - }, { - auth_token: client.options.auth_token, - })) - .then(function(res) { - - // Add all received data to the collector - _.each(res.redirect_reply.aggregated, function(r) { - if (r.coverage_data) { - report.add_data(r.coverage_data); - } else { - console.warn('no coverage_data'); - } - }); - - // Add unit test coverage data if exists (on failure of unit test, does not exist) - const unit_test_coverage_file = COVERAGE_DIR + '/mocha/coverage-final.json'; - if (fs.existsSync(unit_test_coverage_file)) { - const coverage_data = JSON.parse(fs.readFileSync(unit_test_coverage_file, 'utf8')); - report.add_data(coverage_data); - } else { - console.warn('No unit test coverage_data'); - } - - report.write({ - report_dir: COVERAGE_DIR + '/istanbul', - report_type: 'lcov', - }); - - console.log('done writing coverage report'); - }) - .catch(function(err) { - console.warn('Error on _write_coverage', err, err.stack); - throw err; - }); - } - - async _restart_services(test_run) { - if (IS_MAC) return; - console.log('Restarting services with TESTRUN arg to', test_run); - var command; - if (test_run) { - command = "sed -i 's/\\(.*web_server.js\\)/\\1 --TESTRUN/' /data/noobaa_supervisor.conf "; - command += " ; sed -i 's/\\(.*bg_workers.js\\)/\\1 --TESTRUN/' /data/noobaa_supervisor.conf "; - } else { - command = "sed -i 's/\\(.*web_server.js\\).*--TESTRUN/\\1/' /data/noobaa_supervisor.conf "; - command += " ; sed -i 's/\\(.*bg_workers.js\\).*--TESTRUN/\\1/' /data/noobaa_supervisor.conf "; - } - await promise_utils.exec(command); - let retries = 0; - const MAX_RETRIES = 3; - while (retries < MAX_RETRIES) { - try { - await promise_utils.exec('supervisorctl update'); - await P.delay(5000); - await promise_utils.exec('supervisorctl restart webserver bg_workers'); - retries = MAX_RETRIES; - } catch (err) { - retries += 1; - if (retries < MAX_RETRIES) { - console.error('failed restarting services. retry..', err); - await P.delay(5000); - } else { - console.error(`failed restarting services for ${MAX_RETRIES} retries. 
aborting`, err); - throw err; - } - } - } - await P.delay(5000); - } -} - - -module.exports = TestRunner; - -function main() { - if (!argv.GIT_VERSION) { - console.error('Must supply git version (--GIT_VERSION)'); - process.exit(1); - } - var run = new TestRunner(argv); - return P.resolve(run.init_run()) - .catch(function(error) { - console.error('Init run failed, stopping tests', error); - run._restart_services(false); - process.exit(2); - }) - .then(function() { - console.log('Running tests'); - return run.run_tests(); - }) - .catch(function(error) { - console.error('run tests failed', error); - run._restart_services(false); - process.exit(3); - }) - .then(function() { - console.log('Finalizing run results'); - return run.complete_run(); - }) - .catch(function(error) { - console.error('Complete run failed', error); - run._restart_services(false); - process.exit(4); - }) - .finally(() => { - run.print_conclusion(); - if (run._error) { - run._restart_services(false); - process.exit(1); - } else { - process.exit(0); - } - }); -} - -if (require.main === module) { - main(); -} diff --git a/src/test/framework/test_env_builder.js b/src/test/framework/test_env_builder.js deleted file mode 100644 index 1ffcc973b4..0000000000 --- a/src/test/framework/test_env_builder.js +++ /dev/null @@ -1,495 +0,0 @@ -/* Copyright (C) 2016 NooBaa */ -'use strict'; - -const argv = require('minimist')(process.argv, { string: ['server_secret'] }); -const dbg = require('../../util/debug_module')(__filename); - -const fs = require('fs'); -const P = require('../../util/promise'); -const promise_utils = require('../../util/promise_utils'); -const agent_functions = require('../utils/agent_functions'); -const server_functions = require('../utils/server_functions'); -const AzureFunctions = require('../../deploy/azureFunctions'); -const sanity_build_test = require('../system_tests/sanity_build_test'); -const ssh = require('../utils/ssh_functions'); - - -const version_map = 'src/deploy/version_map.json'; - -// Environment Setup -var clientId = process.env.CLIENT_ID; -var domain = process.env.DOMAIN; -var secret = process.env.APPLICATION_SECRET; -var subscriptionId = process.env.AZURE_SUBSCRIPTION_ID; - -const oses = agent_functions.supported_oses(); - -const { - resource, - storage, - vnet, - name, - id = 0, - location = 'westus2', - clean_only, - clean_by_id, - cleanup, - upgrade, - rerun_upgrade, - server_ip, - server_secret, - js_script, - shell_script, - container, - skip_agent_creation = false, - skip_server_creation = false, - skip_configuration = false, - create_lg = false, - lg_ip, - server_external_ip = false, - num_agents = oses.length, - vm_size = 'B', - agents_disk_size = 'default', - random_base_version = false, - min_version = '2.1', - pool_name = 'first.pool', -} = argv; - -let { - min_required_agents = 7, -} = argv; - -dbg.set_process_name('test_env_builder'); - -const agents = create_agents_plan(); -const server = { name: name + '-' + id, ip: server_ip, secret: server_secret }; -const lg = { name: 'loadGenerator-' + name + '-' + id, ip: lg_ip }; -const created_agents = []; -let vmSize; - -const azf = new AzureFunctions(clientId, domain, secret, subscriptionId, resource, location); - -function exit_on_error(msg, err) { - console.error(msg, err); - process.exit(1); -} - -async function main() { - let exit_code = 0; - if (argv.help) { - print_usage(); - process.exit(); - } - await azf.authenticate(); - - if (clean_only) { - try { - await clean_test_env(); - process.exit(0); - } catch (err) { - 
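-            // Note: in clean-only mode any cleanup failure is treated as fatal by
-            // design; exit_on_error() below logs the error and calls process.exit(1),
-            // so none of the prepare/install/test stages run afterwards.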
exit_on_error('got error on cleanup (clean only):', err); - } - } - - try { - await P.all([ - prepare_server(), - prepare_agents(), - prepare_lg() - ]); - - await install_agents(); - } catch (err) { - exit_on_error('failed to prepare system for tests:', err); - } - - try { - await upgrade_test_env(); - await run_tests(); - } catch (err) { - console.error('failed running tests:', err); - exit_code = 1; - } - - if (cleanup) { - try { - await clean_test_env(); - } catch (err) { - console.error('failed cleaning environment'); - } - } - - process.exit(exit_code); -} - -async function get_random_base_version() { - let will_retry = true; - let version; - const buf = await fs.readFileAsync(version_map); - const ver_map = JSON.parse(buf.toString()); - if (ver_map.versions.length > 1) { - while (will_retry) { - version = ver_map.versions[Math.floor((Math.random() * ver_map.versions.length))]; - if (version.ver.split('.')[0] >= String(min_version).split('.')[0]) { - if (version.ver.split('.')[1] >= String(min_version).split('.')[1]) { - will_retry = false; - } - } - } - } else { - version = ver_map.versions[0]; - } - return version.vhd; -} - -//this function is getting servers array creating and upgrading them. -async function prepare_server() { - if (skip_server_creation) { - console.log('skipping server creation'); - if (!server_ip || !server_secret) { - console.error('Cannot skip server creation without ip and secret supplied, please use --server_ip and --server_secret'); - throw new Error('Failed using existing server'); - } - return; - } - console.log(`prepare_server: creating server ${server.name}`); - console.log(`NooBaa server vmSize is: ${vmSize}`); - server.version = 'latest'; - let createServerParams = { - serverName: server.name, - vnet, - storage, - vmSize, - latestRelease: true, - createSystem: true, - createPools: [pool_name] - }; - if (random_base_version) { - createServerParams.imagename = await get_random_base_version(); - server.version = createServerParams.imagename.replace('.vhd', ''); - } - try { - server.secret = await azf.createServer(createServerParams); - if (server_external_ip) { - server.ip = await azf.getIpAddress(server.name + '_pip'); - } else { - server.ip = await azf.getPrivateIpAddress(`${server.name}_nic`, `${server.name}_ip`); - } - console.log(`server_info is`, server); - - // for docker\podman we need to copy the upgrade package and build docker image from it - if (container) { - await prepare_container_env(); - } - } catch (err) { - console.error(`prepare_server failed. 
server name: ${server.name}`, err); - throw err; - } -} - - - -async function prepare_container_env() { - console.log(`creating ssh connection to ${server.ip} with secret ${server.secret}`); - const ssh_client = await ssh.ssh_connect({ - host: server.ip, - username: 'noobaaroot', - password: server.secret, - keepaliveInterval: 5000, - }); - await server_functions.enable_noobaa_login(server.ip, server.secret); - // copy package to remote server - console.log(`uploading package ${upgrade} to server ${server.ip}`); - await promise_utils.exec(`scp -o "StrictHostKeyChecking no" ${upgrade} noobaaroot@${server.ip}:/tmp/noobaa-NVA.tar.gz`); - ssh.ssh_exec(ssh_client, `sudo supervisorctl stop all`); - ssh.ssh_exec(ssh_client, `sudo tar -xzf /tmp/noobaa-NVA.tar.gz -C /root/node_modules/`); - ssh.ssh_exec(ssh_client, `sudo cp /tmp/noobaa-NVA.tar.gz /root/node_modules/noobaa-core/`); - ssh.ssh_exec(ssh_client, `sudo /root/node_modules/noobaa-core/src/test/framework/prepare_podman_env.sh`); -} - -async function prepare_agents() { - if (skip_agent_creation) { - console.log('skipping agents creation'); - return; - } - console.log(`starting the create agents stage`); - await P.map(agents, async agent => { - try { - const hasImage = azf.getImagesfromOSname(agent.os).hasImage; - if (hasImage) { - console.log('Creating new agent from an image'); - agent.ip = await azf.createAgentFromImage({ - vmName: agent.name, - storage, - vnet, - os: agent.os, - vmSize, - diskSizeGB: agents_disk_size, - server_ip: server_ip, - shouldInstall: false - }); - } else { - console.log('Creating new agent from the marketplace'); - agent.ip = await azf.createAgent({ - vmName: agent.name, - storage, - vnet, - diskSizeGB: agents_disk_size, - os: agent.os, - vmSize, - server_ip - }); - } - console.log(`agent created: ip ${agent.ip} name ${agent.name} of type ${agent.os}`); - agent.prepared = true; - created_agents.push(agent); - } catch (err) { - console.error(`Creating agent ${agent.name} VM failed`, err); - } - }); - if (created_agents.length < min_required_agents) { - console.error(`could not create the minimum number of required agents (${min_required_agents})`); - throw new Error(`could not create the minimum number of required agents (${min_required_agents})`); - } else { - console.log(`Created ${created_agents.length}`); - } -} - -function install_agents() { - if (skip_agent_creation) { - console.log('skipping agents installation'); - return P.resolve(); - } - let num_installed = 0; - console.log(`Starting to install ${created_agents.length} Agents`); - return agent_functions.getAgentConf(server.ip, [], pool_name) - .then(agent_conf => P.map(created_agents, agent => { - const os = azf.getImagesfromOSname(agent.os); - return P.resolve() - .then(() => { - if (os.hasImage) { - console.log(`installing agent ${agent.name} type ${agent.os} using ssh`); - return (agent_functions.getAgentConfInstallString(server.ip, os.osType, [], pool_name)) - .then(inst_string => agent_functions.runAgentCommandViaSsh( - agent.ip, - AzureFunctions.QA_USER_NAME, - AzureFunctions.ADMIN_PASSWORD, - inst_string, - os.osType)); - } else { - console.log(`installing agent ${agent.name} type ${agent.os} using extension`); - return azf.createAgentExtension({ - vmName: agent.name, - storage, - vnet, - ip: agent.ip, - agentConf: agent_conf, - serverIP: server.ip - }); - } - }) - .then( - () => { // successfully installed - num_installed += 1; - console.log(`Finished creating agent extension on ${agent.name}`); - }, - err => { // failed installation - 
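-                    // A failed install is logged but deliberately not rethrown, so
-                    // the P.map() above can settle for the remaining agents; only the
-                    // aggregate num_installed count is checked against
-                    // min_required_agents once all installs have finished.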
console.error(`failed installing agent on ${agent.name}`, err); - }); - }) - .then(() => { - if (num_installed < min_required_agents) { - console.error(`could not install the minimum number of required agents (${min_required_agents})`); - throw new Error(`could not install the minimum number of required agents (${min_required_agents})`); - } - console.log('All required agents were installed'); - })); -} - -async function prepare_lg() { - if (create_lg) { - try { - await azf.authenticate(); - await azf.createLGFromImage({ - vmName: lg.name, - vnet: argv.vnet, - storage: argv.storage, - }); - if (server_external_ip) { - lg.ip = await azf.getIpAddress(lg.name + '_pip'); - } else { - lg.ip = await azf.getPrivateIpAddress(`${lg.name}_nic`, `${lg.name}_ip`); - } - console.log(`lg_info is: `, lg); - } catch (err) { - console.error(`prepare_lg failed. lg name: ${lg.name}`, err); - throw err; - } - } -} - -// upgrade server to the required version. -// currently using sanity_build_test.js script -async function upgrade_test_env() { - if (!upgrade || random_base_version) { - return; - } - console.log(`upgrading server with package ${upgrade}`); - try { - await sanity_build_test.run_test(server.ip, upgrade, false, skip_configuration); - } catch (err) { - console.error('upgrade_test_env failed', err); - throw err; - } - if (rerun_upgrade) { - console.log(`Got rerun_upgrade flag. running upgrade again from the new version to the same version (${upgrade})`); - try { - await sanity_build_test.run_test(server.ip, upgrade, true, true); /*skip configuration*/ - } catch (err) { - console.error(`Failed upgrading from the new version ${upgrade}`, err); - throw err; - } - } -} - -function run_tests() { - // disable all dbg.log output before running tests - dbg.set_console_output(false); - dbg.original_console(); - return P.resolve() - .then(() => { - if (js_script) { - console.log(`running js script ${js_script} on ${server.name}`); - return promise_utils.fork(js_script, [ - '--server_name', server.name, '--server_ip', server.ip, '--server_secret', server.secret, - '--lg_name', lg.name, '--lg_ip', lg.ip, '--version', server.version - ].concat(process.argv)) - .catch(err => { - console.log('Failed running script', err); - throw err; - }); - } else if (shell_script) { - console.log(`running bash script ${shell_script} on ${server.name}`); - return promise_utils.spawn(shell_script, [server.name, server.ip, server.secret]) - .catch(err => { - console.log('Failed running script', err); - throw err; - }); - } - }) - .finally(() => { - dbg.wrapper_console(); - dbg.set_console_output(true); - }); -} - -async function clean_test_env() { - if (clean_by_id) { - const vms_list = await azf.listVirtualMachines('', ''); - const vms_to_delete = vms_list.filter(vm => (vm.includes(id) && vm.includes(name))); - console.log(`deleting virtual machines`, vms_to_delete); - await P.map(vms_to_delete, vm => - azf.deleteVirtualMachine(vm) - .catch(err => console.error(`failed deleting ${vm} with error: `, err.message)) - ); - } else { - let vms_to_delete = [ - ...agents.map(agent => agent.name), - server.name.replace(/_/g, '-') - ]; - console.log(`deleting virtual machines`, vms_to_delete); - await P.map(vms_to_delete, vm => - azf.deleteVirtualMachine(vm) - .catch(err => console.error(`failed deleting ${vm} with error: `, err.message)) - ); - } -} - -function create_agents_plan() { - let plan = []; - let osname; - let curOs = 0; - if (skip_agent_creation) { - return plan; - } - for (let i = 0; i < num_agents; i++) { - osname = 
oses[curOs];
-        plan.push({ name: osname + '-' + id, os: osname });
-        curOs += 1;
-        if (curOs === oses.length) {
-            curOs = 0;
-        }
-    }
-    return plan;
-}
-
-function verify_args() {
-    //verifying the vm_size
-    if (vm_size === 'A') {
-        vmSize = 'Standard_A2_v2';
-    } else if (vm_size === 'B') {
-        vmSize = 'Standard_B2s';
-    } else {
-        console.error('vm_size can be only A or B');
-        process.exit(1);
-    }
-    //verifying agents disk size.
-    if (agents_disk_size !== 'default') {
-        if (agents_disk_size > 1023) {
-            console.error(`Max disk size is 1023 GB`);
-            process.exit(1);
-        } else if (agents_disk_size < 40) {
-            console.error(`Min disk size is 40 GB`);
-            process.exit(1);
-        }
-    }
-    //if the number of requested agents is less than the min required, lower the min to the number of agents
-    if (num_agents < min_required_agents) {
-        min_required_agents = num_agents;
-    }
-
-    if (clean_by_id) {
-        if (id === 0) {
-            console.error(`When using --clean_by_id we must use also --id `);
-            process.exit(1);
-        }
-    }
-}
-
-function print_usage() {
-    console.log(`
-Usage: node ${process.argv[1]} --resource --vnet --storage --name --id
-  --help - show this usage
-  --resource - the azure resource group to use
-  --storage - the azure storage account to use
-  --vnet - the azure virtual network to use
-  --name - the vm name
-  --id - run id - will be added to server name and agents
-  --clean_only - only delete resources from previous runs
-  --clean_by_id - delete all the machines with the specified id
-  --cleanup - delete all resources from azure env after the run
-  --upgrade - path to an upgrade package
-  --rerun_upgrade - rerun the upgrade after the first upgrade
-  --server_ip - existing server ip
-  --server_secret - existing server secret
-  --js_script - js script to run after env is ready (receives server_name, server_ip, server_secret arguments)
-  --shell_script - shell script to run after env is ready
-  --skip_agent_creation - do not create new agents
-  --skip_server_creation - do not create a new server, --server_ip and --server_secret must be supplied
-  --skip_configuration - do not create configuration
-  --create_lg - create lg
-  --lg_ip - existing lg ip
-  --server_external_ip - use the server's external ip (default: internal)
-  --num_agents - number of agents to create (default: ${num_agents})
-  --min_required_agents - min number of agents required to run the desired tests (default: ${min_required_agents}), will fail if could not create this number of agents
-  --vm_size - vm size can be A (A2) or B (B2) (default: ${vm_size})
-  --random_base_version - create the noobaa server from a random base version
-  --agents_disk_size - create agents with a custom disk size in GB (min: 40, max: 1023)
-`);
-}
-
-if (require.main === module) {
-    verify_args();
-    main();
-}
diff --git a/src/test/unit_tests/coretest.js b/src/test/unit_tests/coretest.js
index 8496385990..264e5e7fd3 100644
--- a/src/test/unit_tests/coretest.js
+++ b/src/test/unit_tests/coretest.js
@@ -118,6 +118,12 @@ function setup({ incomplete_rpc_coverage } = {}) {
         await mongo_client.instance().db().dropDatabase();
         await announce('mongo_client reconnect()');
         await mongo_client.instance().reconnect();
+        system_store.clean_system_store();
+        await server_rpc.client.redirector.publish_to_cluster({
+            method_api: 'server_inter_process_api',
+            method_name: 'load_system_store',
+            target: ''
+        });
 
         await announce('ensure_support_account()');
         await account_server.ensure_support_account();
diff --git a/src/test/unit_tests/index.js 
b/src/test/unit_tests/index.js index 059fadadb2..f925feb40f 100644 --- a/src/test/unit_tests/index.js +++ b/src/test/unit_tests/index.js @@ -52,7 +52,7 @@ require('./test_nb_native_b64'); require('./test_node_allocator'); require('./test_bucket_chunks_builder'); require('./test_mirror_writer'); -// require('./test_tiering_upload'); +require('./test_tiering_upload'); // SERVERS require('./test_agent'); diff --git a/src/test/unit_tests/signature_test_suite/awscli/awscli_iwgdito6.sreq b/src/test/unit_tests/signature_test_suite/awscli/awscli_iwgdito6.sreq index 8f3abcec98..87f545a93c 100644 --- a/src/test/unit_tests/signature_test_suite/awscli/awscli_iwgdito6.sreq +++ b/src/test/unit_tests/signature_test_suite/awscli/awscli_iwgdito6.sreq @@ -1,14 +1,14 @@ -PUT /files/util/upgrade_utils.js HTTP/1.1 -Host: 127.0.0.1 -Accept-Encoding: identity -Content-Length: 1400 -Content-MD5: kFEhyqsnxOV06DsEuI/VWg== -Expect: 100-continue -Date: Thu, 08 Dec 2016 13:02:40 GMT -User-Agent: aws-cli/1.11.26 Python/2.7.10 Darwin/16.1.0 botocore/1.4.83 -Content-Type: application/javascript -Authorization: AWS 123:94HB0mQmvBXd7/dm6B4JXwTRyJQ= - +PUT /files/util/upgrade_utils.js HTTP/1.1 +Host: 127.0.0.1 +Accept-Encoding: identity +Content-Length: 1400 +Content-MD5: kFEhyqsnxOV06DsEuI/VWg== +Expect: 100-continue +Date: Thu, 08 Dec 2016 13:02:40 GMT +User-Agent: aws-cli/1.11.26 Python/2.7.10 Darwin/16.1.0 botocore/1.4.83 +Content-Type: application/javascript +Authorization: AWS 123:94HB0mQmvBXd7/dm6B4JXwTRyJQ= + 'use strict'; var fs = require('fs'); diff --git a/src/test/unit_tests/test_md_aggregator_unit.js b/src/test/unit_tests/test_md_aggregator_unit.js index fd5a5715fa..0508e590da 100644 --- a/src/test/unit_tests/test_md_aggregator_unit.js +++ b/src/test/unit_tests/test_md_aggregator_unit.js @@ -17,10 +17,97 @@ const config = require('../../../config.js'); const MDStore = require('../../server/object_services/md_store').MDStore; const md_aggregator = require('../../server/bg_services/md_aggregator.js'); +function make_test_system_store(last_update, md_store) { + + const systems = _.times(1, i => ({ + _id: md_store.make_md_id(), + name: `system${i}`, + owner: md_store.make_md_id(), + })); + + const buckets = _.times(10, i => ({ + _id: md_store.make_md_id(), + name: `bucket${i}`, + storage_stats: { + last_update, + chunks_capacity: 0, + blocks_size: 0, + pools: {}, + objects_size: 0, + objects_count: 0, + objects_hist: [], + }, + })); + + const pools = _.times(10, i => ({ + _id: md_store.make_md_id(), + name: `pool${i}`, + storage_stats: { + last_update, + blocks_size: 0, + } + })); + + const system_store = { + is_finished_initial_load: true, + data: { + buckets, + pools, + systems, + }, + changes_list: [], + debug: true, + find_system_by_id(id) { + return _.find(this.data.systems, system => String(system._id) === String(id)); + }, + find_bucket_by_id(id) { + return _.find(this.data.buckets, bucket => String(bucket._id) === String(id)); + }, + find_pool_by_id(id) { + return _.find(this.data.pools, pool => String(pool._id) === String(id)); + }, + make_changes(changes) { + this.changes_list.push(changes); + if (this.debug) { + coretest.log('system store changes #', + this.changes_list.length, + util.inspect(changes, true, null, true) + ); + } + if (changes.update.systems) { + changes.update.systems.forEach(updates => { + const system = this.find_system_by_id(updates._id); + _.forEach(updates, (val, key) => { + if (key !== '_id') _.set(system, key, val); + }); + }); + } + if (changes.update.buckets) { + 
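+            // The mock mirrors just enough of the real system_store.make_changes()
+            // contract for the aggregator tests: every recorded update doc is matched
+            // back to its record by _id, and the remaining fields are applied in
+            // place with _.set(), so nested paths update the in-memory objects.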
changes.update.buckets.forEach(updates => { + const bucket = this.find_bucket_by_id(updates._id); + _.forEach(updates, (val, key) => { + if (key !== '_id') _.set(bucket, key, val); + }); + }); + } + if (changes.update.pools) { + changes.update.pools.forEach(updates => { + const pool = this.find_pool_by_id(updates._id); + _.forEach(updates, (val, key) => { + if (key !== '_id') _.set(pool, key, val); + }); + }); + } + return P.resolve(); + }, + }; + + return system_store; +} + mocha.describe('md_aggregator', function() { const md_store = new MDStore(`_test_md_store_${Date.now().toString(36)}`); - const system_id = md_store.make_md_id(); mocha.describe('calculations', function() { @@ -214,9 +301,10 @@ mocha.describe('md_aggregator', function() { self.timeout(30000); const last_update = Date.now(); const target_now = last_update + CYCLE; - const system_store = make_test_system_store(last_update); + const system_store = make_test_system_store(last_update, md_store); const block_id1 = md_store.make_md_id_from_time(last_update + sub_cycle()); coretest.log('block 1 addtion date', block_id1.getTimestamp().getTime()); + const system_id = system_store.data.systems[0]._id; return P.resolve() .then(() => md_store.insert_blocks([{ @@ -243,7 +331,7 @@ mocha.describe('md_aggregator', function() { })) .then(() => md_aggregator.run_md_aggregator(md_store, system_store, target_now, 0)) .then(() => { - assert.strictEqual(system_store.changes_list.length, 1); + assert.strictEqual(system_store.changes_list.length, 2); const changes = system_store.changes_list[0]; assert.strictEqual(changes.update.buckets.length, system_store.data.buckets.length); assert.strictEqual(changes.update.pools.length, system_store.data.pools.length); @@ -263,18 +351,19 @@ mocha.describe('md_aggregator', function() { self.timeout(30000); const last_update = Date.now(); const target_now = last_update + (2 * CYCLE); - const system_store = make_test_system_store(last_update); + const system_store = make_test_system_store(last_update, md_store); const block_id1 = md_store.make_md_id_from_time(last_update + sub_cycle()); const block_id2 = md_store.make_md_id_from_time(last_update + sub_cycle()); const bucket = system_store.data.buckets[0]; const pool = system_store.data.pools[0]; coretest.log('block 1 addtion date', block_id1.getTimestamp().getTime()); coretest.log('block 2 addtion date', block_id2.getTimestamp().getTime()); + const system_id = system_store.data.systems[0]._id; return P.resolve() .then(() => md_store.insert_blocks([ - make_block(block_id1, 120, bucket, pool), - make_block(block_id2, 350, bucket, pool), + make_block(block_id1, 120, bucket, pool, system_id), + make_block(block_id2, 350, bucket, pool, system_id), ])) .then(() => md_store.insert_chunks([{ _id: md_store.make_md_id_from_time(last_update + sub_cycle()), @@ -294,7 +383,7 @@ mocha.describe('md_aggregator', function() { })) .then(() => md_aggregator.run_md_aggregator(md_store, system_store, target_now, 0)) .then(() => { - assert.strictEqual(system_store.changes_list.length, 2); + assert.strictEqual(system_store.changes_list.length, 4); const changes0 = system_store.changes_list[0]; assert.strictEqual(changes0.update.buckets.length, system_store.data.buckets.length); assert.strictEqual(changes0.update.pools.length, system_store.data.pools.length); @@ -306,7 +395,7 @@ mocha.describe('md_aggregator', function() { changes0.update.pools.forEach(item => { assert.strictEqual(item.storage_stats.last_update, last_update + CYCLE); }); - const changes1 = 
system_store.changes_list[1]; + const changes1 = system_store.changes_list[2]; assert.strictEqual(changes1.update.buckets[0].storage_stats.blocks_size, 120); assert.strictEqual(changes1.update.pools[0].storage_stats.blocks_size, 120); changes1.update.buckets.forEach(item => { @@ -323,17 +412,18 @@ mocha.describe('md_aggregator', function() { self.timeout(30000); const last_update = Date.now(); const target_now = last_update + (2 * CYCLE); - const system_store = make_test_system_store(last_update); + const system_store = make_test_system_store(last_update, md_store); const bucket = system_store.data.buckets[0]; const pool = system_store.data.pools[0]; const blocks_to_delete = []; + const system_id = system_store.data.systems[0]._id; return P.resolve() .then(() => { const blocks = []; for (let i = 0; i < 1024; ++i) { // 1 PB const block_id = md_store.make_md_id_from_time(last_update + sub_cycle()); - blocks.push(make_block(block_id, 1024 * 1024 * 1024 * 1024, bucket, pool)); + blocks.push(make_block(block_id, 1024 * 1024 * 1024 * 1024, bucket, pool, system_id)); if (i % 2) blocks_to_delete.push(block_id); } return md_store.insert_blocks(blocks); @@ -356,7 +446,7 @@ mocha.describe('md_aggregator', function() { })) .then(() => md_aggregator.run_md_aggregator(md_store, system_store, target_now, 0)) .then(() => { - assert.strictEqual(system_store.changes_list.length, 2); + assert.strictEqual(system_store.changes_list.length, 4); const changes0 = system_store.changes_list[0]; assert.strictEqual(changes0.update.buckets.length, system_store.data.buckets.length); assert.strictEqual(changes0.update.pools.length, system_store.data.pools.length); @@ -368,7 +458,7 @@ mocha.describe('md_aggregator', function() { changes0.update.pools.forEach(item => { assert.strictEqual(item.storage_stats.last_update, last_update + CYCLE); }); - const changes1 = system_store.changes_list[1]; + const changes1 = system_store.changes_list[2]; assert.deepEqual(changes1.update.buckets[0].storage_stats.blocks_size, Math.pow(2, 49)); assert.deepEqual(changes1.update.pools[0].storage_stats.blocks_size, Math.pow(2, 49)); changes1.update.buckets.forEach(item => { @@ -385,10 +475,11 @@ mocha.describe('md_aggregator', function() { const self = this; // eslint-disable-line no-invalid-this self.timeout(30000); const last_update = Date.now(); - const system_store = make_test_system_store(last_update); + const system_store = make_test_system_store(last_update, md_store); const num_ranges = system_store.data.buckets.length; const range = CYCLE / 2; const target_now = last_update + (num_ranges * range); + const system_id = system_store.data.systems[0]._id; return P.resolve() .then(() => md_store.insert_blocks(_.times(num_ranges, i => { @@ -398,7 +489,7 @@ mocha.describe('md_aggregator', function() { bucket.storage_stats.last_update = current_cycle; pool.storage_stats.last_update = current_cycle; const block_id = md_store.make_md_id_from_time(current_cycle + (sub_cycle() / 2)); - return make_block(block_id, 666, bucket, pool); + return make_block(block_id, 666, bucket, pool, system_id); }))) .then(() => md_store.insert_chunks([{ _id: md_store.make_md_id_from_time(last_update + sub_cycle()), @@ -415,18 +506,20 @@ mocha.describe('md_aggregator', function() { })) .then(() => md_aggregator.run_md_aggregator(md_store, system_store, target_now, 0)) .then(() => { - assert.strictEqual(system_store.changes_list.length, num_ranges); + assert.strictEqual(system_store.changes_list.length, num_ranges * 2); system_store.changes_list.forEach((changes, i) 
=> { - assert.strictEqual(changes.update.buckets.length, i + 1); - assert.strictEqual(changes.update.pools.length, i + 1); - assert.strictEqual(changes.update.buckets[0].storage_stats.blocks_size, 666); - assert.strictEqual(changes.update.pools[0].storage_stats.blocks_size, 666); - changes.update.buckets.forEach(item => { - assert.strictEqual(item.storage_stats.last_update, last_update + ((i + 1) * range)); - }); - changes.update.pools.forEach(item => { - assert.strictEqual(item.storage_stats.last_update, last_update + ((i + 1) * range)); - }); + if (!changes.update.systems) { + assert.strictEqual(changes.update.buckets.length, (i / 2) + 1); + assert.strictEqual(changes.update.pools.length, (i / 2) + 1); + assert.strictEqual(changes.update.buckets[0].storage_stats.blocks_size, 666); + assert.strictEqual(changes.update.pools[0].storage_stats.blocks_size, 666); + changes.update.buckets.forEach(item => { + assert.strictEqual(item.storage_stats.last_update, last_update + (((i / 2) + 1) * range)); + }); + changes.update.pools.forEach(item => { + assert.strictEqual(item.storage_stats.last_update, last_update + (((i / 2) + 1) * range)); + }); + } }); }); }); @@ -436,7 +529,7 @@ mocha.describe('md_aggregator', function() { self.timeout(30000); const last_update = Date.now(); const target_now = last_update - 1; - const system_store = make_test_system_store(last_update); + const system_store = make_test_system_store(last_update, md_store); return P.resolve() .then(() => md_aggregator.run_md_aggregator(md_store, system_store, target_now, 0)) @@ -465,91 +558,25 @@ mocha.describe('md_aggregator', function() { const num_splits = 13; const last_update = Date.now(); const target_now = last_update + (num_splits * CYCLE); - const system_store = make_test_system_store(last_update); + const system_store = make_test_system_store(last_update, md_store); system_store.debug = false; return P.resolve() .then(() => md_aggregator.run_md_aggregator(md_store, system_store, target_now, 0)) .then(() => { - assert.strictEqual(system_store.changes_list.length, num_splits); + assert.strictEqual(system_store.changes_list.length, num_splits * 2); system_store.changes_list.forEach((changes, i) => { - assert.strictEqual(changes.update.buckets.length, system_store.data.buckets.length); - assert.strictEqual(changes.update.pools.length, system_store.data.pools.length); + if (!changes.update.systems) { + assert.strictEqual(changes.update.buckets.length, system_store.data.buckets.length); + assert.strictEqual(changes.update.pools.length, system_store.data.pools.length); + } }); }); }); }); - - function make_test_system_store(last_update) { - - const buckets = _.times(10, i => ({ - _id: md_store.make_md_id(), - name: `bucket${i}`, - storage_stats: { - last_update, - chunks_capacity: 0, - blocks_size: 0, - pools: {}, - objects_size: 0, - objects_count: 0, - objects_hist: [], - }, - })); - - const pools = _.times(10, i => ({ - _id: md_store.make_md_id(), - name: `pool${i}`, - storage_stats: { - last_update, - blocks_size: 0, - } - })); - - const system_store = { - is_finished_initial_load: true, - data: { - buckets, - pools, - }, - changes_list: [], - debug: true, - find_bucket_by_id(id) { - return _.find(this.data.buckets, bucket => String(bucket._id) === String(id)); - }, - find_pool_by_id(id) { - return _.find(this.data.pools, pool => String(pool._id) === String(id)); - }, - make_changes(changes) { - this.changes_list.push(changes); - if (this.debug) { - coretest.log('system store changes #', - this.changes_list.length, - 
util.inspect(changes, true, null, true) - ); - } - changes.update.buckets.forEach(updates => { - const bucket = this.find_bucket_by_id(updates._id); - _.forEach(updates, (val, key) => { - if (key !== '_id') _.set(bucket, key, val); - }); - }); - changes.update.pools.forEach(updates => { - const pool = this.find_pool_by_id(updates._id); - _.forEach(updates, (val, key) => { - if (key !== '_id') _.set(pool, key, val); - }); - }); - return P.resolve(); - }, - }; - - return system_store; - } - - - function make_block(block_id, size, bucket, pool) { + function make_block(block_id, size, bucket, pool, system_id) { return { _id: block_id, system: system_id, diff --git a/src/test/unit_tests/test_system_store.js b/src/test/unit_tests/test_system_store.js index b57b93125c..7ea772f0f4 100644 --- a/src/test/unit_tests/test_system_store.js +++ b/src/test/unit_tests/test_system_store.js @@ -9,9 +9,8 @@ const _ = require('lodash'); const mongo_client = require('../../util/mongo_client'); coretest.setup(); -// const _ = require('lodash'); const mocha = require('mocha'); -// const assert = require('assert'); +const assert = require('assert'); const system_store = require('../../server/system_services/system_store').get_instance(); @@ -29,14 +28,17 @@ function _get_wiredtiger_log_diff(a, b) { mocha.describe('system_store', function() { // eslint-disable-next-line no-undef - after(function() { + afterEach(function() { // hacky - all the added systems were failing some of the next tests // remove all dummy systems coretest.log('cleaning test systems:'); - return mongo_client.instance().collection('systems').remove({ + return mongo_client.instance().collection('systems').deleteMany({ name: { $nin: ['demo', 'coretest'] } + }).then(() => { + system_store.clean_system_store(); + return system_store.load(); }); }); @@ -98,6 +100,89 @@ mocha.describe('system_store', function() { }); }); + mocha.it('Check make_changes updates new created systems', function() { + const LOOP_CYCLES = 10; + let first_data_store; + return system_store.load() + .then(data1 => { + first_data_store = _.cloneDeep(data1); + console.log('first_data_store', first_data_store.systems.length); + return promise_utils.loop(LOOP_CYCLES, cycle => system_store.make_changes({ + insert: { + systems: [{ + _id: system_store.new_system_store_id(), + name: `JenTheMajesticSlothSystemStoreLoop2-${cycle}`, + owner: system_store.new_system_store_id() + }] + } + })); + }) + .then(() => system_store.load()) + .then(data2 => { + console.log('new_data_store', data2.systems.length); + assert.deepStrictEqual(first_data_store.systems.length + LOOP_CYCLES, data2.systems.length); + }); + }); + + mocha.it('Check make_changes returns no diff when not changing last_update', function() { + const system_id = system_store.new_system_store_id(); + const orig_name = `JenTheMajesticSlothSystemStoreLoop3`; + return system_store.load() + .then(() => system_store.make_changes({ + insert: { + systems: [{ + _id: system_id, + name: orig_name, + owner: system_store.new_system_store_id(), + }] + } + })) + .then(() => system_store.make_changes({ + update: { + systems: [{ + _id: system_id, + name: 'new_name', + dont_change_last_update: true + }] + } + })) + .then(() => system_store.load()) + .then(data2 => { + console.log('new_data_store', data2.systems.length); + assert.strictEqual(data2.systems[0].name, orig_name); + }); + }); + + mocha.it('Check make_changes returns diff when changing last_update', function() { + const system_id = system_store.new_system_store_id(); + const orig_name = 
`JenTheMajesticSlothSystemStoreLoop3`; + return system_store.load() + .then(() => system_store.make_changes({ + insert: { + systems: [{ + _id: system_id, + name: orig_name, + owner: system_store.new_system_store_id(), + }] + } + })) + .then(() => system_store.make_changes({ + update: { + systems: [{ + _id: system_id, + name: 'new_name', + dont_change_last_update: false + }] + } + })) + .then(() => system_store.load()) + .then(data2 => { + console.log('new_data_store', data2.systems.length); + assert.strictEqual(data2.systems[0].name, 'new_name'); + }); + }); + + // TODO continue test_system_store ... }); diff --git a/src/test/utils/basic_server_ops.js b/src/test/utils/basic_server_ops.js index b6a369cee9..e35b5b472e 100644 --- a/src/test/utils/basic_server_ops.js +++ b/src/test/utils/basic_server_ops.js @@ -35,113 +35,7 @@ function disable_rpc_validation() { } function upload_and_upgrade(ip, upgrade_pack, dont_verify_version) { - console.log('Upgrading the machine'); - rpc_validation_disabled = true; //Running from a new code onto an older code server - let client = get_rpc_client(ip); - let previous_srv_version; - - var filename; - if (upgrade_pack.indexOf('/') === -1) { - filename = upgrade_pack; - } else { - filename = upgrade_pack.substring(upgrade_pack.indexOf('/')); - } - - var formData = { - upgrade_file: { - value: fs.createReadStream(upgrade_pack), - options: { - filename: filename, - contentType: 'application/x-gzip' - } - } - }; - - var auth_params = { - email: 'demo@noobaa.com', - password: 'DeMo1', - system: 'demo' - }; - return client.create_auth_token(auth_params) - .then(() => P.ninvoke(request, 'post', { - url: 'http://' + ip + ':8080/upgrade', - formData: formData, - rejectUnauthorized: false, - })) - .then(() => { - let ready = false; - return promise_utils.pwhile(() => !ready, () => client.system.read_system() - .then(res => { - const upgrade_status = _.get(res, 'cluster.shards.0.servers.0.upgrade.status'); - console.log(`waiting for upgrade status to be CAN_UPGRADE. 
current upgrade_status = ${upgrade_status}`); - if (upgrade_status === 'CAN_UPGRADE') { - previous_srv_version = res.version; - ready = true; - } else if (upgrade_status === 'FAILED') { - console.log('Failed on pre upgrade tests', util.inspect(res.cluster.shards[0].servers)); - throw new Error('Failed on pre upgrade tests'); - } else { - return P.delay(5000); - } - })); - }) - .then(() => console.log('Upload package successful')) - .then(() => client.upgrade.upgrade_cluster()) - .catch(err => { - // TODO: remove when upgrade_api is merged to base version (version we upgrade from in vitaly) - if (err.rpc_code === 'NO_SUCH_RPC_SERVICE') { - console.log('Failed using upgrade.upgrade_cluster, using cluster_internal.upgrade_cluster', err); - return client.cluster_internal.upgrade_cluster(); - } else { - throw err; - } - }) - .then(() => P.delay(10000)) - .then(() => wait_for_server(ip)) - .then(() => P.delay(10000)) - .then(() => { - var isNotListening = true; - return promise_utils.pwhile( - function() { - return isNotListening; - }, - function() { - return P.ninvoke(request, 'get', { - url: 'http://' + ip + ':80/', - rejectUnauthorized: false, - }).then(res => { - console.log('S3 server started after upgrade'); - isNotListening = false; - }, err => { - console.log('waiting for S3 server to start', err); - return P.delay(10000); - }); - }); - }) - .then(() => client.system.read_system()) - .then(res => { - // make sure that upgrade is successful by checking it is marked as completed - const upgrade_successful = res.cluster.shards[0].servers.every(server => server.upgrade.status === 'COMPLETED'); - if (!upgrade_successful) { - console.error('some servers did not reach to upgrade COMPLETED status:', - res.cluster.shards[0].servers.map(server => server.upgrade)); - throw new Error('not all servers completed upgrade successfully'); - } - //Server is up, check returned version to verify the server was upgraded - if (dont_verify_version) { - console.warn('Skipping version check - probably upgrading to the same version intentionally'); - return; - } - if (res.version === previous_srv_version) { - //Upgrade actually failed - console.error('Upgrade failed, version did not change'); - throw new Error('Upgrade failed, version did not change'); - } - }) - .catch(err => { - console.error('Upload package failed', err, err.stack); - throw new Error('Upload package failed ' + err); - }); + throw new Error('DEPRECATED - this upgrade flow is not relevant anymore'); } async function wait_for_server(ip, wait_for_version) { diff --git a/src/tools/mongodb_bucket_blow.js b/src/tools/mongodb_bucket_blow.js index 670be7e9de..c51cd0ed36 100644 --- a/src/tools/mongodb_bucket_blow.js +++ b/src/tools/mongodb_bucket_blow.js @@ -12,6 +12,7 @@ let system_id = db.systems.findOne()._id; let pool_id = db.pools.findOne({ resource_type: { $ne: "INTERNAL" } })._id; let ccc = db.chunk_configs.findOne()._id; +let now = Date.now(); for (let j = 0; j < 5; ++j) { let array_of_tiers = []; @@ -31,6 +32,7 @@ for (let j = 0; j < 5; ++j) { _id: new ObjectId(), spread_pools: [pool_id], }], + last_update: now, }); array_of_policies.push({ _id: policy_id, @@ -45,7 +47,8 @@ for (let j = 0; j < 5; ++j) { chunk_split_config: { avg_chunk: 4194304, delta_chunk: 1048576 - } + }, + last_update: now }); array_of_buckets.push({ _id: bucket_id, @@ -64,7 +67,8 @@ for (let j = 0; j < 5; ++j) { last_update: Date.now() - (2 * 90000) }, lambda_triggers: [], - versioning: "DISABLED" + versioning: "DISABLED", + last_update: now, }); } 
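 // The generated docs are batch-inserted below. A hedged usage sketch (the exact
 // invocation is an assumption, not part of this change) against a local nbcore db:
 //
 //     mongo nbcore src/tools/mongodb_bucket_blow.js
 //
 // The last_update: now fields added above presumably keep the synthetic tiers,
 // tiering policies and buckets aligned with schemas that expect that timestamp.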
db.tiers.insert(array_of_tiers);
diff --git a/src/upgrade/default_upgrade_backup.sh b/src/upgrade/default_upgrade_backup.sh
deleted file mode 100755
index 841322ab60..0000000000
--- a/src/upgrade/default_upgrade_backup.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-set -e
-
-BACKUP_DIR=/backup
-ETC_BACKUP_DIR=${BACKUP_DIR}/etc
-CORE_DIR=/root/node_modules/noobaa-core
-
-
-if [ "$1" == "restore" ]; then
-    echo "UPGRADE: restoring previous version files"
-    if [ -d ${BACKUP_DIR}/noobaa-core ]; then
-        echo "UPGRADE: restoring ${CORE_DIR}"
-        echo "UPGRADE: removing directory of failed upgrade: ${CORE_DIR}"
-        rm -rf ${CORE_DIR}
-        echo "UPGRADE: copying ${BACKUP_DIR}/noobaa-core to ${CORE_DIR}"
-        /bin/cp -rf ${BACKUP_DIR}/noobaa-core ${CORE_DIR}
-        echo "UPGRADE: ${CORE_DIR} restored successfully"
-    else
-        echo "UPGRADE: Restore error. could not find directory ${BACKUP_DIR}/noobaa-core to restore from"
-    fi
-
-    if [ -d ${ETC_BACKUP_DIR} ]; then
-        echo "UPGRADE: restoring ${ETC_BACKUP_DIR}"
-        /bin/cp -rf ${ETC_BACKUP_DIR}/* /etc/
-        echo "UPGRADE: ${ETC_BACKUP_DIR} restored successfully"
-    else
-        echo "UPGRADE: Restore error. could not find directory ${ETC_BACKUP_DIR} to restore from"
-    fi
-
-
-else
-    echo "UPGRADE: removing old backup dir and creating new one"
-    rm -rf ${BACKUP_DIR}
-    mkdir -p ${BACKUP_DIR}
-    mkdir -p ${ETC_BACKUP_DIR}
-
-    echo "UPGRADE: copying noobaa-core to ${BACKUP_DIR}"
-    /bin/cp -rf ${CORE_DIR} ${BACKUP_DIR}
-
-    echo "UPGRADE: copy /etc files to ${ETC_BACKUP_DIR}"
-    /bin/cp /etc/noobaa_* ${ETC_BACKUP_DIR}
-    /bin/cp /etc/ntp.conf ${ETC_BACKUP_DIR}
-    /bin/cp /etc/yum.conf ${ETC_BACKUP_DIR}
-    /bin/cp /etc/dhclient.conf ${ETC_BACKUP_DIR}
-    /bin/cp /etc/resolv.conf ${ETC_BACKUP_DIR}
-    /bin/cp -r /data/mongo/ssl ${ETC_BACKUP_DIR}
-
-    echo "UPGRADE: backup finished successfully"
-fi
diff --git a/src/upgrade/platform_upgrade.js b/src/upgrade/platform_upgrade.js
deleted file mode 100644
index 468b5a7ec5..0000000000
--- a/src/upgrade/platform_upgrade.js
+++ /dev/null
@@ -1,843 +0,0 @@
-/* Copyright (C) 2016 NooBaa */
-"use strict";
-
-const os = require('os');
-const fs = require('fs');
-const path = require('path');
-const _ = require('lodash');
-
-const pkg = require('../../package.json');
-const dbg = require('../util/debug_module')(__filename);
-const P = require('../util/promise');
-const promise_utils = require('../util/promise_utils');
-const fs_utils = require('../util/fs_utils');
-const dotenv = require('../util/dotenv');
-const os_utils = require('../util/os_utils');
-const supervisor = require('../server/utils/supervisor_ctrl');
-const mongo_client = require('../util/mongo_client');
-
-const EXTRACTION_PATH = '/tmp/test';
-const UPGRADE_INFO_FILE_PATH = `/tmp/upgrade_info_${pkg.version}.json`;
-const CORE_DIR = '/root/node_modules/noobaa-core';
-const NEW_VERSION_DIR = path.join(EXTRACTION_PATH, 'noobaa-core');
-// use a shell script for backup\restore so we won't be dependent on the node version
-const BACKUP_SCRIPT = path.join(CORE_DIR, 'src/upgrade/pre_upgrade_backup.sh');
-const BACKUP_SCRIPT_NEW_PATH = path.join(EXTRACTION_PATH, 'pre_upgrade_backup.sh');
-const HOME = '/root';
-const NVM_DIR = `${HOME}/.nvm`;
-
-const SWAP_SIZE_MB = 8 * 1024;
-
-
-// in clustered mongo it can take time before all members are operational. wait up to 10 minutes
-const WAIT_FOR_MONGO_TIMEOUT = 10 * 60000;
-
-
-const DOTENV_VARS_FROM_OLD_VER = Object.freeze([
-    'JWT_SECRET',
-    'PLATFORM',
-    'DEV_MODE',
-    'MONGO_RS_URL',
-    'MONGO_SSL_USER',
-    'ENDPOINT_BLOB_ENABLED',
-    'PLATFORM',
-    'ENDPOINT_PORT',
-    'ENDPOINT_SSL_PORT'
-]);
-
-const EXEC_DEFAULTS = Object.freeze({
-    ignore_rc: false,
-    return_stdout: true,
-    trim_stdout: true
-});
-
-// map process name (in ps) to service name (in supervisor.conf)
-const SERVICES_INFO = Object.freeze([{
-        srv: 'webserver',
-        proc: 'web_server',
-        stop: true,
-    },
-    {
-        srv: 'bg_workers',
-        proc: 'bg_workers',
-        stop: true,
-    },
-    {
-        srv: 'hosted_agents',
-        proc: 'hosted_agents_starter',
-        stop: true,
-    },
-    {
-        srv: 's3rver',
-        proc: 's3rver_starter',
-        stop: true,
-    },
-    {
-        srv: 'mongo_monitor',
-        proc: 'mongo_monitor',
-        stop: true,
-    },
-    {
-        srv: 'upgrade_manager',
-        proc: 'upgrade_manager',
-        stop: false,
-    },
-    {
-        srv: 'mongo_wrapper',
-        proc: 'mongo_wrapper',
-        stop: false,
-    },
-    {
-        srv: 'rsyslog',
-        proc: 'rsyslogd',
-        stop: false,
-    },
-    {
-        srv: 'logrotate',
-        proc: 'logrotate.sh',
-        stop: false,
-    },
-]);
-
-
-async function stopped_services_during_upgrade() {
-    const supervised_list = await supervisor.list();
-    dbg.log0('UPGRADE: current services list is', supervised_list);
-
-    // stop all services but upgrade_manager, mongo_wrapper and rsyslog\logrotate
-    return supervised_list.filter(srv => (
-        srv !== 'mongo_wrapper' &&
-        srv !== 'upgrade_manager' &&
-        srv !== 'rsyslog' &&
-        srv !== 'logrotate'
-    ));
-}
-
-
-async function stop_services() {
-    // first stop all services in supervisor conf except those required for upgrade
-    dbg.log0('UPGRADE: stopping services before upgrade');
-    const srv_to_stop = await stopped_services_during_upgrade();
-    dbg.log0('UPGRADE: stopping services:', srv_to_stop);
-    await supervisor.update_services_autostart(srv_to_stop, false);
-    await supervisor.apply_changes();
-    await supervisor.stop(srv_to_stop);
-
-    // now make sure that all the services to stop are actually stopped
-    const procs = SERVICES_INFO
-        .filter(info => srv_to_stop.includes(info.srv))
-        .map(info => info.proc);
-    const ps_services = await os_utils.get_services_ps_info(procs);
-    if (ps_services.length > 0) {
-        dbg.warn('UPGRADE: found services that should be down. killing them:', ps_services);
-        ps_services.forEach(srv => {
-            try {
-                process.kill(Number.parseInt(srv.pid, 10), 'SIGKILL');
-            } catch (err) {
-                dbg.warn('failed killing process', srv);
-            }
-        });
-    }
-
-    // now make sure that there are no rogue processes left running
-}
-
-async function start_services() {
-    const stopped_services = await stopped_services_during_upgrade();
-    dbg.log0('UPGRADE: starting services:', stopped_services);
-    await supervisor.start(stopped_services);
-}
-
-async function run_platform_upgrade_steps(old_version) {
-    if (!should_upgrade_platform()) return;
-
-    dbg.log0(`UPGRADE: upgrading from version ${old_version}`);
-
-    await platform_upgrade_common();
-
-    // perform specific platform upgrades according to old version
-    const [major_ver, minor_ver] = parse_ver(old_version.split('-')[0]);
-    if (major_ver <= 2) {
-        if (minor_ver < 4) await platform_upgrade_2_4_0();
-        if (minor_ver < 7) await platform_upgrade_2_7_0();
-        if (minor_ver < 8) await platform_upgrade_2_8_0();
-        if (minor_ver < 10) await platform_upgrade_2_10_0();
-    }
-
-}
-
-async function platform_upgrade_2_10_0() {
-    dbg.log0('UPGRADE: running platform_upgrade_2_10_0');
-    // azure handles swap in a non generic way. 
fix to the azure way - await fix_azure_swap(); - await fix_supervisor_alias(); -} - - -async function fix_mongod_user() { - // change ownership of mongo related files to mongod user\group - await exec('chown -R mongod:mongod /data/mongo/'); - await exec('chown -R mongod:mongod /data/mongo/ssl/'); - //change permissions for mongo_ssl files - allow r\x for dir and r only for files - await exec('chmod 400 -R /data/mongo/ssl/*'); - await exec('chmod 500 /data/mongo/ssl'); -} - -async function fix_azure_swap() { - if (process.env.PLATFORM !== 'azure') return; - // turn off swap - await exec('/sbin/swapoff -a'); - // remove swap partition from fstab - await fs_utils.file_delete('/swapfile'); - // remove swap partition in fstab - await exec(`sed -i 's:/swapfile.*::' /etc/fstab`); - - // configure swap using azure's waagent - await exec(`sed -i 's:ResourceDisk.EnableSwap=n:ResourceDisk.EnableSwap=y:' /etc/waagent.conf`); - await exec(`sed -i 's:ResourceDisk.SwapSizeMB=0:ResourceDisk.SwapSizeMB=${SWAP_SIZE_MB}:' /etc/waagent.conf`); - // restart waagent - await exec(`service waagent restart`); - -} - -async function fix_supervisor_alias() { - const SUPERVISOR_ALIAS = ` - -supervisorctl() { -/bin/supervisorctl $@ -if [ $1 == "status" ]; then - echo "" - if [ ! -f /tmp/mongo_wrapper_backoff ]; then - echo "There is no mongo backoff in effect" - else - echo "There is a mongo backoff in effect for $(cat /tmp/mongo_wrapper_backoff) seconds" - fi - fi -} - -`; - - const supervisor_func = await promise_utils.exec(`grep supervisorctl\\(\\) ~/.bashrc | wc -l`, { - ignore_rc: false, - return_stdout: true, - trim_stdout: true - }); - - if (supervisor_func === '0') { - await fs.appendFileAsync('/root/.bashrc', SUPERVISOR_ALIAS); - } -} - -async function platform_upgrade_2_8_0() { - if (process.env.PLATFORM === 'docker') return; - dbg.log0('UPGRADE: running platform_upgrade_2_8_0'); - // exclude cleaning of supervisor and upgrade files - await fs.appendFileAsync('/usr/lib/tmpfiles.d/tmp.conf', 'x /tmp/supervisor*\n'); - await fs.appendFileAsync('/usr/lib/tmpfiles.d/tmp.conf', 'x /tmp/test\n'); - await exec('systemctl restart systemd-tmpfiles-clean.service'); -} - -async function supervisor_tmp_deletion_rules() { - if (process.env.PLATFORM === 'docker') return; - dbg.log0('UPGRADE: running supervisor_tmp_deletion_rules'); - const tmp_conf = await fs.readFileAsync('/usr/lib/tmpfiles.d/tmp.conf').toString(); - let should_restart = false; - if (tmp_conf.indexOf('x /tmp/supervisor*') < 0) { - await fs.appendFileAsync('/usr/lib/tmpfiles.d/tmp.conf', 'x /tmp/supervisor*\n'); - should_restart = true; - } - if (tmp_conf.indexOf('x /tmp/test') < 0) { - await fs.appendFileAsync('/usr/lib/tmpfiles.d/tmp.conf', 'x /tmp/test\n'); - should_restart = true; - } - if (should_restart) { - // exclude cleaning of supervisor and upgrade files - await exec('systemctl restart systemd-tmpfiles-clean.service'); - } -} - -async function platform_upgrade_2_7_0() { - dbg.log0('UPGRADE: running platform_upgrade_2_7_0'); - //verify abrt package is removed - await exec('yum remove abrt -y'); -} - -// platform upgrade for version 2.4.0 -// * ensure swap is configured correctly in /etc/fstab -async function platform_upgrade_2_4_0() { - dbg.log0('UPGRADE: running platform_upgrade_2_4_0'); - await ensure_swap(); -} - -async function platform_upgrade_common(params) { - await supervisor_tmp_deletion_rules(); -} - -function should_upgrade_platform() { - return os.type() === 'Linux'; -} - -async function exec(command, options = {}) { - try { - 
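-        // exec() always applies EXEC_DEFAULTS (fail on non-zero rc, return trimmed
-        // stdout); the options arg only controls error handling here. A hedged usage
-        // sketch, where ignore_err lets a best-effort step continue on failure:
-        //
-        //     await exec('yum remove abrt -y', { ignore_err: true });
-        //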
dbg.log0('UPGRADE: executing command:', command); - const stdout = await promise_utils.exec(command, EXEC_DEFAULTS); - return stdout; - } catch (err) { - dbg.error('UPGRADE: got error when executing command', command, err); - if (!options.ignore_err) throw err; - } -} - -async function ensure_swap() { - if (process.env.PLATFORM === 'docker') return; - - // skip swap configuration in azure - if (process.env.PLATFORM === 'azure') return; - - const swap_conf = await fs_utils.find_line_in_file('/etc/fstab', 'swapfile'); - if (swap_conf) { - dbg.log0('UPGRADE: swap is already configured in /etc/fstab'); - return; - } - - try { - const swap_summary = await exec(`swapon -s`); - if (swap_summary) { - dbg.log0('UPGRADE: setup_swap: Swap summary:', swap_summary); - } else { - dbg.log0('UPGRADE: setting up swap:'); - dbg.log0(`UPGRADE: allocate /swapfile of size ${SWAP_SIZE_MB}MB`); - await exec(`dd if=/dev/zero bs=1M count=${SWAP_SIZE_MB} of=/swapfile`); - await exec(`chmod 600 /swapfile`); - dbg.log0(`UPGRADE: create and enable swap on /swapfile`); - await exec(`mkswap /swapfile`); - await exec(`swapon /swapfile`); - } - dbg.log0(`UPGRADE: configure swap in /etc/fstab`); - await fs.appendFileAsync('/etc/fstab', '/swapfile\tswap\tswap\tsw\t0\t0\n'); - } catch (err) { - dbg.error('UPGRADE: got error on setup_swap. swap might not be configured', err); - throw err; - } -} - -async function set_new_node_version(ver) { - try { - await exec(`rm -f /usr/local/bin/node`); - dbg.log0(`UPGRADE: pre_upgrade: Removed /usr/local/bin/node`); - await exec(`ln -s ~/.nvm/versions/node/v${ver}/bin/node /usr/local/bin/node`); - await exec(`. ${NVM_DIR}/nvm.sh;nvm alias default ${ver}`); - await exec(`. ${NVM_DIR}/nvm.sh;nvm use ${ver}`); - } catch (err) { - dbg.error(`failed setting node version to ${ver}`, err); - throw err; - } -} - -async function update_npm_version() { - const REQUIRED_NPM_VERSION = '6.1.0'; - const npm_version = await promise_utils.exec(`source /root/.nvm/nvm.sh && npm --version`, { - ignore_rc: false, - return_stdout: true, - trim_stdout: true - }); - if (version_compare(npm_version, REQUIRED_NPM_VERSION) < 0) { - dbg.log0(`UPGRADE: npm version is ${npm_version}. upgrading to ${REQUIRED_NPM_VERSION}`); - const npm_update = await promise_utils.exec(`source /root/.nvm/nvm.sh && npm install -g npm@${REQUIRED_NPM_VERSION}`, { - ignore_rc: false, - return_stdout: true, - trim_stdout: true - }); - dbg.log0('npm update returned', npm_update); - } else { - dbg.log0(`UPGRADE: npm version is ${npm_version}. no need to upgrade`); - } -} - -async function update_nvm_version() { - const REQUIRED_NVM_VERSION = '0.33.11'; - const nvm_version = await promise_utils.exec(`source /root/.nvm/nvm.sh && nvm --version`, { - ignore_rc: false, - return_stdout: true, - trim_stdout: true - }); - if (version_compare(nvm_version, REQUIRED_NVM_VERSION) < 0) { - dbg.log0(`UPGRADE: nvm version is ${nvm_version}. upgrading to ${REQUIRED_NVM_VERSION}`); - const nvm_update = await promise_utils.exec( - `curl -o- https://raw.githubusercontent.com/creationix/nvm/v${REQUIRED_NVM_VERSION}/install.sh | bash`, { - ignore_rc: false, - return_stdout: true, - trim_stdout: true - }); - dbg.log0('nvm update returned', nvm_update); - } else { - dbg.log0(`UPGRADE: nvm version is ${nvm_version}. 
- }
-}
-
-async function update_node_version() {
-
- let old_nodever;
- let nodever;
- try {
- nodever = (await fs.readFileAsync(`${EXTRACTION_PATH}/noobaa-core/.nvmrc`)).toString().trim();
- old_nodever = (await fs.readFileAsync(`${CORE_DIR}/.nvmrc`)).toString().trim();
- dbg.log0(`UPGRADE: old node version is ${old_nodever}. new node version is ${nodever}`);
- if (nodever === old_nodever) {
- dbg.log0(`UPGRADE: node version has not changed. skip node update`);
- return;
- }
- } catch (err) {
- dbg.warn('UPGRADE: failed getting node versions. abort', err);
- throw err;
- }
- //TODO: remove: we no longer have nvm/node in /build/public (we should probably remove the whole function/file...)
- await exec(`cp -f ${EXTRACTION_PATH}/noobaa-core/build/public/nvm.sh ~/.nvm/`);
- dbg.log0('UPGRADE: pre_upgrade: Copied nvm.sh');
- await exec(`chmod 777 ~/.nvm/nvm.sh`);
- dbg.log0('UPGRADE: pre_upgrade: Configured permissions to nvm.sh');
-
- dbg.log0('UPGRADE: pre_upgrade: Nodever', nodever);
- await fs_utils.create_fresh_path(`/tmp/v${nodever}`);
- dbg.log0(`UPGRADE: pre_upgrade: Created dir /tmp/v${nodever}`);
- await exec(`cp ${EXTRACTION_PATH}/noobaa-core/build/public/node-v${nodever}-linux-x64.tar.xz /tmp/`);
- dbg.log0(`UPGRADE: pre_upgrade: Copied node package`);
- await exec(`tar -xJf /tmp/node-v${nodever}-linux-x64.tar.xz -C /tmp/v${nodever} --strip-components 1`);
- dbg.log0(`UPGRADE: pre_upgrade: Extracted node package`);
- await exec(`mkdir -p ~/.nvm/versions/node/v${nodever}/`);
- dbg.log0(`UPGRADE: pre_upgrade: Created node dir`);
- await exec(`cp -r /tmp/v${nodever}/* ~/.nvm/versions/node/v${nodever}/`);
- dbg.log0(`UPGRADE: pre_upgrade: copied node dir from /tmp to /.nvm`);
-
- // TODO: maybe backup the old node version in backup script
- try {
- await set_new_node_version(nodever);
- } catch (err) {
- dbg.error('failed when trying to set new node version. reverting to version', old_nodever);
- await set_new_node_version(old_nodever);
- throw err;
- }
- dbg.log0('UPGRADE: pre_upgrade: Success');
-}
-
-async function platform_upgrade_init() {
- if (!should_upgrade_platform()) return;
-
- await update_npm_version();
- await update_nvm_version();
- await update_node_version();
-}
-
-async function backup_old_version() {
- if (!should_upgrade_platform()) return;
-
- dbg.log0('UPGRADE: backing up old version platform files');
- // init to the default backup script for old versions that did not include one
- let backup_script = path.join(NEW_VERSION_DIR, 'src/upgrade/default_upgrade_backup.sh');
- if (await fs_utils.file_exists(BACKUP_SCRIPT)) {
- backup_script = BACKUP_SCRIPT;
- }
- // copy backup script from current location to a new stable location
- await fs_utils.file_copy(backup_script, BACKUP_SCRIPT_NEW_PATH);
- await exec(`${backup_script}`);
- dbg.log0('UPGRADE: old version backed up successfully');
-}
-
-async function restore_old_version() {
- if (!should_upgrade_platform()) return;
-
- dbg.log0('UPGRADE: restoring old version');
- await exec(`${BACKUP_SCRIPT_NEW_PATH} restore`);
-}
-
-async function copy_new_code() {
- if (!should_upgrade_platform()) return;
- dbg.log0(`UPGRADE: deleting old code from ${CORE_DIR}`);
- await fs_utils.folder_delete(CORE_DIR);
- dbg.log0(`UPGRADE: copying ${NEW_VERSION_DIR} to ${CORE_DIR}`);
- await fs_utils.full_dir_copy(NEW_VERSION_DIR, CORE_DIR);
-}
-
-// build .env file in new version by taking all required env vars from old version
-async function build_dotenv() {
- dbg.log0('UPGRADE: generating dotenv file in the new version directory');
- const old_env = dotenv.parse(await fs.readFileAsync(`/data/.env`));
- const new_env_defaults = process.env.container === 'docker' ?
- await fs.readFileAsync(`${CORE_DIR}/src/deploy/NVA_build/env.orig`) :
- await fs.readFileAsync(`${NEW_VERSION_DIR}/src/deploy/NVA_build/env.orig`);
- const new_env = Object.assign(
- dotenv.parse(new_env_defaults),
- _.pick(old_env, DOTENV_VARS_FROM_OLD_VER),
- );
-
- dbg.log0('UPGRADE: generating .env file for new version:', new_env);
-
- await fs.writeFileAsync(`/data/.env`, dotenv.stringify(new_env));
-}
-
-// TODO: make sure that update_services is synchronized between all cluster members
-// (currently all members are upgraded serially, so we're good)
-async function update_services(old_version) {
- dbg.log0('UPGRADE: updating services in noobaa_supervisor.conf');
- // perform specific platform upgrades according to old version
- const [major_ver, minor_ver] = parse_ver(old_version.split('-')[0]);
- if (major_ver <= 2) {
- if (minor_ver < 10) await update_services_2_10_0();
- }
-
-}
-
-
-async function update_services_2_10_0() {
- dbg.log0('UPGRADE: upgrading from version older than 2.10.0');
- dbg.log0('UPGRADE: change mongo kill signal from KILL to INT');
- // change_mongo_kill_signal will also restart mongo_wrapper so it will run mongod by mongod user
- await change_mongo_kill_signal_and_update_mongod_user();
-}
-
-// attempt to send kill -2 to currently running mongod, so it can do a clean shutdown
-async function clean_shutdown_old_mongo() {
- try {
- const mongo_pid = Number.parseInt(await exec('pgrep mongod'), 10);
- if (mongo_pid) {
- // send SIGINT to mongod
- dbg.log0(`UPGRADE: mongod PID is ${mongo_pid}. sending SIGINT for clean shutdown`);
- process.kill(mongo_pid, 'SIGINT');
- dbg.log0(`UPGRADE: sent SIGINT to ${mongo_pid}. wait for mongod to shutdown`);
-
- // wait for mongo to shutdown
- let mongo_running = true;
- const timeout_seconds = 5 * 60;
- let secs = 0;
- const delay_secs = 2;
- while (mongo_running) {
- await P.delay(delay_secs * 1000);
- try {
- // send signal 0 to test if pid is still alive
- process.kill(mongo_pid, 0);
- // process is still alive:
- secs += delay_secs;
- dbg.warn(`UPGRADE: mongod process[${mongo_pid}] is still alive. waiting for shutdown..`);
- if (secs >= timeout_seconds) {
- dbg.warn(`UPGRADE: mongod process[${mongo_pid}] did not shutdown in ${timeout_seconds}`,
- ` seconds!! force killing mongod and continue with upgrade..`);
- process.kill(mongo_pid, 'SIGKILL');
- mongo_running = false;
- }
- } catch (error) {
- dbg.log0(`UPGRADE: mongod[${mongo_pid}] was shutdown successfully. continue with upgrade..`);
- mongo_running = false;
- }
- }
- }
- // stop mongo_wrapper program
- await supervisor.stop(['mongo_wrapper']);
- } catch (err) {
- dbg.warn('failed to send SIGINT to mongod', err);
- }
-}
-
-function disconnect_mongo_client() {
- dbg.warn('UPGRADE: disconnecting mongo_client..');
- mongo_client.instance().disconnect();
-}
-
-function connect_mongo_client() {
- dbg.warn('UPGRADE: connecting mongo_client..');
- mongo_client.instance().connect();
-}
-
-async function change_mongo_kill_signal_and_update_mongod_user() {
- const mongo_wrapper_prog = await supervisor.get_program('mongo_wrapper');
- // before touching mongo, first disconnect mongo_client
- disconnect_mongo_client();
- dbg.log0(`UPGRADE: changing mongo_wrapper stop signal in noobaa_supervisor.conf from ${mongo_wrapper_prog.stopsignal} to INT`);
- mongo_wrapper_prog.stopsignal = 'INT';
- // stopwaitsecs is the time supervisord waits for the supervised program to end before using SIGKILL
- dbg.log0(`UPGRADE: adding to mongo_wrapper stopwaitsecs=30`);
- mongo_wrapper_prog.stopwaitsecs = 30;
-
- try {
- await supervisor.update_program(mongo_wrapper_prog);
- await clean_shutdown_old_mongo();
-
- // after mongod is stopped fix ownership and permissions
- await fix_mongod_user();
-
- await supervisor.apply_changes();
- await supervisor.start(['mongo_wrapper']);
- } catch (err) {
- dbg.error('failed updating mongo program in supervisor conf', err);
- }
-
- try {
- connect_mongo_client();
- dbg.log0('waiting for mongo..');
- await mongo_client.instance().wait_for_all_members(WAIT_FOR_MONGO_TIMEOUT);
- dbg.log0('UPGRADE: all mongodb members are up');
- } catch (err) {
- // if mongo is not up yet something went wrong
- // exit upgrade manager and let it restart from current stage. can help if the problem is in mongo_client\mongo-driver
- dbg.error('UPGRADE: failed waiting for mongo to start. exit and restart upgrade manager', err);
- process.exit(1);
- }
-}
-
-
-async function upgrade_mongodb_version(params) {
- if (params.should_upgrade_mongodb) {
- try {
- // before touching mongo, first disconnect mongo_client
- disconnect_mongo_client();
- if (params.is_cluster && await mongo_client.instance().is_master(params.ip)) {
- // if this is the master, step down and continue
- try {
- await mongo_client.instance().step_down_master({ force: true, duration: 120 });
- } catch (err) {
- dbg.error(`UPGRADE: failed to step down master. stopping mongo and continuing with upgrade`);
- }
- }
- mongo_client.instance().ignore_connect_timeout();
- dbg.log0('UPGRADE: stopping mongo_wrapper service before upgrading mongodb');
- await supervisor.stop(['mongo_wrapper']);
- dbg.log0('UPGRADE: mongo_wrapper stopped');
- const mongo_repo_path = `${NEW_VERSION_DIR}/src/deploy/NVA_build/mongo.repo`;
- dbg.log0(`UPGRADE: copying ${mongo_repo_path} to /etc/yum.repos.d/mongodb-org-3.6.repo`);
- await fs_utils.file_copy(mongo_repo_path, '/etc/yum.repos.d/mongodb-org-3.6.repo');
- await fs_utils.file_delete('/etc/yum.repos.d/mongodb-org-3.4.repo');
- const mongo_packages_to_install = [
- `mongodb-org-${params.required_mongodb_version}`,
- `mongodb-org-server-${params.required_mongodb_version}`,
- `mongodb-org-shell-${params.required_mongodb_version}`,
- `mongodb-org-mongos-${params.required_mongodb_version}`,
- `mongodb-org-tools-${params.required_mongodb_version}`
- ];
- const yum_clean_res = await promise_utils.exec(`yum clean all`, {
- ignore_rc: true,
- return_stdout: true,
- trim_stdout: true
- });
- dbg.log0('UPGRADE: yum clean all returned:', yum_clean_res);
- const yum_res = await promise_utils.exec(`yum update -y ${mongo_packages_to_install.join(' ')} --disableexcludes=all`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- });
- dbg.log0('UPGRADE: yum install returned:', yum_res);
-
- // make sure all files are owned by mongod user
- await fix_mongod_user();
-
- dbg.log0('UPGRADE: restarting mongo_wrapper');
- const mongo_wrapper_prog = await supervisor.get_program('mongo_wrapper');
- // in 3.6 the default bind_ip is 127.0.0.1 (mongo cannot get connections from outside). change to bind all interfaces
- mongo_wrapper_prog.command += ' --bind_ip_all';
- await supervisor.update_program(mongo_wrapper_prog);
- await supervisor.apply_changes();
- await supervisor.start(['mongo_wrapper']);
-
-
- try {
- connect_mongo_client();
- dbg.log0('waiting for mongo..');
- await mongo_client.instance().wait_for_all_members(WAIT_FOR_MONGO_TIMEOUT);
- dbg.log0('UPGRADE: all mongodb members are up');
- } catch (err) {
- dbg.error('UPGRADE: failed waiting for mongo to start.', err);
- throw err;
- }
-
- } catch (err) {
- dbg.error('UPGRADE: failed upgrading mongodb version', err);
- throw err;
- }
-
- }
-}
-
-async function get_mongo_shell_command(is_cluster) {
- let mongo_shell = '/usr/bin/mongo nbcore';
- if (is_cluster) {
- dbg.log0('UPGRADE: set_mongo_cluster_mode: Called');
- const rs_servers = await promise_utils.exec(`grep MONGO_RS_URL /data/.env | cut -d'@' -f 2 | cut -d'/' -f 1`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- });
- dbg.log0(`UPGRADE: set_mongo_cluster_mode: MONGO_SHELL`, rs_servers);
- mongo_shell = `/data/bin/mongors --host mongodb://${rs_servers}/nbcore?replicaSet=shard1`;
- }
- dbg.log0(`UPGRADE: using this mongo shell command: ${mongo_shell}`);
- return mongo_shell;
-
-}
-
-async function upgrade_mongodb_schemas(params) {
- const secret = await os_utils.read_server_secret();
- const MONGO_SHELL = await get_mongo_shell_command(params.is_cluster);
- const ver = pkg.version;
-
- let is_pure_version = false;
- try {
- // eslint-disable-next-line global-require
- const old_config = require('/backup/noobaa-core/config');
- is_pure_version = old_config.EXPERIMENTAL_DISABLE_S3_COMPATIBLE_DELEGATION === true ||
- old_config.EXPERIMENTAL_DISABLE_S3_COMPATIBLE_METADATA === true;
- } catch (err) {
- if (err.code !== 'MODULE_NOT_FOUND') throw err;
- console.log('did not find old config /backup/noobaa-core/config');
- }
- async function set_mongo_debug_level(level) {
- await promise_utils.exec(`${MONGO_SHELL} --quiet --eval 'db.setLogLevel(${level})'`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- });
- }
-
- dbg.log0(`UPGRADE: upgrading mongodb schemas. secret=${secret} ver=${ver} params=`, params);
- let UPGRADE_SCRIPTS = [];
- if (params.should_upgrade_schemas) {
- UPGRADE_SCRIPTS = [
- { file: 'mongo_upgrade_mark_completed.js' }
- ];
- } else {
- UPGRADE_SCRIPTS = [
- { file: 'mongo_upgrade_wait_for_master.js' }
- ];
- }
-
- // set mongo debug level
- await set_mongo_debug_level(5);
- const [old_major, old_minor, old_patch] = parse_ver(params.old_version.split('-')[0]);
- // calculate single int value to represent version
- const old_ver_value = (old_major * 10000) + (old_minor * 100) + old_patch;
- for (const script of UPGRADE_SCRIPTS) {
- const [script_major, script_minor, script_patch] = script.version || [99, 99, 99];
- const script_ver_value = (script_major * 10000) + (script_minor * 100) + script_patch;
- // only run upgrade script if we upgrade from a version older than the upgrade script version
- if (old_ver_value < script_ver_value) {
- dbg.log0(`UPGRADE: Running Mongo Upgrade Script ${script.file}`);
- try {
- const stdout = await promise_utils.exec(`${MONGO_SHELL} --eval "var param_secret='${secret}', version='${ver}', is_pure_version=${is_pure_version}" ${CORE_DIR}/src/deploy/mongo_upgrade/${script.file}`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- });
- const lines = stdout.split('\n');
- dbg.log0(`UPGRADE: Mongo Upgrade Script output:`);
- for (const line of lines) {
- dbg.log0(`UPGRADE: [${script.file}] ${line}`);
- }
- } catch (err) {
- dbg.error(`Failed Mongo Upgrade Script ${script.file}`, err);
- await set_mongo_debug_level(0);
- throw err;
- }
- } else {
- dbg.log0(`UPGRADE: Skipping old Mongo Upgrade Script ${script.file}`);
- }
- }
-
- if (!params.is_cluster ||
- (params.is_cluster && params.mongodb_upgraded && await mongo_client.instance().is_master(params.ip))) {
- // if mongodb was upgraded, once all members are up and schemas are upgraded, enable backwards-incompatible 3.6 features
- dbg.log0(`this is master (${params.ip}). setting feature version to ${params.feature_version} after mongodb upgrade`);
- await mongo_client.instance().set_feature_version({ version: params.feature_version });
- }
-
- await set_mongo_debug_level(0);
-
- dbg.log0('UPGRADE: upgrade_mongodb_schemas: Success');
-}
-
-async function update_data_version() {
- const DATA_VERSION_PATH = '/data/noobaa_version';
- dbg.log0(`UPGRADE: updating data version to ${pkg.version}`);
- await fs.writeFileAsync(DATA_VERSION_PATH, pkg.version);
-}
-
-async function upgrade_agents() {
- if (process.env.CONTAINER_PLATFORM !== 'KUBERNETES') return;
- dbg.log0(`UPGRADE: updating agents yaml to the new version`);
- await exec(`kubectl set image statefulset noobaa-agent noobaa-agent=noobaa/noobaa-agent:${pkg.version}`, {
- ignore_err: true
- });
-}
-
-async function after_upgrade_cleanup() {
- dbg.log0(`UPGRADE: deleting ${EXTRACTION_PATH}`);
- await fs_utils.folder_delete(`${EXTRACTION_PATH}`);
- await exec(`rm -f /tmp/*.tar.gz`, { ignore_err: true });
- await exec(`rm -rf /tmp/v*`, { ignore_err: true });
- await exec(`rm -rf /backup/build/public/*diagnostics*`, { ignore_err: true });
- await exec(`rm -f ${UPGRADE_INFO_FILE_PATH}`, { ignore_err: true });
-}
-
-function parse_ver(ver) {
- return ver.split('.').map(i => Number.parseInt(i, 10));
-}
-
-
-// compares 2 versions. returns positive if ver1 is larger, negative if ver2, 0 if equal
-function version_compare(ver1, ver2) {
- const ver1_arr = parse_ver(ver1);
- const ver2_arr = parse_ver(ver2);
- const max_length = Math.max(ver1_arr.length, ver2_arr.length);
- for (let i = 0; i < max_length; ++i) {
- const comp1 = ver1_arr[i] || 0;
- const comp2 = ver2_arr[i] || 0;
- const diff = comp1 - comp2;
- // if version components differ, return the difference
- if (diff) return diff;
- }
- return 0;
-}
-
-
-async function get_upgrade_info() {
- try {
- let upgrade_info = JSON.parse(await fs.readFileAsync(UPGRADE_INFO_FILE_PATH));
- dbg.log0('UPGRADE: found existing upgrade info', upgrade_info);
- return upgrade_info;
- } catch (err) {
- if (err.code !== 'ENOENT') {
- dbg.error(`got unexpected error when reading upgrade info file ${UPGRADE_INFO_FILE_PATH}`, err);
- }
- dbg.log0('there is no previous upgrade info');
- }
-}
-
-async function set_upgrade_info(new_upgrade_info) {
- try {
- const upgrade_data = JSON.stringify(new_upgrade_info);
- await fs.writeFileAsync(UPGRADE_INFO_FILE_PATH, upgrade_data);
- dbg.log0('UPGRADE: upgrade info updated successfully with upgrade_info =', new_upgrade_info);
- } catch (err) {
- dbg.error(`got unexpected error when writing upgrade info file ${UPGRADE_INFO_FILE_PATH}`, err);
- }
-}
-
-
-
-
-
-exports.run_platform_upgrade_steps = run_platform_upgrade_steps;
-exports.platform_upgrade_init = platform_upgrade_init;
-exports.backup_old_version = backup_old_version;
-exports.restore_old_version = restore_old_version;
-exports.copy_new_code = copy_new_code;
-exports.build_dotenv = build_dotenv;
-exports.update_services = update_services;
-exports.upgrade_mongodb_version = upgrade_mongodb_version;
-exports.upgrade_mongodb_schemas = upgrade_mongodb_schemas;
-exports.upgrade_agents = upgrade_agents;
-exports.update_data_version = update_data_version;
-exports.after_upgrade_cleanup = after_upgrade_cleanup;
-exports.stopped_services_during_upgrade = stopped_services_during_upgrade;
-exports.stop_services = stop_services;
-exports.start_services = start_services;
-exports.version_compare = version_compare;
-exports.get_upgrade_info = get_upgrade_info;
-exports.set_upgrade_info = set_upgrade_info;
diff --git a/src/upgrade/pre_upgrade_backup.sh b/src/upgrade/pre_upgrade_backup.sh
deleted file mode 100755
index 841322ab60..0000000000
--- a/src/upgrade/pre_upgrade_backup.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-set -e
-
-BACKUP_DIR=/backup
-ETC_BACKUP_DIR=${BACKUP_DIR}/etc
-CORE_DIR=/root/node_modules/noobaa-core
-
-
-if [ "$1" == "restore" ]; then
- echo "UPGRADE: restoring previous version files"
- if [ -d ${BACKUP_DIR}/noobaa-core ]; then
- echo "UPGRADE: restoring ${CORE_DIR}"
- echo "UPGRADE: removing directory of failed upgrade: ${CORE_DIR}"
- rm -rf ${CORE_DIR}
- echo "UPGRADE: copying ${BACKUP_DIR}/noobaa-core to ${CORE_DIR}"
- /bin/cp -rf ${BACKUP_DIR}/noobaa-core ${CORE_DIR}
- echo "UPGRADE: ${CORE_DIR} restored successfully"
- else
- echo "UPGRADE: Restore error. could not find directory ${BACKUP_DIR}/noobaa-core to restore from"
- fi
-
- if [ -d ${ETC_BACKUP_DIR} ]; then
- echo "UPGRADE: restoring ${ETC_BACKUP_DIR}"
- /bin/cp -rf ${ETC_BACKUP_DIR}/* /etc/
- echo "UPGRADE: ${ETC_BACKUP_DIR} restored successfully"
- else
- echo "UPGRADE: Restore error. could not find directory ${ETC_BACKUP_DIR} to restore from"
- fi
-
-
-else
- echo "UPGRADE: removing old backup dir and creating new one"
- rm -rf ${BACKUP_DIR}
- mkdir -p ${BACKUP_DIR}
- mkdir -p ${ETC_BACKUP_DIR}
-
- echo "UPGRADE: copying noobaa-core to ${BACKUP_DIR}"
- /bin/cp -rf ${CORE_DIR} ${BACKUP_DIR}
-
- echo "UPGRADE: copying /etc files to ${ETC_BACKUP_DIR}"
- /bin/cp /etc/noobaa_* ${ETC_BACKUP_DIR}
- /bin/cp /etc/ntp.conf ${ETC_BACKUP_DIR}
- /bin/cp /etc/yum.conf ${ETC_BACKUP_DIR}
- /bin/cp /etc/dhclient.conf ${ETC_BACKUP_DIR}
- /bin/cp /etc/resolv.conf ${ETC_BACKUP_DIR}
- /bin/cp -r /data/mongo/ssl ${ETC_BACKUP_DIR}
-
- echo "UPGRADE: backup finished successfully"
-fi
diff --git a/src/upgrade/upgrade.js b/src/upgrade/upgrade.js
deleted file mode 100644
index c35afc9037..0000000000
--- a/src/upgrade/upgrade.js
+++ /dev/null
@@ -1,158 +0,0 @@
-/* Copyright (C) 2016 NooBaa */
-"use strict";
-
-var P = require('../util/promise');
-var argv = require('minimist')(process.argv);
-var fs = require('fs');
-const path = require('path');
-
-var promise_utils = require('../util/promise_utils');
-const spawn = require('child_process').spawn;
-const dbg = require('../util/debug_module')(__filename);
-const platform_upgrade = require('./platform_upgrade');
-const supervisor = require('../server/utils/supervisor_ctrl');
-dbg.set_process_name('Upgrade');
-
-const OLD_VER = require('/root/node_modules/noobaa-core/package.json').version;
-const EXTRACTION_PATH = '/tmp/test';
-const TMP_PATH = '/tmp';
-const NEW_UPGRADE_SCRIPT = `./src/upgrade/upgrade.js`;
-const UPGRADE_MANAGER_SCRIPT = `./src/upgrade/upgrade_manager.js`;
-
-
-// read node version from nvmrc
-const NODE_VER = String(fs.readFileSync(path.join(EXTRACTION_PATH, 'noobaa-core/.nvmrc'))).trim();
-const NEW_NODE_BIN = path.join(TMP_PATH, 'v' + NODE_VER, 'bin/node');
-
-
-async function do_upgrade() {
- dbg.log0('UPGRADE: starting do_upgrade in upgrade.js');
- try {
- dbg.log0('UPGRADE: backing up old version before starting..');
- // TODO: move the backup stage into upgrade_manager
- await platform_upgrade.backup_old_version();
- await start_upgrade_manager();
- } catch (err) {
- // TODO: better error handling here. we should store the error in DB before aborting (should use mongo shell)
- // also make sure that exiting here will not cause the upgrade to get stuck in the UI.
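// Aside: an illustrative sketch (editorial, not part of the original sources) of
// how the deleted pre_upgrade_backup.sh above is driven. It takes one optional
// positional argument: with no argument it copies noobaa-core and selected /etc
// files into /backup, and with 'restore' it copies them back. The deleted
// platform_upgrade.js invokes it through its exec() helper, roughly:
//     await exec(`${backup_script}`);                   // take the backup
//     await exec(`${BACKUP_SCRIPT_NEW_PATH} restore`);  // roll back on failure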
- dbg.error('failed preparing environment for upgrade_manager', err);
- // restart services to stop upgrade
- // TODO: better solution than restarting services
- supervisor.restart(['webserver']);
- process.exit(1);
- }
-}
-
-
-
-async function start_upgrade_manager() {
- // add new version upgrade agent to the supervisor.conf
- const upgrade_prog = {};
- const args = [
- '--cluster_str', argv.cluster_str,
- '--old_version', OLD_VER
- ];
- upgrade_prog.name = 'upgrade_manager';
- upgrade_prog.command = `${NEW_NODE_BIN} ${UPGRADE_MANAGER_SCRIPT} ${args.join(' ')}`;
- upgrade_prog.directory = `${EXTRACTION_PATH}/noobaa-core/`;
- upgrade_prog.user = 'root';
- upgrade_prog.autostart = 'true';
- upgrade_prog.priority = '1';
- upgrade_prog.stopsignal = 'KILL';
- dbg.log0('UPGRADE: adding upgrade manager to supervisor and applying. configuration is', upgrade_prog);
- try {
- await supervisor.add_program(upgrade_prog);
- await supervisor.apply_changes();
- } catch (err) {
- dbg.error(`UPGRADE: failed adding upgrade manager to supervisor.
- this probably means that supervisorctl is failing to connect to supervisord. restarting supervisord`, err);
- await supervisor.restart_supervisord();
- throw err;
- }
-}
-
-function main() {
- return run_upgrade()
- .then(function() {
- dbg.log0('run_upgrade: Upgrade finished successfully');
- })
- .catch(function(err) {
- dbg.error('run_upgrade: Failed', err);
- throw err;
- });
-}
-
-if (require.main === module) {
- main();
-}
-
-function run_upgrade() {
- dbg.log0('run_upgrade: Called', argv);
- let file_descriptors;
- // close listening file descriptors inherited from the services running under supervisord
- return promise_utils.exec(`lsof -p $$ | grep LISTEN | awk '{print $4}' | sed 's:\\(.*\\)u:\\1:'`, {
- ignore_rc: false,
- return_stdout: true,
- })
- .then(res => {
- file_descriptors = res.split('\n');
- dbg.log0(`Detected File Descriptors ${file_descriptors}`);
- })
- .then(() => P.map(file_descriptors, function(fd) {
- return promise_utils.exec(`eval "exec ${fd}<&-"`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- });
- }).catch(err => {
- dbg.error(err);
- }))
- .then(() => {
- if (argv.from_file === '') {
- dbg.log0('upgrade.js called with', argv);
- dbg.log0(`Must supply path to upgrade package`);
- throw new Error('run_upgrade: Must supply path to upgrade package');
- } else if (argv.from_file) {
- var stdout;
- var stderr;
- dbg.log0(`upgrade.js called for package extraction`);
- return P.resolve()
- .then(() => {
- var fname = '/log/noobaa_deploy_out_' + argv.fsuffix + '.log';
- stdout = fs.openSync(fname, 'a');
- stderr = fs.openSync(fname, 'a');
- })
- .then(() => {
- let upgrade_proc = spawn(NEW_NODE_BIN, [
- NEW_UPGRADE_SCRIPT,
- '--do_upgrade', 'true',
- '--fsuffix', argv.fsuffix,
- '--cluster_str', argv.cluster_str
- ], {
- detached: true,
- stdio: ['ignore', stdout, stderr, 'ipc'],
- cwd: `${EXTRACTION_PATH}/noobaa-core/`
- });
- upgrade_proc.on('exit', (code, signal) => {
- // upgrade.js is supposed to kill this node process, so it should not exit while
- // this node process is still running. treat exit as error.
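// Aside (illustrative, not part of the original sources): the file-descriptor
// pass at the top of run_upgrade() above lists this process's LISTEN sockets
// via lsof and closes each one, so the detached upgrade child does not inherit
// and hold the service ports. For a descriptor number '42' the generated shell
// command would be:
//     eval "exec 42<&-"
// i.e. close descriptor 42 in the executing shell.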
- if (code) {
- const err_msg = `upgrade.js process was closed with code ${code} and signal ${signal}`;
- dbg.error(err_msg);
- }
- });
- upgrade_proc.on('error', dbg.error);
- });
- } else if (argv.do_upgrade) {
- dbg.log0('upgrade.js called with', argv);
- return do_upgrade();
- }
- })
- .then(function() {
- dbg.log0('run_upgrade: Success');
- })
- .catch(function(err) {
- dbg.error('run_upgrade: Failure', err);
- throw err;
- });
-}
diff --git a/src/upgrade/upgrade_utils.js b/src/upgrade/upgrade_utils.js
deleted file mode 100644
index e063082bcd..0000000000
--- a/src/upgrade/upgrade_utils.js
+++ /dev/null
@@ -1,481 +0,0 @@
-/* Copyright (C) 2016 NooBaa */
-'use strict';
-
-const _ = require('lodash');
-//const ini = require('ini');
-
-const pkg = require('../../package.json');
-const fs = require('fs');
-const dbg = require('../util/debug_module')(__filename);
-const spawn = require('child_process').spawn;
-const fs_utils = require('../util/fs_utils');
-const P = require('../util/promise');
-const os_utils = require('../util/os_utils');
-const config = require('../../config');
-const promise_utils = require('../util/promise_utils');
-const phone_home_utils = require('../util/phone_home');
-const argv = require('minimist')(process.argv);
-
-const TMP_PATH = '/tmp';
-const EXTRACTION_PATH = `${TMP_PATH}/test`;
-const NEW_TMP_ROOT = `${EXTRACTION_PATH}/noobaa-core`;
-const PACKAGE_FILE_NAME = 'new_version.tar.gz';
-const SPAWN_SCRIPT = `${NEW_TMP_ROOT}/src/deploy/NVA_build/two_step_upgrade_checkups_spawn.sh`;
-const ERRORS_PATH = `${TMP_PATH}/new_tests_errors.json`;
-//const EPEL_REPO_PATH = '/etc/yum.repos.d/epel.repo';
-
-const ERROR_MAPPING = {
- COULD_NOT_COPY_PACKAGE: 'Failed to prepare the package for extraction, try to re-download the upgrade package and upload again.',
- COULD_NOT_RUN_TESTS: 'Failed to perform pre-upgrade tests, try to re-download the upgrade package and upload again.',
- PACKAGE_JSON_NOT_EXISTS: 'Uploaded package is not a NooBaa upgrade package, try to re-download the upgrade package and upload again.',
- COULD_NOT_EXTRACT_VERSION: 'Uploaded package is not a NooBaa upgrade package, try to re-download the upgrade package and upload again.',
- MAJOR_VERSION_CHANGE: 'Upgrade from the current version to the uploaded version is not supported.',
- MIN_REQUIRED_VERSION: 'Upgrade to this version is only supported from version 2.3.1 and up.',
- CANNOT_DOWNGRADE: 'Downgrade to an older version is not supported.',
- INTERNET_CONNECTIVITY: 'Failed to verify internet connectivity. Check network connectivity or set a proxy address.',
- COULD_NOT_GET_RAW_STORAGE: 'Failed to perform pre-upgrade tests.',
- LOCAL_HARDDRIVE_MEMORY: `Not enough hard drive space on server required for upgrade, at least ${config.MIN_MEMORY_FOR_UPGRADE / 1024 / 1024}MB is required per server. Please increase local disk.`,
- PACKAGE_EXTRACTION: 'Failed to extract NooBaa upgrade package, try to re-download the upgrade package and upload again.',
- PACKAGE_INSTALLATION_TIMEOUT: 'Failed on pre-upgrade packages timeout.',
- COULD_NOT_EXTRACT_PARAMS: 'Failed to perform pre-upgrade tests.',
- CANNOT_UPGRADE_WITHOUT_SYSTEM: 'Failed to perform pre-upgrade tests due to no system on the server.',
- COULD_NOT_INSTALL_PACKAGES: 'Failed on pre-upgrade packages.',
- NTP_TIMESKEW_FAILED: 'The difference between the server time and a web NTP server time is too large.',
- NTP_COMMUNICATION_ERROR: 'Failed to check time skew, please configure NTP server or verify internet connectivity.',
- MISSING_SUPERVISOR_SOCK: 'We have encountered a known issue with the upgrade process which requires a manual intervention. Please contact support regarding internal issue #4849.',
- UNKNOWN: 'Failed with an internal error.'
-};
-
-const ERROR_REPORT_MAPPING = {
- MISSING_SUPERVISOR_SOCK: 'Encountered internal issue #4849'
-};
-
-let staged_package = 'UNKNOWN';
-
-class ExtractionError extends Error {}
-class VersionMismatchError extends Error {}
-class NewTestsError extends Error {}
-
-function pre_upgrade(params) {
- dbg.log0('UPGRADE:', 'pre_upgrade called with params =', params);
- return P.resolve()
- .then(() => promise_utils.exec(`cp -f ${params.upgrade_path} ${TMP_PATH}/${PACKAGE_FILE_NAME}`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- }))
- .catch(err => {
- dbg.error('pre_upgrade: package copying failed', err);
- throw new ExtractionError('COULD_NOT_COPY_PACKAGE');
- })
- .then(() => pre_upgrade_checkups(params))
- .then(() => {
- dbg.log0('new_pre_upgrade spawn');
- // TODO: Should probably do some sort of timeout on the spawn or something
- // Since we already had problems of it just getting stuck and not returning
- return promise_utils.exec(`chmod 777 ${SPAWN_SCRIPT}; ${SPAWN_SCRIPT} > /var/upgrade_log.log`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- })
- .catch(err => {
- dbg.error('new_pre_upgrade spawn had errors', err);
- return fs.readFileAsync(ERRORS_PATH)
- .catch(err_file => {
- dbg.log0('could not read error file', err_file);
- throw new Error('COULD_NOT_RUN_TESTS');
- })
- .then(errors => {
- dbg.log0('found errors in spawn: ', String(errors));
- const stored_error = JSON.parse(errors);
- const new_test_error = new NewTestsError(stored_error.message);
- new_test_error.report_info = stored_error.report_info;
- throw new_test_error;
- });
- });
- })
- .then(() => _.omitBy({
- result: true,
- tested_date: Date.now(),
- staged_package: staged_package || 'UNKNOWN'
- }, _.isUndefined))
- .catch(error => {
- let err_message = ERROR_MAPPING[error.message] || ERROR_MAPPING.UNKNOWN;
- let report_info = ERROR_REPORT_MAPPING[error.message];
- dbg.error('pre_upgrade: HAD ERRORS', error);
- if (error instanceof ExtractionError) { //Failed in extracting, no staged package
- staged_package = 'UNKNOWN';
- }
- if (error instanceof NewTestsError) {
- err_message = error.message;
- report_info = error.report_info;
- }
- return _.omitBy({
- result: false,
- error: err_message,
- report_info,
- tested_date: Date.now(),
- staged_package: staged_package || 'UNKNOWN'
- }, _.isUndefined);
- });
-}
-
-
-async function do_upgrade(upgrade_file, is_clusterized, err_handler) {
- try {
- err_handler = err_handler || dbg.error;
- dbg.log0('UPGRADE file', upgrade_file, 'upgrade.js path:', process.cwd() + '/src/deploy/NVA_build');
- var fsuffix = new Date()
- .toISOString()
- .replace(/T/, '-')
- .substr(5, 11);
- var fname = '/var/noobaa_deploy_out_' + fsuffix + '.log';
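// Aside (worked example, not part of the original sources; actual values depend
// on the clock): for an ISO timestamp '2019-07-15T08:30:45.123Z' the fsuffix
// chain above yields:
//     .replace(/T/, '-')  ->  '2019-07-15-08:30:45.123Z'
//     .substr(5, 11)      ->  '07-15-08:30'
// so fname becomes '/var/noobaa_deploy_out_07-15-08:30.log'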
- var stdout = fs.openSync(fname, 'a');
- var stderr = fs.openSync(fname, 'a');
- let cluster_str = is_clusterized ? 'cluster' : ' ';
- dbg.log0('command: /usr/local/bin/node ',
- process.cwd() + '/src/upgrade/upgrade.js --from_file ' + upgrade_file, '--fsuffix', fsuffix, '--cluster_str', cluster_str);
- let upgrade_proc = spawn('nohup', [
- '/usr/local/bin/node',
- process.cwd() + '/src/upgrade/upgrade.js',
- '--from_file', upgrade_file,
- '--fsuffix', fsuffix,
- '--cluster_str', cluster_str
- ], {
- detached: true,
- stdio: ['ignore', stdout, stderr],
- cwd: process.cwd()
- });
- upgrade_proc.on('exit', (code, signal) => {
- // upgrade.js is supposed to kill this node process, so it should not exit while
- // this node process is still running. treat exit as error.
- if (code) {
- const err_msg = `upgrade.js process was closed with code ${code} and signal ${signal}`;
- err_handler(err_msg);
- }
- });
- upgrade_proc.on('error', err_handler);
- } catch (err) {
- err_handler(err);
- }
-}
-
-function test_major_version_change() {
- const get_version = `tar -zxvOf ${TMP_PATH}/${PACKAGE_FILE_NAME} noobaa-core/package.json | grep version | awk '{print $2}'`;
- const check_exists = `tar -tf ${TMP_PATH}/${PACKAGE_FILE_NAME} | grep noobaa-core/package.json`;
- return promise_utils.exec(check_exists, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- }).catch(err => {
- dbg.error('test_major_version_change package.json not exists', err);
- throw new Error('PACKAGE_JSON_NOT_EXISTS');
- })
- .then(() => promise_utils.exec(get_version, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- }).catch(err => {
- dbg.error('Major change extraction had errors', err);
- throw new Error('COULD_NOT_EXTRACT_VERSION');
- }))
- .then(ver => {
- dbg.log0('new package version is', ver);
- staged_package = ver.replace(/[",]/g, '');
- const [staged_major, staged_minor, staged_patch] = staged_package.split('-')[0].split('.').map(str => Number(str));
- const [current_major, current_minor, current_patch] = pkg.version.split('-')[0].split('.').map(str => Number(str));
- if (staged_major < 3) {
- dbg.error('Unsupported upgrade, 3.X to 1.X/2.X');
- throw new VersionMismatchError('MAJOR_VERSION_CHANGE');
- }
-
- // calc value of versions to compare
- const staged_ver_val = (staged_major * 10000) + (staged_minor * 100) + staged_patch;
- const current_ver_val = (current_major * 10000) + (current_minor * 100) + current_patch;
- if (staged_ver_val < current_ver_val) {
- dbg.error(`Unsupported upgrade, cannot downgrade. staged version = ${staged_package}, current version = ${pkg.version}`);
- throw new VersionMismatchError('CANNOT_DOWNGRADE');
- }
- });
-}
-
-function test_internet_connectivity() {
- return P.resolve()
- .then(() => phone_home_utils.verify_connection_to_phonehome())
- .catch(err => {
- dbg.error('test_internet_connectivity failed', err);
- throw new Error('INTERNET_CONNECTIVITY');
- });
-}
-
-function test_local_harddrive_memory() {
- return P.resolve()
- //This logic is performed here for two reasons:
- //Reading the HB from the mongo is possible, but would also require reading the noobaa_sec to identify which is
- //the current server to find it in the shards array. In addition, HB storage status might be over the threshold but
- //actual state is below and the package won't have space to be written
- .then(() => os_utils.read_drives())
- .then(drives => {
- let root = drives.find(drive => drive.mount === '/');
- if (root.storage.free < config.MIN_MEMORY_FOR_UPGRADE) {
- dbg.error(`NOT_ENOUGH_MEMORY_IN_MACHINE MEM_IN_BYTES:${root.storage.free}`);
- throw new Error('LOCAL_HARDDRIVE_MEMORY');
- }
- });
-
-}
-
-
-async function test_supervisor_sock() {
- try {
- await fs_utils.file_must_exist('/var/log/supervisor.sock');
- } catch (err) {
- if (err.code) {
- dbg.error('could not find /var/log/supervisor.sock');
- throw new Error('MISSING_SUPERVISOR_SOCK');
- }
- dbg.error('encountered unknown error in test_supervisor_sock. continuing with upgrade', err);
- }
-}
-
-
-
-function test_package_extraction() {
- return P.resolve()
- .then(() => extract_package())
- .catch(err => {
- dbg.error('extract_package: Failed with', err);
- throw new Error('PACKAGE_EXTRACTION');
- });
-}
-
-function new_pre_upgrade() {
- dbg.log0(`starting new_pre_upgrade `);
- return P.resolve()
- .then(() => new_pre_upgrade_checkups())
- //.then(() => packages_upgrade())
- .timeout(20 * 60 * 1000, 'PACKAGE_INSTALLATION_TIMEOUT');
-}
-
-function pre_upgrade_checkups(params) {
- dbg.log0('performing pre_upgrade_checkups. extract_package = ', params.extract_package);
- return P.join(
- test_major_version_change(),
- P.resolve(params.extract_package && test_package_extraction())
- )
- .catch(err => {
- dbg.error('failed pre_upgrade_checkups with error', err);
- throw new ExtractionError(err.message);
- });
-}
-
-
-function new_pre_upgrade_checkups() {
- dbg.log0(`running new params checkups`);
- return P.join(
- test_internet_connectivity(),
- // TODO: Check the NTP with consideration to the proxy
- test_local_harddrive_memory(),
- test_supervisor_sock()
- );
-}
-
-/*
-async function do_yum_update() {
- try {
- dbg.log0('UPGRADE: do_yum_update: Called');
- dbg.log0('UPGRADE: copying /etc/localtime to tmp');
- await promise_utils.exec(`cp -fd /etc/localtime /tmp`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- });
- dbg.log0('UPGRADE: calling yum clean all');
- const clean_res = await promise_utils.exec(`yum clean all`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- });
- dbg.log0('UPGRADE: yum clean result:', clean_res);
- let update_retries = 3;
- while (update_retries > 0) {
- try {
- dbg.log0('UPGRADE: calling yum update -y');
- const update_res = await promise_utils.exec(`yum update -y`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- });
- dbg.log0('UPGRADE: yum update result:', update_res);
- // no retries necessary
- update_retries = 0;
- } catch (err) {
- update_retries -= 1;
- if (update_retries === 0) throw err;
- dbg.error(`yum update failed with error. retrying in 30 seconds`, err);
- await P.delay(30000);
- }
- }
- await promise_utils.exec(`yum clean all`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- });
-
- dbg.log0(`Updated yum packages`);
-
- await promise_utils.exec(`cp -fd /tmp/localtime /etc`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- });
- } catch (err) {
- dbg.error('do_yum_update: Failure', err);
- throw err;
-
- }
-}
-*/
-/*
-async function fix_epel_repo() {
- const epel_repo_content = (await fs.readFileAsync(EPEL_REPO_PATH)).toString();
- let write_file = false;
- // fix cases where quotes were inserted around links - caused yum parsing errors
- if (epel_repo_content.includes('"')) {
- dbg.log0('UPGRADE: found repo components wrapped in quotes. will remove them');
- write_file = true;
- }
- const repo_obj = ini.parse(epel_repo_content);
- _.each(repo_obj, rep => {
- if (!rep.mirrorlist) {
- write_file = true;
- rep.mirrorlist = 'https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch';
- }
- });
- if (write_file) {
- // remove quotes that are inserted by ini.stringify
- const epel_repo_data = ini.stringify(repo_obj).replace(/"/g, '');
- dbg.log0(`UPGRADE: writing ${EPEL_REPO_PATH}:`, epel_repo_data);
- await fs.writeFileAsync(EPEL_REPO_PATH, epel_repo_data);
- }
-}
-*/
-
-/*
-async function packages_upgrade() {
- try {
-
- await fix_epel_repo();
-
- const packages_to_install = [
- 'sudo',
- 'lsof',
- 'wget',
- 'curl',
- 'ntp',
- 'cronie',
- 'openssh-server',
- 'dialog',
- 'expect',
- 'nc',
- 'tcpdump',
- 'iperf3',
- 'python-setuptools',
- 'bind-utils',
- 'bind',
- 'screen',
- 'strace',
- 'vim',
- 'net-tools',
- 'iptables-services',
- 'rng-tools', // random number generator tools
- ];
- dbg.log0(`install additional packages`);
- const res = await promise_utils.exec(`yum install -y ${packages_to_install.join(' ')}`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- });
- dbg.log0(res);
-
- await promise_utils.exec(`systemctl enable rngd && systemctl start rngd`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- });
-
- await do_yum_update();
-
- } catch (err) {
- dbg.error('packages_upgrade: Failure', err);
- throw new Error('COULD_NOT_INSTALL_PACKAGES');
- }
-
-
-}
-*/
-
-function extract_package() {
- dbg.log0('extract_package: Called');
- // Clean previous extracted package
- return fs_utils.create_fresh_path(EXTRACTION_PATH)
- .then(() => {
- dbg.log0(`extract_package: Path ${EXTRACTION_PATH} was created`);
- })
- .then(() => promise_utils.exec(`cp ${TMP_PATH}/${PACKAGE_FILE_NAME} ${EXTRACTION_PATH}`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- }))
- .then(function() {
- return promise_utils.exec(`cd ${EXTRACTION_PATH}/;tar --same-owner -xzvf ${EXTRACTION_PATH}/${PACKAGE_FILE_NAME} >& /dev/null`, {
- ignore_rc: false,
- return_stdout: true,
- trim_stdout: true
- })
- .then(res => {
- dbg.log0(`extract_package: ${EXTRACTION_PATH}/${PACKAGE_FILE_NAME} was extracted`, res);
- })
- .catch(function(err) {
- dbg.error(`Corrupted package file, could not open`, err);
- return fs_utils.folder_delete(EXTRACTION_PATH)
- .finally(() => {
- throw new Error(`extract_package: Corrupted package file, could not open`);
- });
- });
- })
- .then(() => {
- dbg.log0('extract_package: Success');
- })
- .catch(err => {
- dbg.error('extract_package: Failure', err);
- throw err;
- });
-}
-
-if (require.main === module) {
- if (argv.new_pre_upgrade) {
- return new_pre_upgrade()
- .then(() => {
- dbg.log0('new_pre_upgrade: SUCCESS');
- process.exit(0);
- })
- .catch(err => {
- dbg.error('new_pre_upgrade: FAILED:', err);
- // This is done because the new spawn only knows the new errors
- return fs.writeFileAsync(ERRORS_PATH, JSON.stringify({
- message: (ERROR_MAPPING[err.message] || ERROR_MAPPING.UNKNOWN),
- report_info: ERROR_REPORT_MAPPING[err.message]
- }))
- .catch(error => {
- dbg.error('new_pre_upgrade: failed to write error file', error);
- })
- .finally(() => process.exit(1));
- });
- }
-}
-
-//Exports
-exports.pre_upgrade = pre_upgrade;
-exports.do_upgrade = do_upgrade;
-exports.pre_upgrade_failure_error = ERROR_MAPPING.COULD_NOT_RUN_TESTS;
diff --git a/src/util/mongo_client.js b/src/util/mongo_client.js
index f42001f79f..c70e22d877 100644
--- a/src/util/mongo_client.js
+++ b/src/util/mongo_client.js
@@ -180,7 +180,15 @@ class MongoClient extends EventEmitter {
 .then(() => col.db_indexes && P.map(col.db_indexes,
 index => db.collection(col.name).createIndex(index.fields, _.extend({ background: true }, index.options))
 .then(res => dbg.log0('_init_collection: created index', col.name, res))
- ))
+ .catch(err => {
+ if (err.codeName === 'IndexOptionsConflict') {
+ return db.collection(col.name).dropIndex(index.fields)
+ .then(() => db.collection(col.name).createIndex(index.fields, _.extend({ background: true }, index.options)))
+ .then(res => dbg.log0('_init_collection: re-created index with new options', col.name, res));
+ } else {
+ throw err;
+ }
+ })))
 .catch(err => {
 dbg.error('_init_collection: FAILED', col.name, err);
 throw err;
diff --git a/tsconfig.json b/tsconfig.json
index 41481efbf5..4d2b6157c5 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -14,6 +14,7 @@
 "src/server/object_services/*.js",
 "src/test/unit_tests/test_object_io.js",
 "src/test/unit_tests/test_agent_blocks_verifier.js",
+ "src/test/unit_tests/test_tiering_upload.js",
 "src/tools/mapper_speed.js",
 ],
 "exclude": [