From 97a6e19b31ca20b559cf06c32437ef2325b799cb Mon Sep 17 00:00:00 2001
From: Oleg Kulachenko
Date: Mon, 21 Aug 2023 21:18:03 +0400
Subject: [PATCH] object: Add restart nodes lock object tests #556

Add the test_the_object_lock_should_be_kept_after_restarting_the_nodes test.
It verifies https://github.com/nspcc-dev/neofs-node/issues/1502

Signed-off-by: Oleg Kulachenko
---
 .../testsuites/object/test_object_lock.py | 113 +++++++++++++++++-
 1 file changed, 111 insertions(+), 2 deletions(-)

diff --git a/pytest_tests/testsuites/object/test_object_lock.py b/pytest_tests/testsuites/object/test_object_lock.py
index 2f442b87b..b02b9dd08 100755
--- a/pytest_tests/testsuites/object/test_object_lock.py
+++ b/pytest_tests/testsuites/object/test_object_lock.py
@@ -1,13 +1,16 @@
 import logging
+import os
 import re
+import subprocess
 
 import allure
 import pytest
 from cluster import Cluster
 from cluster_test_base import ClusterTestBase
-from common import STORAGE_GC_TIME
+from common import STORAGE_GC_TIME, DOCKER_COMPOSE_STORAGE_CONFIG_FILE, DOCKER_COMPOSE_ENV_FILE
 from complex_object_actions import get_link_object, get_storage_object_chunks
 from epoch import ensure_fresh_epoch, get_epoch, tick_epoch, tick_epoch_and_wait
+from failover_utils import wait_all_storage_nodes_returned
 from grpc_responses import (
     LIFETIME_REQUIRED,
     LOCK_NON_REGULAR_OBJECT,
@@ -18,7 +21,7 @@
     OBJECT_NOT_FOUND,
 )
 from neofs_testlib.shell import Shell
-from node_management import drop_object
+from node_management import drop_object, delete_node_data, stop_storage_nodes
 from pytest import FixtureRequest
 from python_keywords.container import create_container
 from python_keywords.neofs_verbs import delete_object, head_object, lock_object
@@ -119,6 +122,14 @@ def locked_storage_object(
 
 @pytest.mark.grpc_object_lock
 class TestObjectLockWithGrpc(ClusterTestBase):
+    @allure.title("Docker compose restart storage nodes containers with new env file")
+    def docker_compose_restart_storage_nodes(self):
+        stop_storage_nodes(self.cluster.storage_nodes)
+        # Not using `docker-compose restart` because the containers need to be started with the new environment variables.
+        subprocess.run(["docker-compose", "-f", DOCKER_COMPOSE_STORAGE_CONFIG_FILE, "down"])
+        subprocess.run(["docker-compose", "-f", DOCKER_COMPOSE_STORAGE_CONFIG_FILE, "up", "-d"])
+        wait_all_storage_nodes_returned(self.cluster)
+
     @pytest.fixture()
     def new_locked_storage_object(
         self, user_container: StorageContainer, request: FixtureRequest
@@ -144,6 +155,61 @@ def new_locked_storage_object(
 
         return storage_object
 
+    @pytest.fixture(scope="function")
+    @allure.title("Enable metabase resync on start")
+    def enable_metabase_resync_on_start(self):
+        """
+        If the DOCKER_COMPOSE_ENV_FILE already contains any environment variables, they should be retained, and
+        NEOFS_STORAGE_SHARD_0_RESYNC_METABASE and NEOFS_STORAGE_SHARD_1_RESYNC_METABASE should be added to the file.
+
+        If NEOFS_STORAGE_SHARD_0_RESYNC_METABASE and NEOFS_STORAGE_SHARD_1_RESYNC_METABASE are explicitly specified
+        as false, they must be changed to true.
+
+        If DOCKER_COMPOSE_ENV_FILE is empty, NEOFS_STORAGE_SHARD_0_RESYNC_METABASE and
+        NEOFS_STORAGE_SHARD_1_RESYNC_METABASE must be added to DOCKER_COMPOSE_ENV_FILE.
+
+        After the test, the DOCKER_COMPOSE_ENV_FILE must be restored to its initial state.
+        """
+        file_path = DOCKER_COMPOSE_ENV_FILE
+        if not os.path.exists(file_path):
+            pytest.fail(f'File {file_path} does not exist!')
+
+        with open(file_path, 'r') as file:
+            lines = file.readlines()
+        logger.debug(f"Initial file content:\n{''.join(lines)}")
+
+        replacements = {
+            'NEOFS_STORAGE_SHARD_0_RESYNC_METABASE=false': 'NEOFS_STORAGE_SHARD_0_RESYNC_METABASE=true\n',
+            'NEOFS_STORAGE_SHARD_1_RESYNC_METABASE=false': 'NEOFS_STORAGE_SHARD_1_RESYNC_METABASE=true\n'
+        }
+
+        unprocessed_lines = set(replacements.values())
+
+        modified_lines = []
+
+        for line in lines:
+            for original, new in replacements.items():
+                if original in line:
+                    line = line.replace(original, new)
+                    unprocessed_lines.discard(new)
+            modified_lines.append(line)
+
+        modified_lines.extend(unprocessed_lines)
+
+        modified_content = ''.join(modified_lines)
+
+        with open(file_path, 'w') as file:
+            file.write(modified_content)
+        logger.debug(f"Modified file content:\n{modified_content}")
+
+        yield
+
+        with open(file_path, 'w') as file:
+            file.writelines(lines)
+        logger.debug(f"Restored file content:\n{''.join(lines)}")
+        # Restart docker-compose so the nodes start with the restored environment
+        self.docker_compose_restart_storage_nodes()
+
     @allure.title("Locked object should be protected from deletion")
     @pytest.mark.parametrize(
         "locked_storage_object",
@@ -684,3 +750,46 @@ def test_link_object_of_complex_object_should_also_be_protected_from_deletion(
             self.shell,
             self.cluster.default_rpc_endpoint,
         )
+
+    @allure.title("The locked object must be protected from deletion after metabase deletion "
+                  "(metabase resynchronization must be enabled), and after restarting storage nodes")
+    @pytest.mark.parametrize(
+        # Only complex objects are required for this test
+        "new_locked_storage_object",
+        [pytest.lazy_fixture("complex_object_size")],
+        indirect=True,
+    )
+    def test_the_object_lock_should_be_kept_after_metabase_deletion(
+        self,
+        new_locked_storage_object: StorageObjectInfo,
+        enable_metabase_resync_on_start,
+    ):
+        """
+        Lock objects should fill the metabase on resync_metabase
+        """
+        with allure.step("Delete metabase files from storage nodes"):
+            for node in self.cluster.storage_nodes:
+                delete_node_data(node)
+
+        with allure.step(f"Try to delete object {new_locked_storage_object.oid} after metabase deletion"):
+            with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
+                delete_object(
+                    new_locked_storage_object.wallet_file_path,
+                    new_locked_storage_object.cid,
+                    new_locked_storage_object.oid,
+                    self.shell,
+                    self.cluster.default_rpc_endpoint,
+                )
+
+        with allure.step("Restart storage nodes to enable resync_metabase"):
+            self.docker_compose_restart_storage_nodes()
+
+        with allure.step(f"Try to delete object {new_locked_storage_object.oid} after resync_metabase is enabled"):
+            with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
+                delete_object(
+                    new_locked_storage_object.wallet_file_path,
+                    new_locked_storage_object.cid,
+                    new_locked_storage_object.oid,
+                    self.shell,
+                    self.cluster.default_rpc_endpoint,
+                )
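
For reference, a minimal standalone sketch of the env-file rewrite that enable_metabase_resync_on_start applies. The sample content and the expected output below are illustrative assumptions, not taken from a real DOCKER_COMPOSE_ENV_FILE; the real fixture reads that file, rewrites it, and restores the original content on teardown.

# Sketch of the fixture's replacement logic against a hypothetical env-file content.
sample_lines = [
    "NEOFS_LOGGER_LEVEL=debug\n",                     # unrelated variable, retained as-is
    "NEOFS_STORAGE_SHARD_0_RESYNC_METABASE=false\n",  # explicitly disabled, flipped to true
]                                                     # shard 1 variable is absent, so it gets appended

replacements = {
    "NEOFS_STORAGE_SHARD_0_RESYNC_METABASE=false": "NEOFS_STORAGE_SHARD_0_RESYNC_METABASE=true\n",
    "NEOFS_STORAGE_SHARD_1_RESYNC_METABASE=false": "NEOFS_STORAGE_SHARD_1_RESYNC_METABASE=true\n",
}

unprocessed_lines = set(replacements.values())
modified_lines = []
for line in sample_lines:
    for original, new in replacements.items():
        if original in line:
            line = line.replace(original, new)  # flip false -> true in place
            unprocessed_lines.discard(new)      # mark this variable as already handled
    modified_lines.append(line)
modified_lines.extend(unprocessed_lines)        # append variables that were not present in the file

print("".join(modified_lines))
# NEOFS_LOGGER_LEVEL=debug
# NEOFS_STORAGE_SHARD_0_RESYNC_METABASE=true
#   (a blank line follows here, because the replacement value carries its own "\n"
#    in addition to the newline already present on the original line)
# NEOFS_STORAGE_SHARD_1_RESYNC_METABASE=true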