Skip to content

Commit

Permalink
object: Add restart nodes lock object tests #556
Browse files Browse the repository at this point in the history
Added the test_the_object_lock_should_be_kept_after_restarting_the_nodes
test.
This test verifies the issue
nspcc-dev/neofs-node#1502

Signed-off-by: Oleg Kulachenko <[email protected]>
  • Loading branch information
vvarg229 committed Sep 7, 2023
1 parent 930dac1 commit 5c18773
Showing 1 changed file with 116 additions and 3 deletions.
119 changes: 116 additions & 3 deletions pytest_tests/testsuites/object/test_object_lock.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,17 @@
import logging
import os
import re
import subprocess
from time import sleep

import allure
import pytest
from cluster import Cluster
from cluster_test_base import ClusterTestBase
from common import STORAGE_GC_TIME
from common import STORAGE_GC_TIME, DOCKER_COMPOSE_CONFIG_FILE, DOCKER_COMPOSE_ENV_FILE
from complex_object_actions import get_link_object, get_storage_object_chunks
from epoch import ensure_fresh_epoch, get_epoch, tick_epoch
from failover_utils import wait_all_storage_nodes_returned
from grpc_responses import (
LIFETIME_REQUIRED,
LOCK_NON_REGULAR_OBJECT,
Expand All @@ -18,12 +22,15 @@
OBJECT_NOT_FOUND,
)
from neofs_testlib.shell import Shell
from node_management import drop_object
from node_management import drop_object, restart_storage_nodes, delete_node_data, start_storage_nodes, \
stop_storage_nodes
from pytest import FixtureRequest
from docker_composer import DockerCompose
from python_keywords.container import create_container
from python_keywords.neofs_verbs import delete_object, head_object, lock_object
from python_keywords.neofs_verbs import delete_object, head_object, lock_object, put_object_to_random_node
from storage_policy import get_nodes_with_object
from test_control import expect_not_raises, wait_for_success
from testsuites.object.test_object_api import OBJECT_ATTRIBUTES
from utility import parse_time, wait_for_gc_pass_on_storage_nodes

from helpers.container import StorageContainer, StorageContainerInfo
Expand Down Expand Up @@ -119,6 +126,15 @@ def locked_storage_object(

@pytest.mark.grpc_object_lock
class TestObjectLockWithGrpc(ClusterTestBase):

@allure.title("Docker compose restart containers with new env file")
def docker_compose_restart(self):
    """Recreate the storage containers so they pick up a new env file.

    Stops the storage nodes gracefully first, then tears the compose
    project down and brings it back up.  A plain ``docker-compose
    restart`` is not sufficient because restarted containers keep their
    original environment variables.
    """
    stop_storage_nodes(self.cluster.storage_nodes)
    # Not using docker-compose restart because the container needs to be
    # started with new environment variables.
    # check=True: a failed compose invocation must fail the test loudly
    # instead of leaving the cluster half torn down.
    subprocess.run(
        ["docker-compose", "-f", DOCKER_COMPOSE_CONFIG_FILE, "down"],
        check=True,
    )
    subprocess.run(
        ["docker-compose", "-f", DOCKER_COMPOSE_CONFIG_FILE, "up", "-d"],
        check=True,
    )
    wait_all_storage_nodes_returned(self.cluster)

@pytest.fixture()
def new_locked_storage_object(
self, user_container: StorageContainer, request: FixtureRequest
Expand All @@ -144,6 +160,61 @@ def new_locked_storage_object(

return storage_object

@pytest.fixture(scope="function")
@allure.title("Enable metabase resync on start")
def enable_metabase_resync_on_start(self):
    """Force metabase resync for both shards via DOCKER_COMPOSE_ENV_FILE.

    Any pre-existing environment variables in the file are retained.
    If NEOFS_STORAGE_SHARD_0_RESYNC_METABASE /
    NEOFS_STORAGE_SHARD_1_RESYNC_METABASE lines already exist (with any
    value, including an explicit ``false``), they are normalized to
    ``true``; lines that are absent are appended.  On teardown the file
    is restored to its initial content and the compose project is
    restarted so the containers run with their original environment.
    """
    file_path = DOCKER_COMPOSE_ENV_FILE
    if not os.path.exists(file_path):
        pytest.fail(f'File {file_path} does not exist!')

    with open(file_path, 'r') as file:
        lines = file.readlines()
    logger.debug(f"Initial file content:\n{''.join(lines)}")

    resync_vars = (
        'NEOFS_STORAGE_SHARD_0_RESYNC_METABASE',
        'NEOFS_STORAGE_SHARD_1_RESYNC_METABASE',
    )
    # Lines that still need to be appended if no existing line sets them.
    missing_lines = {f'{var}=true\n' for var in resync_vars}

    modified_lines = []
    for line in lines:
        for var in resync_vars:
            # Match any assignment to the variable, not just '=false',
            # so an existing '=true' line is reused rather than
            # duplicated, and the rewritten line always ends with
            # exactly one newline.
            if line.strip().startswith(f'{var}='):
                line = f'{var}=true\n'
                missing_lines.discard(line)
                break
        modified_lines.append(line)

    # sorted() for a deterministic file layout across runs.
    modified_lines.extend(sorted(missing_lines))

    modified_content = ''.join(modified_lines)
    with open(file_path, 'w') as file:
        file.write(modified_content)
    logger.debug(f"Modified file content:\n{modified_content}")

    yield

    # Teardown: return DOCKER_COMPOSE_ENV_FILE to its initial state and
    # restart docker compose to apply the restored environment.
    with open(file_path, 'w') as file:
        file.writelines(lines)
    logger.debug(f"Restored file content:\n{''.join(lines)}")
    self.docker_compose_restart()

@allure.title("Locked object should be protected from deletion")
@pytest.mark.parametrize(
"locked_storage_object",
Expand Down Expand Up @@ -663,3 +734,45 @@ def test_link_object_of_complex_object_should_also_be_protected_from_deletion(
self.shell,
self.cluster.default_rpc_endpoint,
)

@allure.title("Locked object should be protected from deletion after the storage nodes are restarted")
@pytest.mark.parametrize(
    # Only complex objects are required for this test
    "new_locked_storage_object",
    [pytest.lazy_fixture("complex_object_size")],
    indirect=True,
)
def test_the_object_lock_should_be_kept_after_restarting_the_nodes(
    self,
    new_locked_storage_object: StorageObjectInfo,
    enable_metabase_resync_on_start,
):
    """
    Lock objects should fill metabase on resync_metabase.

    Verifies nspcc-dev/neofs-node#1502: after the metabase is wiped and
    rebuilt via resync on node restart, the lock on an object must still
    prevent its deletion.
    """
    with allure.step("Delete metabase files from storage nodes"):
        for node in self.cluster.storage_nodes:
            delete_node_data(node)

    with allure.step(
        f"Try to delete object {new_locked_storage_object.oid} after metabase deletion"
    ):
        with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
            delete_object(
                new_locked_storage_object.wallet_file_path,
                new_locked_storage_object.cid,
                new_locked_storage_object.oid,
                self.shell,
                self.cluster.default_rpc_endpoint,
            )

    with allure.step("Restart storage nodes to enable resync_metabase"):
        self.docker_compose_restart()

    with allure.step(
        f"Try to delete object {new_locked_storage_object.oid} after resync_metabase enabled"
    ):
        with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
            delete_object(
                new_locked_storage_object.wallet_file_path,
                new_locked_storage_object.cid,
                new_locked_storage_object.oid,
                self.shell,
                self.cluster.default_rpc_endpoint,
            )

0 comments on commit 5c18773

Please sign in to comment.