PBM-1297 re-enable the test #220

Merged
1 commit merged on Sep 18, 2024

Commit d446fbc: PBM-1297 re-enable the test
GitHub Actions / JUnit Test Report failed Sep 18, 2024 in 0s

31 tests run, 19 passed, 10 skipped, 2 failed.

Annotations

Check failure on line 86 in pbm-functional/pytest/test_PBM-1297.py


github-actions / JUnit Test Report

test_PBM-1297.test_logical_pitr_PBM_T253

AssertionError: Starting restore 2024-09-18T07:20:59.32762455Z to point-in-time 2024-09-18T07:19:25 from '2024-09-18T07:18:02Z'...Started logical restore.
Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1726643886.1726643963: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1726643933,"I":18},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"config.databases","Object":[{"Key":"_id","Value":"test"},{"Key":"primary","Value":"rs2"},{"Key":"partitioned","Value":true},{"Key":"version","Value":[{"Key":"uuid","Value":{"Subtype":4,"Data":"D2vDP84lRjKwepSxAuPPOg=="}},{"Key":"timestamp","Value":{"T":1726643933,"I":16}},{"Key":"lastMod","Value":1}]}],"Query":null,"UI":{"Subtype":4,"Data":"IjC/7yERRkq/zPnYK7PMQQ=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null,"MultiOpType":null} | merr <nil>: applyOps: (NamespaceNotFound) cannot apply insert or update operation on a non-existent namespace config.databases: { ts: Timestamp(1726643933, 18), t: 1, v: 2, op: "i", ns: "config.databases", o: { _id: "test", primary: "rs2", partitioned: true, version: { uuid: UUID("0f6bc33f-ce25-4632-b07a-94b102e3cf3a"), timestamp: Timestamp(1726643933, 16), lastMod: 1 } }, ui: UUID("2230bfef-2111-464a-bfcc-f9d82bb3cc41") }
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7fa2ae686f10>
newcluster = <cluster.Cluster object at 0x7fa2aeee7710>

    @pytest.mark.timeout(600, func_only=True)
    def test_logical_pitr_PBM_T253(start_cluster,cluster,newcluster):
        cluster.check_pbm_status()
        cluster.make_backup("logical")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
        #Create the first database during oplog slicing
        client=pymongo.MongoClient(cluster.connection)
        client.admin.command("enableSharding", "test")
        client.admin.command("shardCollection", "test.test", key={"_id": "hashed"})
        for i in range(100):
            pymongo.MongoClient(cluster.connection)["test"]["test"].insert_one({"doc":i})
        time.sleep(30)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        backup="--time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        time.sleep(30)
        cluster.disable_pitr()
        time.sleep(10)
        cluster.destroy()
    
        newcluster.create()
        newcluster.setup_pbm()
        time.sleep(10)
        newcluster.check_pbm_status()
>       newcluster.make_restore(backup,check_pbm_status=True)

test_PBM-1297.py:86: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <cluster.Cluster object at 0x7fa2aeee7710>
name = '--time=2024-09-18T07:19:25', kwargs = {'check_pbm_status': True}
client = MongoClient(host=['newmongos:27017'], document_class=dict, tz_aware=False, connect=True)
result = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7fa2ae63c190>, exit_status=1, command=b'time...mestamp: Timestamp(1726643933, 16), lastMod: 1 } }, ui: UUID("2230bfef-2111-464a-bfcc-f9d82bb3cc41") }\n', _stderr=b'')
n = <testinfra.host.Host docker://newrscfg01>, timeout = 240, error = ''
host = 'newrscfg01', container = <Container: 77ead4e78d85>

    def make_restore(self, name, **kwargs):
        if self.layout == "sharded":
            client = pymongo.MongoClient(self.connection)
            result = client.admin.command("balancerStop")
            client.close()
            Cluster.log("Stopping balancer: " + str(result))
            self.stop_mongos()
        self.stop_arbiters()
        n = testinfra.get_host("docker://" + self.pbm_cli)
        timeout = time.time() + 60
    
        while True:
            if not self.get_status()['running']:
                break
            if time.time() > timeout:
                assert False, "Cannot start restore, another operation running"
            time.sleep(1)
        Cluster.log("Restore started")
        timeout=kwargs.get('timeout', 240)
        result = n.run('timeout ' + str(timeout) + ' pbm restore ' + name + ' --wait')
    
        if result.rc == 0:
            Cluster.log(result.stdout)
        else:
            # try to catch possible failures if timeout exceeded
            error=''
            for host in self.mongod_hosts:
                try:
                    container = docker.from_env().containers.get(host)
                    get_logs = container.exec_run(
                        'cat /var/lib/mongo/pbm.restore.log', stderr=False)
                    if get_logs.exit_code == 0:
                        Cluster.log(
                            "!!!!Possible failure on {}, file pbm.restore.log was found:".format(host))
                        logs = get_logs.output.decode('utf-8')
                        Cluster.log(logs)
                        if '"s":"F"' in logs:
                            error = logs
                except docker.errors.APIError:
                    pass
            if error:
                assert False, result.stdout + result.stderr + "\n" + error
            else:
>               assert False, result.stdout + result.stderr
E               AssertionError: Starting restore 2024-09-18T07:20:59.32762455Z to point-in-time 2024-09-18T07:19:25 from '2024-09-18T07:18:02Z'...Started logical restore.
E               Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1726643886.1726643963: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1726643933,"I":18},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"config.databases","Object":[{"Key":"_id","Value":"test"},{"Key":"primary","Value":"rs2"},{"Key":"partitioned","Value":true},{"Key":"version","Value":[{"Key":"uuid","Value":{"Subtype":4,"Data":"D2vDP84lRjKwepSxAuPPOg=="}},{"Key":"timestamp","Value":{"T":1726643933,"I":16}},{"Key":"lastMod","Value":1}]}],"Query":null,"UI":{"Subtype":4,"Data":"IjC/7yERRkq/zPnYK7PMQQ=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null,"MultiOpType":null} | merr <nil>: applyOps: (NamespaceNotFound) cannot apply insert or update operation on a non-existent namespace config.databases: { ts: Timestamp(1726643933, 18), t: 1, v: 2, op: "i", ns: "config.databases", o: { _id: "test", primary: "rs2", partitioned: true, version: { uuid: UUID("0f6bc33f-ce25-4632-b07a-94b102e3cf3a"), timestamp: Timestamp(1726643933, 16), lastMod: 1 } }, ui: UUID("2230bfef-2111-464a-bfcc-f9d82bb3cc41") }

cluster.py:464: AssertionError
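
Context for this failure: the enableSharding command issued while PITR slicing was active produced an insert into config.databases, and replaying that oplog entry on the freshly provisioned target cluster fails because that namespace does not exist there yet. The sketch below is a hypothetical triage helper (not part of the test suite) that pulls the failing namespace and oplog timestamp out of the pbm restore --wait output; the regex is based only on the error text shown in this report, not on any stable PBM output format.

import re

def parse_namespace_not_found(pbm_output):
    # Hypothetical helper: extract the failing namespace and oplog timestamp
    # from a NamespaceNotFound error as formatted in this report.
    ns = re.search(r'non-existent namespace (\S+):', pbm_output)
    ts = re.search(r'ts: Timestamp\((\d+), (\d+)\)', pbm_output)
    if not ns or not ts:
        return None
    return {"namespace": ns.group(1),
            "timestamp": (int(ts.group(1)), int(ts.group(2)))}

sample = ('applyOps: (NamespaceNotFound) cannot apply insert or update '
          'operation on a non-existent namespace config.databases: '
          '{ ts: Timestamp(1726643933, 18), t: 1, op: "i", ... }')
print(parse_namespace_not_found(sample))
# -> {'namespace': 'config.databases', 'timestamp': (1726643933, 18)}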

Check failure on line 110 in pbm-functional/pytest/test_replicaset.py


github-actions / JUnit Test Report

test_replicaset.test_logical_timeseries_PBM_T224

AssertionError: Starting restore 2024-09-18T07:49:56.878759423Z to point-in-time 2024-09-18T07:49:26 from '2024-09-18T07:48:14Z'...Started logical restore.
Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1726645759.1726645789: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1726645760,"I":2},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"test.system.views","Object":[{"Key":"_id","Value":"test.test2"},{"Key":"viewOn","Value":"system.buckets.test2"},{"Key":"pipeline","Value":[[{"Key":"$_internalUnpackBucket","Value":[{"Key":"timeField","Value":"timestamp"},{"Key":"metaField","Value":"data"},{"Key":"bucketMaxSpanSeconds","Value":3600},{"Key":"exclude","Value":[]}]}]]}],"Query":null,"UI":{"Subtype":4,"Data":"9R22xpviSk22JfuNmJw4kw=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null,"MultiOpType":null} | merr <nil>: applyOps: (NamespaceNotFound) Failed to apply operation due to missing collection (f51db6c6-9be2-4a4d-b625-fb8d989c3893): { ts: Timestamp(1726645760, 2), t: 1, v: 2, op: "i", ns: "test.system.views", o: { _id: "test.test2", viewOn: "system.buckets.test2", pipeline: [ { $_internalUnpackBucket: { timeField: "timestamp", metaField: "data", bucketMaxSpanSeconds: 3600, exclude: [] } } ] }, ui: UUID("f51db6c6-9be2-4a4d-b625-fb8d989c3893"), h: 0, wall: new Date(0) }
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7fa2ada34fd0>

    @pytest.mark.timeout(300,func_only=True)
    def test_logical_timeseries_PBM_T224(start_cluster,cluster):
        cluster.check_pbm_status()
        client=pymongo.MongoClient(cluster.connection)
        mongod_version=client.server_info()["version"]
        if version.parse(mongod_version) < version.parse("5.0.0"):
            pytest.skip("Unsupported version for timeseries")
        pymongo.MongoClient(cluster.connection)["test"].create_collection('test1',timeseries={'timeField':'timestamp','metaField': 'data'})
        for i in range(10):
            pymongo.MongoClient(cluster.connection)["test"]["test1"].insert_one({"timestamp": datetime.now(), "data": i})
            time.sleep(0.1)
        cluster.make_backup("logical")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
        time.sleep(30)
        #create new timeseries collection
        pymongo.MongoClient(cluster.connection)["test"].create_collection('test2',timeseries={'timeField':'timestamp','metaField': 'data'})
        for i in range(10):
            pymongo.MongoClient(cluster.connection)["test"]["test1"].insert_one({"timestamp": datetime.now(), "data": i})
            pymongo.MongoClient(cluster.connection)["test"]["test2"].insert_one({"timestamp": datetime.now(), "data": i})
            time.sleep(0.1)
        time.sleep(5)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        backup="--time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        time.sleep(30)
        pymongo.MongoClient(cluster.connection).drop_database('test')
>       cluster.make_restore(backup,check_pbm_status=True)

test_replicaset.py:110: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <cluster.Cluster object at 0x7fa2ada34fd0>
name = '--time=2024-09-18T07:49:26', kwargs = {'check_pbm_status': True}
result = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7fa2adfc7010>, exit_status=1, command=b'time...3600, exclude: [] } } ] }, ui: UUID("f51db6c6-9be2-4a4d-b625-fb8d989c3893"), h: 0, wall: new Date(0) }\n', _stderr=b'')
n = <testinfra.host.Host docker://rs101>, timeout = 240, error = ''
host = 'rs103', container = <Container: d640fa1cfaf7>

    def make_restore(self, name, **kwargs):
        if self.layout == "sharded":
            client = pymongo.MongoClient(self.connection)
            result = client.admin.command("balancerStop")
            client.close()
            Cluster.log("Stopping balancer: " + str(result))
            self.stop_mongos()
        self.stop_arbiters()
        n = testinfra.get_host("docker://" + self.pbm_cli)
        timeout = time.time() + 60
    
        while True:
            if not self.get_status()['running']:
                break
            if time.time() > timeout:
                assert False, "Cannot start restore, another operation running"
            time.sleep(1)
        Cluster.log("Restore started")
        timeout=kwargs.get('timeout', 240)
        result = n.run('timeout ' + str(timeout) + ' pbm restore ' + name + ' --wait')
    
        if result.rc == 0:
            Cluster.log(result.stdout)
        else:
            # try to catch possible failures if timeout exceeded
            error=''
            for host in self.mongod_hosts:
                try:
                    container = docker.from_env().containers.get(host)
                    get_logs = container.exec_run(
                        'cat /var/lib/mongo/pbm.restore.log', stderr=False)
                    if get_logs.exit_code == 0:
                        Cluster.log(
                            "!!!!Possible failure on {}, file pbm.restore.log was found:".format(host))
                        logs = get_logs.output.decode('utf-8')
                        Cluster.log(logs)
                        if '"s":"F"' in logs:
                            error = logs
                except docker.errors.APIError:
                    pass
            if error:
                assert False, result.stdout + result.stderr + "\n" + error
            else:
>               assert False, result.stdout + result.stderr
E               AssertionError: Starting restore 2024-09-18T07:49:56.878759423Z to point-in-time 2024-09-18T07:49:26 from '2024-09-18T07:48:14Z'...Started logical restore.
E               Waiting to finish...Error: operation failed with: reply oplog: replay chunk 1726645759.1726645789: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1726645760,"I":2},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"test.system.views","Object":[{"Key":"_id","Value":"test.test2"},{"Key":"viewOn","Value":"system.buckets.test2"},{"Key":"pipeline","Value":[[{"Key":"$_internalUnpackBucket","Value":[{"Key":"timeField","Value":"timestamp"},{"Key":"metaField","Value":"data"},{"Key":"bucketMaxSpanSeconds","Value":3600},{"Key":"exclude","Value":[]}]}]]}],"Query":null,"UI":{"Subtype":4,"Data":"9R22xpviSk22JfuNmJw4kw=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null,"MultiOpType":null} | merr <nil>: applyOps: (NamespaceNotFound) Failed to apply operation due to missing collection (f51db6c6-9be2-4a4d-b625-fb8d989c3893): { ts: Timestamp(1726645760, 2), t: 1, v: 2, op: "i", ns: "test.system.views", o: { _id: "test.test2", viewOn: "system.buckets.test2", pipeline: [ { $_internalUnpackBucket: { timeField: "timestamp", metaField: "data", bucketMaxSpanSeconds: 3600, exclude: [] } } ] }, ui: UUID("f51db6c6-9be2-4a4d-b625-fb8d989c3893"), h: 0, wall: new Date(0) }

cluster.py:464: AssertionError
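
Context for this failure: on MongoDB 5.0+ a timeseries collection is backed by a system.buckets.<name> collection plus a view definition stored in <db>.system.views, and it is the system.views insert for test2 (created during oplog slicing) that the replay cannot apply on the target. A minimal standalone sketch, assuming a local unauthenticated mongod 5.0+ at localhost:27017 (not part of the test suite), showing what create_collection with timeseries options actually writes:

from datetime import datetime
import pymongo

client = pymongo.MongoClient("localhost:27017")  # assumed local test instance
db = client["test"]
# Creating a timeseries collection implicitly creates the backing bucket
# collection and records a view definition in test.system.views.
db.create_collection("test2", timeseries={"timeField": "timestamp", "metaField": "data"})
db["test2"].insert_one({"timestamp": datetime.now(), "data": 0})

# Typically lists both "test2" (the view) and "system.buckets.test2"
# (the backing collection) on current server versions.
print(db.list_collection_names())
# The view definition printed below is the kind of document the restore
# above failed to insert into test.system.views.
print(list(db["system.views"].find()))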