PBM-1114 #173
GitHub Actions / JUnit Test Report
failed
Sep 19, 2024 in 0s
32 tests run, 21 passed, 9 skipped, 2 failed.
Annotations
Check failure on line 56 in pbm-functional/pytest/test_PBM-1114.py
github-actions / JUnit Test Report
test_PBM-1114.test_logical_PBM_T266
AssertionError: Starting backup '2024-09-19T06:45:25Z'....
Waiting for '2024-09-19T06:45:25Z' backup.......................... done
Backup '2024-09-19T06:45:25Z' to remote store 'http://nginx-minio:21114/bcp/pbme2etest'
assert 0 != 0
+ where 0 = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f7a46c15150>, exit_status=0, command=b'pbm ........... done\nBackup '2024-09-19T06:45:25Z' to remote store 'http://nginx-minio:21114/bcp/pbme2etest'\n", _stderr=b'').rc
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f7a47ad5110>
    @pytest.mark.timeout(300, func_only=True)
    def test_logical_PBM_T266(start_cluster, cluster):
        cluster.check_pbm_status()
        pymongo.MongoClient(cluster.connection)["test"]["test"].insert_many(documents)
        result = cluster.exec_pbm_cli("backup --wait")
>       assert result.rc != 0, result.stdout + result.stderr
E       AssertionError: Starting backup '2024-09-19T06:45:25Z'....
E       Waiting for '2024-09-19T06:45:25Z' backup.......................... done
E       Backup '2024-09-19T06:45:25Z' to remote store 'http://nginx-minio:21114/bcp/pbme2etest'
E
E       assert 0 != 0
E       + where 0 = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f7a46c15150>, exit_status=0, command=b'pbm ........... done\nBackup '2024-09-19T06:45:25Z' to remote store 'http://nginx-minio:21114/bcp/pbme2etest'\n", _stderr=b'').rc
test_PBM-1114.py:56: AssertionError
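Note on the T266 failure: the assertion expects pbm backup --wait to exit non-zero in this scenario, yet the command completed with exit status 0. The traceback does not show the helper behind the assertion; below is a minimal sketch of what a helper like cluster.exec_pbm_cli could look like, assuming it wraps testinfra's Docker backend the same way make_restore does further down. The helper body itself is an assumption, not the project's confirmed implementation.

    import testinfra

    def exec_pbm_cli(self, cmd):
        # Sketch only: run a pbm subcommand inside the PBM CLI container.
        # Returns a testinfra CommandResult; .rc carries the exit status
        # the test asserts on (non-zero expected here, 0 observed).
        host = testinfra.get_host("docker://" + self.pbm_cli)
        return host.run("pbm " + cmd)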
Check failure on line 86 in pbm-functional/pytest/test_PBM-1297.py
github-actions / JUnit Test Report
test_PBM-1297.test_logical_pitr_PBM_T253
AssertionError: Starting restore 2024-09-19T07:05:40.242397046Z to point-in-time 2024-09-19T07:04:04 from '2024-09-19T07:02:41Z'...Started logical restore.
Waiting to finish....Error: operation failed with: reply oplog: replay chunk 1726729365.1726729442: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1726729413,"I":2},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"config.databases","Object":[{"Key":"_id","Value":"test"},{"Key":"primary","Value":"rs2"},{"Key":"partitioned","Value":false},{"Key":"version","Value":[{"Key":"uuid","Value":{"Subtype":4,"Data":"RkJgQzaMQhCaGZoKNJqwEQ=="}},{"Key":"timestamp","Value":{"T":1726729412,"I":15}},{"Key":"lastMod","Value":1}]}],"Query":[{"Key":"_id","Value":"test"}],"UI":{"Subtype":4,"Data":"QGp36c8PQdSDE85QA4+n2A=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null,"MultiOpType":null} | merr <nil>: applyOps: (NamespaceNotFound) cannot apply insert or update operation on a non-existent namespace config.databases: { ts: Timestamp(1726729413, 2), t: 1, v: 2, op: "i", ns: "config.databases", o: { _id: "test", primary: "rs2", partitioned: false, version: { uuid: UUID("46426043-368c-4210-9a19-9a0a349ab011"), timestamp: Timestamp(1726729412, 15), lastMod: 1 } }, o2: { _id: "test" }, ui: UUID("406a77e9-cf0f-41d4-8313-ce50038fa7d8") }
Raw output
start_cluster = True, cluster = <cluster.Cluster object at 0x7f7a468bb310>
newcluster = <cluster.Cluster object at 0x7f7a46a50f90>
    @pytest.mark.timeout(600, func_only=True)
    def test_logical_pitr_PBM_T253(start_cluster, cluster, newcluster):
        cluster.check_pbm_status()
        cluster.make_backup("logical")
        cluster.enable_pitr(pitr_extra_args="--set pitr.oplogSpanMin=0.5")
        # Create the first database during oplog slicing
        client = pymongo.MongoClient(cluster.connection)
        client.admin.command("enableSharding", "test")
        client.admin.command("shardCollection", "test.test", key={"_id": "hashed"})
        for i in range(100):
            pymongo.MongoClient(cluster.connection)["test"]["test"].insert_one({"doc": i})
        time.sleep(30)
        pitr = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        backup = "--time=" + pitr
        Cluster.log("Time for PITR is: " + pitr)
        time.sleep(30)
        cluster.disable_pitr()
        time.sleep(10)
        cluster.destroy()
        newcluster.create()
        newcluster.setup_pbm()
        time.sleep(10)
        newcluster.check_pbm_status()
>       newcluster.make_restore(backup, check_pbm_status=True)
test_PBM-1297.py:86:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <cluster.Cluster object at 0x7f7a46a50f90>
name = '--time=2024-09-19T07:04:04', kwargs = {'check_pbm_status': True}
client = MongoClient(host=['newmongos:27017'], document_class=dict, tz_aware=False, connect=True)
result = CommandResult(backend=<testinfra.backend.docker.DockerBackend object at 0x7f7a468ae590>, exit_status=1, command=b'time...26729412, 15), lastMod: 1 } }, o2: { _id: "test" }, ui: UUID("406a77e9-cf0f-41d4-8313-ce50038fa7d8") }\n', _stderr=b'')
n = <testinfra.host.Host docker://newrscfg01>, timeout = 240, error = ''
host = 'newrscfg01', container = <Container: 76096c7ddb2b>
    def make_restore(self, name, **kwargs):
        if self.layout == "sharded":
            client = pymongo.MongoClient(self.connection)
            result = client.admin.command("balancerStop")
            client.close()
            Cluster.log("Stopping balancer: " + str(result))
            self.stop_mongos()
        self.stop_arbiters()
        n = testinfra.get_host("docker://" + self.pbm_cli)
        timeout = time.time() + 60
        while True:
            if not self.get_status()['running']:
                break
            if time.time() > timeout:
                assert False, "Cannot start restore, another operation running"
            time.sleep(1)
        Cluster.log("Restore started")
        timeout = kwargs.get('timeout', 240)
        result = n.run('timeout ' + str(timeout) + ' pbm restore ' + name + ' --wait')
        if result.rc == 0:
            Cluster.log(result.stdout)
        else:
            # try to catch possible failures if timeout exceeded
            error = ''
            for host in self.mongod_hosts:
                try:
                    container = docker.from_env().containers.get(host)
                    get_logs = container.exec_run(
                        'cat /var/lib/mongo/pbm.restore.log', stderr=False)
                    if get_logs.exit_code == 0:
                        Cluster.log(
                            "!!!!Possible failure on {}, file pbm.restore.log was found:".format(host))
                        logs = get_logs.output.decode('utf-8')
                        Cluster.log(logs)
                        if '"s":"F"' in logs:
                            error = logs
                except docker.errors.APIError:
                    pass
            if error:
                assert False, result.stdout + result.stderr + "\n" + error
            else:
>               assert False, result.stdout + result.stderr
E               AssertionError: Starting restore 2024-09-19T07:05:40.242397046Z to point-in-time 2024-09-19T07:04:04 from '2024-09-19T07:02:41Z'...Started logical restore.
E               Waiting to finish....Error: operation failed with: reply oplog: replay chunk 1726729365.1726729442: apply oplog for chunk: applying an entry: op: {"Timestamp":{"T":1726729413,"I":2},"Term":1,"Hash":null,"Version":2,"Operation":"i","Namespace":"config.databases","Object":[{"Key":"_id","Value":"test"},{"Key":"primary","Value":"rs2"},{"Key":"partitioned","Value":false},{"Key":"version","Value":[{"Key":"uuid","Value":{"Subtype":4,"Data":"RkJgQzaMQhCaGZoKNJqwEQ=="}},{"Key":"timestamp","Value":{"T":1726729412,"I":15}},{"Key":"lastMod","Value":1}]}],"Query":[{"Key":"_id","Value":"test"}],"UI":{"Subtype":4,"Data":"QGp36c8PQdSDE85QA4+n2A=="},"LSID":null,"TxnNumber":null,"PrevOpTime":null,"MultiOpType":null} | merr <nil>: applyOps: (NamespaceNotFound) cannot apply insert or update operation on a non-existent namespace config.databases: { ts: Timestamp(1726729413, 2), t: 1, v: 2, op: "i", ns: "config.databases", o: { _id: "test", primary: "rs2", partitioned: false, version: { uuid: UUID("46426043-368c-4210-9a19-9a0a349ab011"), timestamp: Timestamp(1726729412, 15), lastMod: 1 } }, o2: { _id: "test" }, ui: UUID("406a77e9-cf0f-41d4-8313-ce50038fa7d8") }
cluster.py:464: AssertionError
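A side note on the PITR target used in T253: the test builds it with datetime.utcnow(), which is deprecated as of Python 3.12. A timezone-aware equivalent that yields the same second-precision UTC string accepted by pbm restore --time is a straightforward swap; the lines below are a sketch, not part of the test suite.

    from datetime import datetime, timezone

    # Same UTC string as datetime.utcnow().strftime(...), but built from
    # a timezone-aware datetime; pbm takes --time as YYYY-MM-DDTHH:MM:SS.
    pitr = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S")
    backup = "--time=" + pitr  # passed through to `pbm restore --time=<ts>`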