From 94e650f751086eacd1a272b68ae240cc57801243 Mon Sep 17 00:00:00 2001
From: joseph-sentry
Date: Tue, 10 Dec 2024 11:23:52 -0500
Subject: [PATCH] fix: test results processor return type

There is currently a Sentry bug because the finisher does not handle one of
the possible return values of the processor, so I'm changing the contract to
make it clearer:

- A processor either succeeds or fails, and this is represented by a boolean
  return value.
- Success means the processor parsed at least one file in any of the raw
  uploads it was processing; otherwise it fails.
- Since the finisher is called in a chord with the processors, it receives a
  list of these booleans. If any entry in that list is a success, there is
  some valid test result data.
- Otherwise, we fail the finisher and make an error comment, but still try to
  notify coverage, since there may be some valid coverage data.

(A minimal sketch of the new processor/finisher contract follows the diff.)
---
 tasks/test_results_finisher.py                | 14 ++--
 tasks/test_results_processor.py               | 20 +++---
 .../tests/unit/test_test_results_finisher.py  | 24 +++----
 .../unit/test_test_results_processor_task.py  | 65 ++++---------------
 4 files changed, 38 insertions(+), 85 deletions(-)

diff --git a/tasks/test_results_finisher.py b/tasks/test_results_finisher.py index 1c6c7050c..76fd84f41 100644 --- a/tasks/test_results_finisher.py +++ b/tasks/test_results_finisher.py @@ -53,7 +53,7 @@ class TestResultsFinisherTask(BaseCodecovTask, name=test_results_finisher_task_n def run_impl( self, db_session: Session, - chord_result: dict[str, Any], + chord_result: list[bool], *, repoid: int, commitid: str, @@ -109,7 +109,7 @@ def process_impl_within_lock( repoid: int, commitid: str, commit_yaml: UserYaml, - previous_result: dict[str, Any], + previous_result: list[bool], **kwargs, ): log.info("Running test results finishers", extra=self.extra_dict) @@ -149,7 +149,7 @@ def process_impl_within_lock( db_session.add(totals) db_session.flush() - if self.check_if_no_success(previous_result): + if not self.check_if_any_success(previous_result): # every processor errored, nothing to notify on queue_notify = False @@ -337,12 +337,8 @@ def get_flaky_tests( } return flaky_test_ids - def check_if_no_success(self, previous_result): - return all( - testrun_list["successful"] is False - for result in previous_result - for testrun_list in result - ) + def check_if_any_success(self, previous_result: list[bool]) -> bool: + return any(result for result in previous_result) RegisteredTestResultsFinisherTask = celery_app.register_task(TestResultsFinisherTask()) diff --git a/tasks/test_results_processor.py b/tasks/test_results_processor.py index f5c8a2de7..55203886c 100644 --- a/tasks/test_results_processor.py +++ b/tasks/test_results_processor.py @@ -147,14 +147,12 @@ def run_impl( commit_yaml, arguments_list: list[UploadArguments], **kwargs, - ): + ) -> bool: log.info("Received upload test result processing task") commit_yaml = UserYaml(commit_yaml) repoid = int(repoid) - results = [] - repo_flakes = ( db_session.query(Flake.testid) .filter(Flake.repoid == repoid, Flake.end_date.is_(None)) @@ -170,6 +168,8 @@ def run_impl( should_delete_archive = self.should_delete_archive(commit_yaml) archive_service = ArchiveService(repository) + successful = False + # process each report session's test information for arguments in arguments_list: upload = ( @@ -184,10 +184,10 @@ def run_impl( flaky_test_set, should_delete_archive, ) + if result: + successful = True - results.append(result) - - return results + return successful
@sentry_sdk.trace def _bulk_write_tests_to_db( @@ -407,12 +407,12 @@ def process_individual_upload( upload: Upload, flaky_test_set: set[str], should_delete_archive: bool, - ): + ) -> bool: upload_id = upload.id log.info("Processing individual upload", extra=dict(upload_id=upload_id)) if upload.state == "processed" or upload.state == "has_failed": - return [] + return False payload_bytes = archive_service.read_file(upload.storage_path) try: @@ -424,7 +424,7 @@ def process_individual_upload( upload.state = "not parsed" db_session.flush() - return [] + return False parsing_results: list[ParsingInfo] = [] network: list[str] | None = data.get("network_files") @@ -478,7 +478,7 @@ def process_individual_upload( readable_report = self.rewrite_readable(network, report_contents) archive_service.write_file(upload.storage_path, readable_report) - return {"successful": successful} + return successful def rewrite_readable( self, network: list[str] | None, report_contents: list[ReadableFile] diff --git a/tasks/tests/unit/test_test_results_finisher.py b/tasks/tests/unit/test_test_results_finisher.py index d22c870ac..e25461f07 100644 --- a/tasks/tests/unit/test_test_results_finisher.py +++ b/tasks/tests/unit/test_test_results_finisher.py @@ -360,7 +360,7 @@ def test_upload_finisher_task_call( result = TestResultsFinisherTask().run_impl( dbsession, [ - [{"successful": True}], + True, ], repoid=repoid, commitid=commit.commitid, @@ -486,7 +486,7 @@ def test_upload_finisher_task_call_no_failures( result = TestResultsFinisherTask().run_impl( dbsession, [ - [{"successful": True}], + True, ], repoid=repoid, commitid=commit.commitid, @@ -541,9 +541,7 @@ def test_upload_finisher_task_call_no_success( result = TestResultsFinisherTask().run_impl( dbsession, - [ - [{"successful": False}], - ], + [False], repoid=repoid, commitid=commit.commitid, commit_yaml={"codecov": {"max_report_age": False}}, @@ -627,7 +625,7 @@ def test_upload_finisher_task_call_upgrade_comment( result = TestResultsFinisherTask().run_impl( dbsession, [ - [{"successful": True}], + True, ], repoid=repoid, commitid=commit.commitid, @@ -681,7 +679,7 @@ def test_upload_finisher_task_call_existing_comment( result = TestResultsFinisherTask().run_impl( dbsession, [ - [{"successful": True}], + True, ], repoid=repoid, commitid=commit.commitid, @@ -807,7 +805,7 @@ def test_upload_finisher_task_call_comment_fails( result = TestResultsFinisherTask().run_impl( dbsession, [ - [{"successful": True}], + True, ], repoid=repoid, commitid=commit.commitid, @@ -883,7 +881,7 @@ def test_upload_finisher_task_call_with_flaky( result = TestResultsFinisherTask().run_impl( dbsession, [ - [{"successful": True}], + True, ], repoid=repoid, commitid=commit.commitid, @@ -972,7 +970,7 @@ def test_upload_finisher_task_call_main_branch( result = TestResultsFinisherTask().run_impl( dbsession, [ - [{"successful": True}], + True, ], repoid=repoid, commitid=commit.commitid, @@ -1032,9 +1030,7 @@ def test_upload_finisher_task_call_computed_name( result = TestResultsFinisherTask().run_impl( dbsession, - [ - [{"successful": True}], - ], + [True], repoid=repoid, commitid=commit.commitid, commit_yaml={"codecov": {"max_report_age": False}}, @@ -1160,7 +1156,7 @@ def test_upload_finisher_task_call_main_with_plan( result = TestResultsFinisherTask().run_impl( dbsession, [ - [{"successful": True}], + True, ], repoid=repoid, commitid=commit.commitid, diff --git a/tasks/tests/unit/test_test_results_processor_task.py b/tasks/tests/unit/test_test_results_processor_task.py index 832c3bff8..686335c09 
100644 --- a/tasks/tests/unit/test_test_results_processor_task.py +++ b/tasks/tests/unit/test_test_results_processor_task.py @@ -70,11 +70,7 @@ def test_upload_processor_task_call( commit_yaml={"codecov": {"max_report_age": False}}, arguments_list=redis_queue, ) - expected_result = [ - { - "successful": True, - } - ] + tests = dbsession.query(Test).all() test_instances = dbsession.query(TestInstance).all() failures = ( @@ -93,7 +89,7 @@ def test_upload_processor_task_call( failures[0].test.name == "api.temp.calculator.test_calculator\x1ftest_divide" ) - assert expected_result == result + assert result is True assert commit.message == "hello world" assert ( mock_storage.read_file("archive", url) @@ -152,7 +148,7 @@ def test_test_result_processor_task_error_parsing_file( dbsession.add(current_report_row) dbsession.flush() - result = TestResultsProcessorTask().run_impl( + _ = TestResultsProcessorTask().run_impl( dbsession, repoid=commit.repoid, commitid=commit.commitid, @@ -208,11 +204,6 @@ def test_test_result_processor_task_delete_archive( commit_yaml={"codecov": {"max_report_age": False}}, arguments_list=redis_queue, ) - expected_result = [ - { - "successful": True, - } - ] tests = dbsession.query(Test).all() test_instances = dbsession.query(TestInstance).all() @@ -220,7 +211,7 @@ def test_test_result_processor_task_delete_archive( dbsession.query(TestInstance).filter_by(outcome=str(Outcome.Failure)).all() ) - assert result == expected_result + assert result is True assert len(tests) == 4 assert len(test_instances) == 4 @@ -281,9 +272,8 @@ def test_test_result_processor_task_bad_file( commit_yaml={"codecov": {"max_report_age": False}}, arguments_list=redis_queue, ) - expected_result = [{"successful": False}] - assert expected_result == result + assert result == False assert ( "No test result files were successfully parsed for this upload" in caplog.text @@ -350,11 +340,6 @@ def test_upload_processor_task_call_existing_test( commit_yaml={"codecov": {"max_report_age": False}}, arguments_list=redis_queue, ) - expected_result = [ - { - "successful": True, - } - ] tests = dbsession.query(Test).all() test_instances = dbsession.query(TestInstance).all() failures = ( @@ -373,7 +358,7 @@ def test_upload_processor_task_call_existing_test( failures[0].test.name == "api.temp.calculator.test_calculator\x1ftest_divide" ) - assert expected_result == result + assert result is True assert commit.message == "hello world" @pytest.mark.integration @@ -444,11 +429,7 @@ def test_upload_processor_task_call_existing_test_diff_flags_hash( commit_yaml={"codecov": {"max_report_age": False}}, arguments_list=redis_queue, ) - expected_result = [ - { - "successful": True, - } - ] + tests = dbsession.query(Test).all() test_instances = dbsession.query(TestInstance).all() failures = ( @@ -477,7 +458,7 @@ def test_upload_processor_task_call_existing_test_diff_flags_hash( failures[0].test.name == "api.temp.calculator.test_calculator\x1ftest_divide" ) - assert expected_result == result + assert result is True assert commit.message == "hello world" @pytest.mark.integration @@ -532,11 +513,6 @@ def test_upload_processor_task_call_daily_test_totals( commit_yaml={"codecov": {"max_report_age": False}}, arguments_list=redis_queue, ) - expected_result = [ - { - "successful": True, - } - ] rollups = dbsession.query(DailyTestRollup).all() @@ -591,13 +567,8 @@ def test_upload_processor_task_call_daily_test_totals( commit_yaml={"codecov": {"max_report_age": False}}, arguments_list=redis_queue, ) - expected_result = [ - { - "successful": 
True, - } - ] - assert result == expected_result + assert result is True rollups_first_branch: list[DailyTestRollup] = ( dbsession.query(DailyTestRollup).filter_by(branch="first_branch").all() @@ -701,11 +672,7 @@ def test_upload_processor_task_call_network( commit_yaml={"codecov": {"max_report_age": False}}, arguments_list=redis_queue, ) - expected_result = [ - { - "successful": True, - } - ] + tests = dbsession.query(Test).all() test_instances = dbsession.query(TestInstance).all() failures = ( @@ -730,7 +697,7 @@ def test_upload_processor_task_call_network( failures[0].test.name == "api.temp.calculator.test_calculator\x1ftest_divide" ) - assert expected_result == result + assert result is True assert commit.message == "hello world" assert mock_storage.read_file("archive", url).startswith( @@ -784,9 +751,8 @@ def test_upload_processor_task_call_already_processed( commit_yaml={"codecov": {"max_report_age": False}}, arguments_list=redis_queue, ) - expected_result = [[]] - assert expected_result == result + assert result is False @pytest.mark.integration def test_upload_processor_task_call_already_processed_with_junit( @@ -847,15 +813,10 @@ def test_upload_processor_task_call_already_processed_with_junit( commit_yaml={"codecov": {"max_report_age": False}}, arguments_list=redis_queue, ) - expected_result = [ - { - "successful": True, - } - ] tests = dbsession.query(Test).all() test_instances = dbsession.query(TestInstance).all() - assert expected_result == result + assert result is True assert len(tests) == 4 assert len(test_instances) == 4