diff --git a/.ci/generate_test_report_lib.py b/.ci/generate_test_report_lib.py
index 8987ed6d1bd8..5b16599cc743 100644
--- a/.ci/generate_test_report_lib.py
+++ b/.ci/generate_test_report_lib.py
@@ -158,6 +158,17 @@ def get_failures(junit_objects) -> dict[str, list[tuple[str, str]]]:
     return failures
 
 
+def are_all_failures_explained(
+    failures: list[tuple[str, str]], failure_explanations: dict[str, FailureExplanation]
+) -> bool:
+    for failed_action, _ in failures:
+        if failed_action not in failure_explanations:
+            return False
+        else:
+            assert failure_explanations[failed_action]["explained"]
+    return True
+
+
 # Set size_limit to limit the byte size of the report. The default is 1MB as this
 # is the most that can be put into an annotation. If the generated report exceeds
 # this limit and failures are listed, it will be generated again without failures
@@ -172,7 +183,7 @@ def generate_report(
     size_limit=1024 * 1024,
     list_failures=True,
     failure_explanations_list: list[FailureExplanation] = [],
-):
+) -> tuple[str, bool]:
     failures = get_failures(junit_objects)
     tests_run = 0
     tests_skipped = 0
@@ -183,6 +194,12 @@ def generate_report(
         if not failure_explanation["explained"]:
             continue
         failure_explanations[failure_explanation["name"]] = failure_explanation
+    all_failures_explained = True
+    if failures:
+        for _, failures_list in failures.items():
+            all_failures_explained &= are_all_failures_explained(
+                failures_list, failure_explanations
+            )
 
     for results in junit_objects:
         for testsuite in results:
@@ -202,7 +219,11 @@ def generate_report(
             )
         else:
             ninja_failures = find_failure_in_ninja_logs(ninja_logs)
+            all_failures_explained &= are_all_failures_explained(
+                ninja_failures, failure_explanations
+            )
             if not ninja_failures:
+                all_failures_explained = False
                 report.extend(
                     [
                         "The build failed before running any tests. Detailed "
@@ -229,7 +250,7 @@ def generate_report(
                 UNRELATED_FAILURES_STR,
             ]
         )
-        return "\n".join(report)
+        return ("\n".join(report), all_failures_explained)
 
     tests_passed = tests_run - tests_skipped - tests_failed
 
@@ -264,6 +285,7 @@ def generate_report(
         # attention.
         ninja_failures = find_failure_in_ninja_logs(ninja_logs)
         if not ninja_failures:
+            all_failures_explained = False
             report.extend(
                 [
                     "",
@@ -275,6 +297,9 @@ def generate_report(
                 ]
             )
         else:
+            all_failures_explained &= are_all_failures_explained(
+                ninja_failures, failure_explanations
+            )
             report.extend(
                 [
                     "",
@@ -303,7 +328,7 @@ def generate_report(
             list_failures=False,
         )
 
-    return report
+    return (report, all_failures_explained)
 
 
 def load_info_from_files(build_log_files):
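A note on the accumulation pattern introduced above: `all_failures_explained` starts as `True` and is AND-ed with the verdict for each failure list, so it can only ever flip from `True` to `False`. A minimal self-contained sketch of that behavior, with made-up verdicts (not part of the patch):

    # Demo of the `&=` accumulation used in generate_report(). On bools,
    # `&=` is bitwise AND, so a single unexplained failure list drops the
    # flag to False for good.
    all_failures_explained = True
    for verdict in (True, False, True):  # invented per-failure-list results
        all_failures_explained &= verdict
    assert all_failures_explained is False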
diff --git a/.ci/generate_test_report_lib_test.py b/.ci/generate_test_report_lib_test.py
index 0f88bdab78da..4cef4b913e6b 100644
--- a/.ci/generate_test_report_lib_test.py
+++ b/.ci/generate_test_report_lib_test.py
@@ -191,19 +191,23 @@ class TestReports(unittest.TestCase):
     def test_title_only(self):
         self.assertEqual(
             generate_test_report_lib.generate_report("Foo", 0, [], []),
-            dedent(
-                """\
+            (
+                dedent(
+                    """\
               # Foo
 
               :white_check_mark: The build succeeded and no tests ran. This is expected in some build configurations."""
+                ),
+                True,
             ),
         )
 
     def test_title_only_failure(self):
         self.assertEqual(
             generate_test_report_lib.generate_report("Foo", 1, [], []),
-            dedent(
-                """\
+            (
+                dedent(
+                    """\
               # Foo
 
               The build failed before running any tests. Detailed information about the build failure could not be automatically obtained.
@@ -211,6 +215,8 @@ class TestReports(unittest.TestCase):
               Download the build's log file to see the details.
 
               If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
+                ),
+                False,
             ),
         )
 
@@ -233,8 +239,9 @@ class TestReports(unittest.TestCase):
                     ]
                 ],
             ),
-            dedent(
-                """\
+            (
+                dedent(
+                    """\
               # Foo
 
               The build failed before running any tests. Click on a failure below to see the details.
@@ -250,6 +257,8 @@ class TestReports(unittest.TestCase):
 
               If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
+                ),
+                False,
             ),
         )
 
@@ -272,8 +281,9 @@ class TestReports(unittest.TestCase):
                 ],
                 [],
             ),
-            dedent(
-                """\
+            (
+                dedent(
+                    """\
               # Foo
 
               The build failed before running any tests. Detailed information about the build failure could not be automatically obtained.
@@ -281,6 +291,8 @@ class TestReports(unittest.TestCase):
               Download the build's log file to see the details.
 
               If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
+                ),
+                False,
             ),
         )
 
@@ -312,7 +324,8 @@ class TestReports(unittest.TestCase):
               * 1 test passed
 
               :white_check_mark: The build succeeded and all tests passed."""
-            )
+                ),
+                True,
             ),
         )
 
@@ -348,7 +361,8 @@ class TestReports(unittest.TestCase):
               Download the build's log file to see the details.
 
               If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
-            )
+                ),
+                False,
             ),
         )
 
@@ -403,7 +417,8 @@ class TestReports(unittest.TestCase):
 
               If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
-            )
+                ),
+                False,
             ),
         )
 
@@ -466,7 +481,8 @@ class TestReports(unittest.TestCase):
 
              If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
-            )
+                ),
+                False,
             ),
         )
 
@@ -528,7 +544,8 @@ class TestReports(unittest.TestCase):
 
              If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
-            )
+                ),
+                False,
             ),
         )
 
@@ -595,7 +612,7 @@ class TestReports(unittest.TestCase):
                 ],
                 [],
             ),
-            self.MULTI_SUITE_OUTPUT,
+            (self.MULTI_SUITE_OUTPUT, False),
         )
 
     def test_report_multiple_files_multiple_testsuites(self):
@@ -637,7 +654,7 @@ class TestReports(unittest.TestCase):
                 ],
                 [],
             ),
-            self.MULTI_SUITE_OUTPUT,
+            (self.MULTI_SUITE_OUTPUT, False),
         )
 
     def test_report_dont_list_failures(self):
@@ -673,7 +690,8 @@ class TestReports(unittest.TestCase):
              Failed tests and their output was too large to report. Download the build's log file to see the details.
 
              If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
-            )
+                ),
+                False,
             ),
         )
 
@@ -710,7 +728,8 @@ class TestReports(unittest.TestCase):
              Failed tests and their output was too large to report. Download the build's log file to see the details.
 
              If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
-            )
+                ),
+                False,
             ),
         )
 
@@ -750,7 +769,8 @@ class TestReports(unittest.TestCase):
              Failed tests and their output was too large to report. Download the build's log file to see the details.
 
              If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
-            )
+                ),
+                False,
             ),
         )
 
@@ -780,8 +800,9 @@ class TestReports(unittest.TestCase):
                     }
                 ],
             ),
-            dedent(
-                """\
+            (
+                dedent(
+                    """\
               # Foo
 
               The build failed before running any tests. Click on a failure below to see the details.
@@ -798,6 +819,8 @@ class TestReports(unittest.TestCase):
 
               If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
+                ),
+                True,
             ),
         )
 
@@ -851,7 +874,8 @@ class TestReports(unittest.TestCase):
 
              If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
-            )
+                ),
+                True,
             ),
         )
 
@@ -904,7 +928,8 @@ class TestReports(unittest.TestCase):
 
              If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
-            )
+                ),
+                False,
             ),
         )
 
@@ -942,8 +967,9 @@ class TestReports(unittest.TestCase):
             generate_test_report_lib.generate_report_from_files(
                 "Foo", 1, [junit_xml_file, ninja_log_file]
             ),
-            dedent(
-                """\
+            (
+                dedent(
+                    """\
               # Foo
 
               * 1 test passed
@@ -961,5 +987,7 @@ class TestReports(unittest.TestCase):
 
              If these failures are unrelated to your changes (for example tests are broken or flaky at HEAD), please open an issue at https://github.com/llvm/llvm-project/issues and add the `infrastructure` label."""
+                ),
+                False,
             ),
         )
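Every test above changes mechanically in the same way: the expected value becomes a `(report_text, all_failures_explained)` tuple, `True` only when nothing failed or every failure carries an explanation. A sketch of how a caller consumes the new shape, assuming the patched module is importable (for example when running from the `.ci` directory):

    import generate_test_report_lib  # assumes .ci is on sys.path

    # Sketch only: unpacking the new two-element return value.
    report, all_explained = generate_test_report_lib.generate_report(
        "Foo", 0, [], []
    )
    assert all_explained  # nothing failed, so the flag stays True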
diff --git a/.ci/premerge_advisor_explain.py b/.ci/premerge_advisor_explain.py
index a88bfd223d9a..4a3900021ede 100644
--- a/.ci/premerge_advisor_explain.py
+++ b/.ci/premerge_advisor_explain.py
@@ -53,7 +53,7 @@ def main(
     github_token: str,
     pr_number: int,
     return_code: int,
-):
+) -> bool:
     """The main entrypoint for the script.
 
     This function parses failures from files, requests information from the
@@ -112,19 +112,14 @@ def main(
         advisor_explanations = advisor_response.json()
     else:
         print(advisor_response.reason)
-    comments.append(
-        get_comment(
-            github_token,
-            pr_number,
-            generate_test_report_lib.generate_report(
-                generate_test_report_lib.compute_platform_title(),
-                return_code,
-                junit_objects,
-                ninja_logs,
-                failure_explanations_list=advisor_explanations,
-            ),
-        )
+    report, failures_explained = generate_test_report_lib.generate_report(
+        generate_test_report_lib.compute_platform_title(),
+        return_code,
+        junit_objects,
+        ninja_logs,
+        failure_explanations_list=advisor_explanations,
     )
+    comments.append(get_comment(github_token, pr_number, report))
     if return_code == 0 and "id" not in comments[0]:
         # If the job succeeds and there is not an existing comment, we
         # should not write one to reduce noise.
@@ -133,6 +128,7 @@ def main(
     with open(comments_file_name, "w") as comment_file_handle:
         json.dump(comments, comment_file_handle)
     print(f"Wrote comments to {comments_file_name}")
+    return failures_explained
 
 
 if __name__ == "__main__":
@@ -151,7 +147,7 @@ if __name__ == "__main__":
     if platform.machine() == "arm64" or platform.machine() == "aarch64":
         sys.exit(0)
 
-    main(
+    failures_explained = main(
         args.commit_sha,
         args.build_log_files,
         args.github_token,
@@ -159,4 +155,7 @@ if __name__ == "__main__":
         args.return_code,
     )
 
+    if failures_explained:
+        sys.exit(0)
+
     sys.exit(args.return_code)
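Taken together, the three files let the premerge advisor turn a red build into a passing job when every failure is already known at HEAD. The resulting exit-code policy can be summarized in a small self-contained sketch; `decide_exit` is a made-up helper, not part of the patch:

    # Invented summary of the exit behavior added to premerge_advisor_explain.py.
    def decide_exit(return_code: int, failures_explained: bool) -> int:
        # A build whose failures are all explained (known broken or flaky
        # at HEAD) no longer blocks the PR; anything else propagates the
        # original build status unchanged.
        if failures_explained:
            return 0
        return return_code

    assert decide_exit(1, True) == 0   # explained red build -> exit 0
    assert decide_exit(1, False) == 1  # unexplained failure -> still red
    assert decide_exit(0, True) == 0   # green build stays green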