diff --git a/.buildkite/lm-eval-harness/test_lm_eval_correctness.py b/.buildkite/lm-eval-harness/test_lm_eval_correctness.py
index 671231697b196..2b33274db2a8d 100644
--- a/.buildkite/lm-eval-harness/test_lm_eval_correctness.py
+++ b/.buildkite/lm-eval-harness/test_lm_eval_correctness.py
@@ -25,7 +25,7 @@ def launch_lm_eval(eval_config):
     try:
         trust_remote_code = eval_config['trust_remote_code']
-    except:
+    except Exception:
         trust_remote_code = False
     model_args = f"pretrained={eval_config['model_name']}," \