From 1bd33f1aa4cc701b640aa6bdfa71a5aa91820389 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Fri, 20 Sep 2024 12:51:52 +0000
Subject: [PATCH] Fix flaky FLUX LoRA unit test that fails occasionally due to
 numerical precision.

---
 tests/backend/lora/test_lora_patcher.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tests/backend/lora/test_lora_patcher.py b/tests/backend/lora/test_lora_patcher.py
index 9daa277e92a..b99a06ac5f5 100644
--- a/tests/backend/lora/test_lora_patcher.py
+++ b/tests/backend/lora/test_lora_patcher.py
@@ -192,4 +192,6 @@ def test_apply_lora_sidecar_patches_matches_apply_lora_patches(num_layers: int):
     with LoRAPatcher.apply_lora_sidecar_patches(model=model, patches=lora_models, prefix="", dtype=dtype):
         output_lora_sidecar_patches = model(input)
 
-    assert torch.allclose(output_lora_patches, output_lora_sidecar_patches)
+    # Note: We set atol=1e-5 because the test failed occasionally with the default atol=1e-8. Slight numerical
+    # differences are tolerable and expected due to the difference between sidecar vs. patching.
+    assert torch.allclose(output_lora_patches, output_lora_sidecar_patches, atol=1e-5)
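
Note (not part of the patch): torch.allclose checks |a - b| <= atol + rtol * |b| elementwise, with defaults rtol=1e-5 and atol=1e-8. For output elements near zero the rtol term contributes almost nothing, so the default atol governs and tiny floating-point differences between the sidecar and direct-patching code paths can trip the assert. A minimal standalone sketch of this behavior (the tensor values are made up for illustration):

    import torch

    a = torch.tensor([0.0, 1.0])
    b = torch.tensor([5e-6, 1.0])  # first element differs by 5e-6, near zero

    # With defaults (rtol=1e-5, atol=1e-8): tolerance for the first element is
    # ~1e-8, so a 5e-6 difference fails the check.
    print(torch.allclose(a, b))             # False

    # With atol=1e-5 the absolute term absorbs the small discrepancy.
    print(torch.allclose(a, b, atol=1e-5))  # True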