From 72d6fc761d7d4a1def93a43eb0c46496a4515c25 Mon Sep 17 00:00:00 2001
From: Ximin Luo
Date: Wed, 10 Jul 2024 12:01:59 +0100
Subject: [PATCH] Release memory on ROCm as well, it works fine here, fixes #3257

---
 ldm_patched/modules/model_management.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/ldm_patched/modules/model_management.py b/ldm_patched/modules/model_management.py
index 840d79a07..725410aa1 100644
--- a/ldm_patched/modules/model_management.py
+++ b/ldm_patched/modules/model_management.py
@@ -759,16 +759,15 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True):
 
     return True
 
-def soft_empty_cache(force=False):
+def soft_empty_cache(_force_deprecated=False):
     global cpu_state
     if cpu_state == CPUState.MPS:
         torch.mps.empty_cache()
     elif is_intel_xpu():
         torch.xpu.empty_cache()
     elif torch.cuda.is_available():
-        if force or is_nvidia(): #This seems to make things worse on ROCm so I only do it for cuda
-            torch.cuda.empty_cache()
-            torch.cuda.ipc_collect()
+        torch.cuda.empty_cache()
+        torch.cuda.ipc_collect()
 
 def unload_all_models():
     free_memory(1e30, get_torch_device())
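
Note for reviewers (not part of the commit): a minimal standalone sketch of why the
is_nvidia() guard can be dropped. ROCm builds of PyTorch expose the same torch.cuda
namespace through HIP, so torch.cuda.empty_cache() and torch.cuda.ipc_collect() are
valid calls on both NVIDIA and AMD devices; the torch.version.hip check below is only
there to label which backend is in use, and the script itself is illustrative rather
than anything referenced by the patch.

    import torch

    def describe_backend():
        """Report which GPU backend this PyTorch build is driving."""
        if not torch.cuda.is_available():
            return "no CUDA/ROCm device visible"
        # torch.version.hip is a version string on ROCm builds, None on CUDA builds.
        backend = "ROCm/HIP" if torch.version.hip else "CUDA"
        return f"{backend} device: {torch.cuda.get_device_name(0)}"

    if __name__ == "__main__":
        print(describe_backend())
        if torch.cuda.is_available():
            # The same calls the patched soft_empty_cache() now issues unconditionally;
            # both are no-ops when the caching allocator holds nothing.
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
            print("bytes still reserved by the allocator:", torch.cuda.memory_reserved(0))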