From 8594c8be4d8c0d7c9b5eb3d69d0c96cc80cffcc4 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 22 Oct 2023 13:53:59 -0400
Subject: [PATCH] Empty the cache when torch cache is more than 25% free mem.

---
 comfy/model_management.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 64ed19727f4..53582fc736d 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -339,7 +339,11 @@ def free_memory(memory_required, device, keep_loaded=[]):
 
     if unloaded_model:
         soft_empty_cache()
-
+    else:
+        if vram_state != VRAMState.HIGH_VRAM:
+            mem_free_total, mem_free_torch = get_free_memory(device, torch_free_too=True)
+            if mem_free_torch > mem_free_total * 0.25:
+                soft_empty_cache()
 
 def load_models_gpu(models, memory_required=0):
     global vram_state
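
For context, a minimal sketch (not part of the patch) of the condition the new else branch evaluates. It assumes get_free_memory(device, torch_free_too=True) is built on torch.cuda.mem_get_info() and torch.cuda.memory_stats(), with the "torch free" portion taken as reserved-but-inactive allocator memory, and that soft_empty_cache() ultimately calls torch.cuda.empty_cache(); treat these as assumptions about the surrounding code, not guarantees.

    import torch

    def should_empty_cache(device):
        # Free memory as reported by the CUDA driver (outside torch's allocator).
        mem_free_cuda, _ = torch.cuda.mem_get_info(device)
        stats = torch.cuda.memory_stats(device)
        # Memory torch has reserved from the driver but is not actively using;
        # this is the "torch cache" the commit message refers to.
        mem_free_torch = stats["reserved_bytes.all.current"] - stats["active_bytes.all.current"]
        mem_free_total = mem_free_cuda + mem_free_torch
        # Empty the allocator cache when the cached-but-unused portion exceeds
        # 25% of the total free memory, mirroring the patched condition.
        return mem_free_torch > mem_free_total * 0.25

    # Hypothetical usage (assumes a CUDA device is available):
    # if should_empty_cache(torch.device("cuda:0")):
    #     torch.cuda.empty_cache()

The intent of the change, as the subject line states, is to return cached-but-unused allocator memory to the driver even when no model was unloaded, whenever that cache makes up more than a quarter of the total free memory.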