From deb2b93e797cb345d18e5fd54dff20837fd5ba02 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sat, 15 Apr 2023 11:19:07 -0400
Subject: [PATCH] Move code to empty gpu cache to model_management.py

---
 comfy/model_management.py | 9 +++++++++
 execution.py              | 9 ++-------
 2 files changed, 11 insertions(+), 7 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 8303cb43..76455e4a 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -307,6 +307,15 @@ def should_use_fp16():
 
     return True
 
+def soft_empty_cache():
+    global xpu_available
+    if xpu_available:
+        torch.xpu.empty_cache()
+    elif torch.cuda.is_available():
+        if torch.version.cuda: #This seems to make things worse on ROCm so I only do it for cuda
+            torch.cuda.empty_cache()
+            torch.cuda.ipc_collect()
+
 #TODO: might be cleaner to put this somewhere else
 import threading
 
diff --git a/execution.py b/execution.py
index 9d9ca5f6..73be6db0 100644
--- a/execution.py
+++ b/execution.py
@@ -10,7 +10,7 @@ import gc
 
 import torch
 import nodes
-from model_management import xpu_available
+import comfy.model_management
 
 def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_data={}):
     valid_inputs = class_def.INPUT_TYPES()
@@ -204,12 +204,7 @@ class PromptExecutor:
             self.server.send_sync("executing", { "node": None }, self.server.client_id)
 
         gc.collect()
-        if torch.cuda.is_available():
-            if torch.version.cuda: #This seems to make things worse on ROCm so I only do it for cuda
-                torch.cuda.empty_cache()
-                torch.cuda.ipc_collect()
-        elif xpu_available:
-            torch.xpu.empty_cache()
+        comfy.model_management.soft_empty_cache()
 
 
 def validate_inputs(prompt, item):
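
Note (not part of the patch): a minimal sketch of how calling code could use the relocated helper after this change, assuming the ComfyUI repo root is on sys.path and torch is installed. The function name free_vram_between_jobs is an illustrative assumption; the pattern of gc.collect() followed by soft_empty_cache() simply mirrors the updated call site in PromptExecutor shown in the diff.

import gc

import comfy.model_management

def free_vram_between_jobs():
    # Hypothetical helper, not from the patch: drop Python-level references
    # first, then ask model_management to flush the backend cache.
    # soft_empty_cache() handles the backend checks itself (XPU vs CUDA) and
    # intentionally skips the flush on ROCm, per the comment in the diff.
    gc.collect()
    comfy.model_management.soft_empty_cache()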