Move code to empty gpu cache to model_management.py
parent f4c689ea89
commit deb2b93e79
@@ -307,6 +307,15 @@ def should_use_fp16():
         return True
 
     return True
 
+def soft_empty_cache():
+    global xpu_available
+    if xpu_available:
+        torch.xpu.empty_cache()
+    elif torch.cuda.is_available():
+        if torch.version.cuda: #This seems to make things worse on ROCm so I only do it for cuda
+            torch.cuda.empty_cache()
+            torch.cuda.ipc_collect()
+
 #TODO: might be cleaner to put this somewhere else
 import threading
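For context, here is the helper this hunk introduces as a self-contained sketch; the xpu_available flag is stubbed to False so the snippet runs standalone (in model_management.py it is set during Intel XPU detection):

import torch

xpu_available = False  # stub; the real module sets this when torch.xpu is usable

def soft_empty_cache():
    global xpu_available
    if xpu_available:
        torch.xpu.empty_cache()  # Intel GPU path (requires intel-extension-for-pytorch)
    elif torch.cuda.is_available():
        if torch.version.cuda:  # None on ROCm builds, where emptying reportedly makes things worse
            torch.cuda.empty_cache()  # return cached allocator blocks to the driver
            torch.cuda.ipc_collect()  # free memory held by expired CUDA IPC handles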
@@ -10,7 +10,7 @@ import gc
 import torch
 import nodes
 
-from model_management import xpu_available
+import comfy.model_management
 
 def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_data={}):
     valid_inputs = class_def.INPUT_TYPES()
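One practical effect of switching from a from-import to a module import, sketched below (illustrative, not asserted by the diff): a from-import binds the value of xpu_available once at import time, so later changes to the flag in model_management would be invisible here, whereas attribute access through the module always reads current state; the flag check now lives inside soft_empty_cache() anyway.

# from model_management import xpu_available  # would snapshot the flag at import time
import comfy.model_management                  # attribute lookup is deferred to call time
comfy.model_management.soft_empty_cache()      # device dispatch handled inside the helper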
@@ -204,12 +204,7 @@ class PromptExecutor:
         self.server.send_sync("executing", { "node": None }, self.server.client_id)
 
         gc.collect()
-        if torch.cuda.is_available():
-            if torch.version.cuda: #This seems to make things worse on ROCm so I only do it for cuda
-                torch.cuda.empty_cache()
-                torch.cuda.ipc_collect()
-        elif xpu_available:
-            torch.xpu.empty_cache()
+        comfy.model_management.soft_empty_cache()
 
 
 def validate_inputs(prompt, item):
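After the change, PromptExecutor's post-run cleanup reduces to two calls. A minimal sketch of the resulting pattern (the wrapping function is hypothetical; only the two calls come from the diff):

import gc
import comfy.model_management

def cleanup_after_execution():
    gc.collect()  # drop dead Python references first so tensors become collectable
    comfy.model_management.soft_empty_cache()  # then release the GPU allocator caches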