Support releasing all unoccupied cached memory from the XPU
parent 04d9bc13af
commit d63705d919
@@ -10,6 +10,8 @@ import gc
 import torch
 import nodes

+from model_management import xpu_available
+
 def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_data={}):
     valid_inputs = class_def.INPUT_TYPES()
     input_data_all = {}
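The new import pulls an xpu_available flag from model_management. As a rough sketch only (not the repository's actual detection code), such a flag is typically derived by probing for Intel's XPU backend:

    # Hedged sketch: how an xpu_available flag might be computed.
    # The real logic in model_management may differ.
    import torch

    xpu_available = False
    try:
        import intel_extension_for_pytorch  # provides the torch.xpu backend
        xpu_available = torch.xpu.is_available()
    except Exception:
        pass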
@@ -206,6 +208,8 @@ class PromptExecutor:
         if torch.version.cuda: #This seems to make things worse on ROCm so I only do it for cuda
             torch.cuda.empty_cache()
             torch.cuda.ipc_collect()
+        elif xpu_available:
+            torch.xpu.empty_cache()


 def validate_inputs(prompt, item):
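Taken together, the cache-release step now branches on backend: the CUDA path keeps empty_cache() plus ipc_collect(), while the new XPU path only calls empty_cache(). The same logic as a standalone helper, with soft_empty_cache being a hypothetical name used here for illustration:

    # Hypothetical helper name; the body mirrors the branch added in this commit.
    import torch
    from model_management import xpu_available

    def soft_empty_cache():
        if torch.version.cuda:  # skipped on ROCm, where this reportedly makes things worse
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()  # also reclaim CUDA IPC handles
        elif xpu_available:
            torch.xpu.empty_cache()  # release unoccupied cached XPU memory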