--disable-smart-memory now unloads everything like it did originally.
commit a252963f95
parent 36a7953142
@@ -754,6 +754,10 @@ def soft_empty_cache(force=False):
             torch.cuda.empty_cache()
             torch.cuda.ipc_collect()
 
+def unload_all_models():
+    free_memory(1e30, get_torch_device())
+
+
 def resolve_lowvram_weight(weight, model, key): #TODO: remove
     return weight
 
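The new unload_all_models() helper works by asking free_memory() for an amount of memory (1e30 bytes) that no device can ever satisfy, so the eviction loop keeps going until every loaded model has been unloaded. A minimal, self-contained sketch of that idea, with names (current_loaded_models, get_free_memory, model_unload) modeled on comfy.model_management but simplified, not the exact ComfyUI implementation:

```python
# Sketch only: why free_memory(1e30, device) ends up unloading everything.
current_loaded_models = []  # most recently used models sit at the end

class LoadedModel:
    def __init__(self, name, size):
        self.name, self.size = name, size
    def model_unload(self):
        print(f"unloading {self.name}")

def get_free_memory(device):
    # stand-in: pretend the device has 8 GiB minus whatever is loaded
    return 8 * 1024**3 - sum(m.size for m in current_loaded_models)

def free_memory(memory_required, device, keep_loaded=[]):
    # Evict least recently used models until the request can be satisfied.
    for i in range(len(current_loaded_models) - 1, -1, -1):
        if get_free_memory(device) > memory_required:
            break
        if current_loaded_models[i] not in keep_loaded:
            current_loaded_models.pop(i).model_unload()

def unload_all_models():
    # 1e30 bytes can never be freed, so the loop evicts every model.
    free_memory(1e30, device="cuda:0")

if __name__ == "__main__":
    current_loaded_models[:] = [LoadedModel("clip", 2 * 1024**3),
                                LoadedModel("unet", 4 * 1024**3)]
    unload_all_models()
    print(current_loaded_models)  # -> []
```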
@@ -382,6 +382,8 @@ class PromptExecutor:
             for x in executed:
                 self.old_prompt[x] = copy.deepcopy(prompt[x])
             self.server.last_node_id = None
+            if comfy.model_management.DISABLE_SMART_MEMORY:
+                comfy.model_management.unload_all_models()
 
 
 
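On the executor side, the full unload only happens when smart memory management is disabled. Only DISABLE_SMART_MEMORY and unload_all_models() appear in this commit; the sketch below is a hedged illustration of how the --disable-smart-memory command-line flag would typically be wired through to the check added above, with a stub standing in for the real unload call:

```python
# Illustrative plumbing from the CLI flag to the executor check; the
# argparse wiring and the stub body are assumptions, not code from this diff.
import argparse

def unload_all_models():
    # stub standing in for comfy.model_management.unload_all_models()
    print("unloading all models")

parser = argparse.ArgumentParser()
parser.add_argument("--disable-smart-memory", action="store_true",
                    help="Unload models as soon as a prompt finishes executing, "
                         "matching the old pre-smart-memory behaviour.")
args = parser.parse_args(["--disable-smart-memory"])

DISABLE_SMART_MEMORY = args.disable_smart_memory  # mirrors model_management

# What the PromptExecutor change boils down to, once a prompt has executed:
if DISABLE_SMART_MEMORY:
    unload_all_models()
```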