--disable-smart-memory now unloads everything like it did originally.

comfyanonymous 2023-12-23 04:25:06 -05:00
parent 36a7953142
commit a252963f95
2 changed files with 6 additions and 0 deletions

comfy/model_management.py

@@ -754,6 +754,10 @@ def soft_empty_cache(force=False):
             torch.cuda.empty_cache()
             torch.cuda.ipc_collect()
 
 
+def unload_all_models():
+    free_memory(1e30, get_torch_device())
+
+
 def resolve_lowvram_weight(weight, model, key): #TODO: remove
     return weight
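
For reference, unload_all_models() simply calls free_memory() with an effectively unlimited request (1e30 bytes) on the torch device, so every currently loaded model is evicted. A minimal usage sketch, assuming it runs inside a ComfyUI process where comfy.model_management is importable (the surrounding script is hypothetical; unload_all_models() and soft_empty_cache() are the functions from this file):

    import comfy.model_management as mm

    # Evict every loaded model: free_memory(1e30, device) asks for far more
    # memory than can ever be available, so nothing stays loaded.
    mm.unload_all_models()

    # Optionally release cached allocator memory afterwards as well.
    mm.soft_empty_cache()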

execution.py

@@ -382,6 +382,8 @@ class PromptExecutor:
             for x in executed:
                 self.old_prompt[x] = copy.deepcopy(prompt[x])
             self.server.last_node_id = None
+            if comfy.model_management.DISABLE_SMART_MEMORY:
+                comfy.model_management.unload_all_models()
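
With this check in place, a server started with --disable-smart-memory drops all loaded models after every finished prompt, restoring the original behaviour. A small sketch of the resulting flow, under the assumption that it runs inside a ComfyUI process (the wrapper function below is hypothetical; the flag and the unload call are the ones added in this commit):

    import comfy.model_management

    def after_prompt_finished():
        # Mirrors the new PromptExecutor behaviour: with smart memory
        # disabled, unload everything once the prompt has completed.
        if comfy.model_management.DISABLE_SMART_MEMORY:
            comfy.model_management.unload_all_models()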