Increased the MemoryCounter minimum memory to leave free by 2x, until a better way exists to estimate the inference memory of currently loaded models
parent 0850ae5c04
commit de6013ccc4
@@ -986,7 +986,7 @@ class ModelPatcher:
         if self.hook_mode == comfy.hooks.EnumHookMode.MaxSpeed:
             # TODO: minimum_counter should have a minimum that conforms to loaded model requirements
             memory_counter = MemoryCounter(initial=comfy.model_management.get_free_memory(self.load_device),
-                                           minimum=comfy.model_management.minimum_inference_memory())
+                                           minimum=comfy.model_management.minimum_inference_memory()*2)
             # if have cached weights for hooks, use it
             cached_weights = self.cached_hook_patches.get(hooks, None)
             if cached_weights is not None:
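For context, below is a minimal sketch of the kind of budget check the doubled minimum feeds into. This is an assumption, not ComfyUI's actual MemoryCounter implementation; the use_bytes method name and all byte figures are illustrative only.

# Minimal sketch (assumed, not ComfyUI's real MemoryCounter) of a memory budget
# that refuses allocations dipping below a reserved floor. Doubling `minimum`
# simply leaves extra headroom for inference when no per-model estimate exists.

class MemoryCounter:
    def __init__(self, initial: int, minimum: int):
        self.remaining = initial   # e.g. free memory on the load device
        self.minimum = minimum     # floor of memory to leave untouched

    def use_bytes(self, amount: int) -> bool:
        # Reserve `amount` bytes only if `remaining` stays above the floor.
        if self.remaining - amount < self.minimum:
            return False
        self.remaining -= amount
        return True


GiB = 1024 ** 3
free = 10 * GiB
base_minimum = int(1.2 * GiB)  # placeholder for minimum_inference_memory()

# Before the change the floor was base_minimum; after it is base_minimum * 2.
counter = MemoryCounter(initial=free, minimum=base_minimum * 2)
print(counter.use_bytes(7 * GiB))  # True: 3 GiB remain, above the 2.4 GiB floor
print(counter.use_bytes(1 * GiB))  # False: would drop below the doubled floor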