Increased the minimum memory that MemoryCounter leaves free by 2x, until a better way exists to estimate the inference memory of currently loaded models

This commit is contained in:
Jedrzej Kosinski 2024-11-18 08:08:00 -06:00
parent 0850ae5c04
commit de6013ccc4
1 changed file with 1 addition and 1 deletion


@@ -986,7 +986,7 @@ class ModelPatcher:
         if self.hook_mode == comfy.hooks.EnumHookMode.MaxSpeed:
             # TODO: minimum_counter should have a minimum that conforms to loaded model requirements
             memory_counter = MemoryCounter(initial=comfy.model_management.get_free_memory(self.load_device),
-                                           minimum=comfy.model_management.minimum_inference_memory())
+                                           minimum=comfy.model_management.minimum_inference_memory()*2)
         # if have cached weights for hooks, use it
         cached_weights = self.cached_hook_patches.get(hooks, None)
         if cached_weights is not None:
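
The MemoryCounter implementation itself is not part of this diff, so the sketch below is only an illustrative assumption of how a counter with a reserved "minimum" floor could behave and what doubling minimum_inference_memory() changes in practice. The class name MemoryCounterSketch, its use() method, and all byte figures are hypothetical; only the names MemoryCounter, get_free_memory, and minimum_inference_memory come from the diff above.

# Minimal sketch, assuming a counter that refuses allocations which would
# dip below a reserved floor. Not the real comfy.model_management code.
class MemoryCounterSketch:
    def __init__(self, initial: int, minimum: int):
        self.value = initial      # free memory currently available on the load device
        self.minimum = minimum    # memory that must remain free for inference

    def use(self, weight_size: int) -> bool:
        # Grant the request only if at least `minimum` bytes stay free afterwards.
        if self.value - weight_size < self.minimum:
            return False
        self.value -= weight_size
        return True

GiB = 1024 ** 3
free = 8 * GiB       # stand-in for get_free_memory(self.load_device); illustrative value
reserve = 1 * GiB    # stand-in for minimum_inference_memory(); illustrative value

# Before this commit the floor was `reserve`; after it, the floor is doubled,
# leaving extra headroom for inference until a per-model estimate exists.
counter = MemoryCounterSketch(initial=free, minimum=reserve * 2)
print(counter.use(5 * GiB))   # True:  8 - 5 = 3 GiB, still above the 2 GiB floor
print(counter.use(2 * GiB))   # False: 3 - 2 = 1 GiB would dip below the 2 GiB floor

The practical effect of the doubled floor is conservative: fewer hook-patched weights are cached on the device in MaxSpeed hook mode, trading some speed for a lower risk of running out of memory during inference.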