Try to improve inference speed on some machines.

comfyanonymous 2024-08-08 17:28:35 -04:00
parent 1e11d2d1f5
commit 037c38eb0f
1 changed file with 2 additions and 2 deletions


@@ -432,11 +432,11 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
     global vram_state
     inference_memory = minimum_inference_memory()
-    extra_mem = max(inference_memory, memory_required)
+    extra_mem = max(inference_memory, memory_required) + 100 * 1024 * 1024
     if minimum_memory_required is None:
         minimum_memory_required = extra_mem
     else:
-        minimum_memory_required = max(inference_memory, minimum_memory_required)
+        minimum_memory_required = max(inference_memory, minimum_memory_required) + 100 * 1024 * 1024
     models = set(models)
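
A minimal sketch of what the changed lines now compute, using hypothetical names (EXTRA_RESERVED_VRAM, required_free_vram) rather than the actual ComfyUI function: the amount of VRAM to keep free becomes the larger of the minimum inference memory and the caller's request, plus an extra 100 MiB (100 * 1024 * 1024 bytes) of headroom, presumably so models are not loaded quite so close to the VRAM limit.

# Illustrative sketch only; names are hypothetical, not the actual ComfyUI API.
EXTRA_RESERVED_VRAM = 100 * 1024 * 1024  # the new 100 MiB buffer added by this commit

def required_free_vram(inference_memory, memory_required=0, minimum_memory_required=None):
    # Reserve at least the minimum inference memory (or the caller's request,
    # whichever is larger), plus the extra 100 MiB of headroom.
    extra_mem = max(inference_memory, memory_required) + EXTRA_RESERVED_VRAM
    if minimum_memory_required is None:
        minimum_memory_required = extra_mem
    else:
        minimum_memory_required = max(inference_memory, minimum_memory_required) + EXTRA_RESERVED_VRAM
    return extra_mem, minimum_memory_required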