Memory tweaks.

This commit is contained in:
comfyanonymous 2024-08-12 15:03:33 -04:00
parent ce37c11164
commit b8ffb2937f
1 changed file with 2 additions and 2 deletions

View File

@ -438,11 +438,11 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
global vram_state
inference_memory = minimum_inference_memory()
-    extra_mem = max(inference_memory, memory_required) + 100 * 1024 * 1024
+    extra_mem = max(inference_memory, memory_required + 300 * 1024 * 1024)
if minimum_memory_required is None:
minimum_memory_required = extra_mem
else:
-        minimum_memory_required = max(inference_memory, minimum_memory_required) + 100 * 1024 * 1024
+        minimum_memory_required = max(inference_memory, minimum_memory_required + 300 * 1024 * 1024)
models = set(models)