From 8edbcf520900112d4e11f510ba33949503b58f51 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 5 Aug 2024 16:24:04 -0400
Subject: [PATCH] Improve performance on some lowend GPUs.

---
 comfy/model_management.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index fb274701..3d9ed525 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -528,7 +528,7 @@ def unet_inital_load_device(parameters, dtype):
         return cpu_dev
 
 def maximum_vram_for_weights(device=None):
-    return (get_total_memory(device) * 0.8 - minimum_inference_memory())
+    return (get_total_memory(device) * 0.88 - minimum_inference_memory())
 
 def unet_dtype(device=None, model_params=0, supported_dtypes=[torch.float16, torch.bfloat16, torch.float32]):
     if args.bf16_unet:
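
Note (not part of the patch): a minimal sketch of what raising the factor from 0.8 to 0.88 means for the weight budget, assuming a hypothetical flat 1 GiB reserve standing in for minimum_inference_memory(); the helper name and numbers below are illustrative, not ComfyUI's actual API.

    GiB = 1024 ** 3

    def maximum_vram_for_weights_sketch(total_memory, reserve=1 * GiB, fraction=0.88):
        # Budget for model weights: a fraction of total VRAM minus an inference reserve.
        # The fraction was 0.8 before this patch; 0.88 keeps a smaller safety margin,
        # letting more of the model weights stay on-device.
        return total_memory * fraction - reserve

    # Example on a hypothetical 4 GiB card with a ~1 GiB reserve:
    old_budget = 4 * GiB * 0.8 - 1 * GiB                    # ~2.20 GiB for weights
    new_budget = maximum_vram_for_weights_sketch(4 * GiB)   # ~2.52 GiB for weights
    print(old_budget / GiB, new_budget / GiB)

On small cards the extra few hundred MiB of budget can be the difference between keeping weights in VRAM and spilling them to system RAM, which is where the performance gain on low-end GPUs would come from.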