Remove useless line, adjust Windows default reserved VRAM.

comfyanonymous 2024-08-21 00:47:19 -04:00
parent f257fc999f
commit 03ec517afb
2 changed files with 1 addition and 2 deletions

comfy/model_management.py

@@ -369,7 +369,7 @@ def minimum_inference_memory():
 EXTRA_RESERVED_VRAM = 200 * 1024 * 1024
 if any(platform.win32_ver()):
-    EXTRA_RESERVED_VRAM = 400 * 1024 * 1024 #Windows is higher because of the shared vram issue
+    EXTRA_RESERVED_VRAM = 500 * 1024 * 1024 #Windows is higher because of the shared vram issue
 if args.reserve_vram is not None:
     EXTRA_RESERVED_VRAM = args.reserve_vram * 1024 * 1024 * 1024

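For reference, a minimal sketch of the reservation logic in this hunk (assuming, as the hunk itself does, that --reserve-vram is given in gigabytes; the helper name below is made up for illustration):

# Hypothetical helper mirroring only this hunk's logic, not the surrounding module.
import platform

def extra_reserved_vram(reserve_vram_gb=None):
    reserved = 200 * 1024 * 1024               # 200 MiB default
    if any(platform.win32_ver()):              # true only on Windows
        reserved = 500 * 1024 * 1024           # raised from 400 MiB to 500 MiB in this commit
    if reserve_vram_gb is not None:
        reserved = int(reserve_vram_gb * 1024 * 1024 * 1024)  # explicit user override
    return reserved
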
comfy/ops.py

@@ -250,7 +250,6 @@ def fp8_linear(self, input):
         return None
     if len(input.shape) == 3:
         out = torch.empty((input.shape[0], input.shape[1], self.weight.shape[0]), device=input.device, dtype=input.dtype)
-        inn = input.view(-1, input.shape[2]).to(dtype)
         non_blocking = comfy.model_management.device_supports_non_blocking(input.device)
         w = cast_to(self.weight, device=input.device, non_blocking=non_blocking).t()
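
The removed inn line built a flattened view of the input that was not needed at this point (the commit message calls it a useless line). As a rough illustration of why a 3-D input gets flattened before a linear op at all (plain float matmul here, not the fp8 scaled-matmul path fp8_linear actually uses; the function name is hypothetical):

import torch

def linear_over_last_dim(x, weight):
    # x: (batch, tokens, in_features); weight: (out_features, in_features)
    flat = x.reshape(-1, x.shape[2])       # collapse batch/token dims for a 2-D matmul
    out = flat @ weight.t()                # (batch * tokens, out_features)
    return out.reshape(x.shape[0], x.shape[1], weight.shape[0])  # restore the 3-D shape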