Revert "Disable cuda malloc by default."
This reverts commit 50bf66e5c4.
This commit is contained in:
parent
33fb282d5c
commit
f1d6cef71c
|
@@ -51,8 +51,8 @@ parser.add_argument("--auto-launch", action="store_true", help="Automatically la
|
||||||
parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.")
|
parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.")
|
||||||
parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
|
parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
|
||||||
cm_group = parser.add_mutually_exclusive_group()
|
cm_group = parser.add_mutually_exclusive_group()
|
||||||
cm_group.add_argument("--cuda-malloc", action="store_true", help="Enable cudaMallocAsync.")
|
cm_group.add_argument("--cuda-malloc", action="store_true", help="Enable cudaMallocAsync (enabled by default for torch 2.0 and up).")
|
||||||
cm_group.add_argument("--disable-cuda-malloc", action="store_true", help="Disable cudaMallocAsync (The current default).")
|
cm_group.add_argument("--disable-cuda-malloc", action="store_true", help="Disable cudaMallocAsync.")
|
||||||
|
|
||||||
|
|
||||||
fp_group = parser.add_mutually_exclusive_group()
|
fp_group = parser.add_mutually_exclusive_group()
|
||||||
|
|
|
@@ -2,7 +2,6 @@ import os
|
||||||
import importlib.util
|
import importlib.util
|
||||||
from comfy.cli_args import args
|
from comfy.cli_args import args
|
||||||
import subprocess
|
import subprocess
|
||||||
import logging
|
|
||||||
|
|
||||||
#Can't use pytorch to get the GPU names because the cuda malloc has to be set before the first import.
|
#Can't use pytorch to get the GPU names because the cuda malloc has to be set before the first import.
|
||||||
def get_gpu_names():
|
def get_gpu_names():
|
||||||
|
@@ -64,7 +63,7 @@ def cuda_malloc_supported():
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
if args.cuda_malloc:
|
if not args.cuda_malloc:
|
||||||
try:
|
try:
|
||||||
version = ""
|
version = ""
|
||||||
torch_spec = importlib.util.find_spec("torch")
|
torch_spec = importlib.util.find_spec("torch")
|
||||||
|
@@ -75,11 +74,8 @@ if args.cuda_malloc:
|
||||||
module = importlib.util.module_from_spec(spec)
|
module = importlib.util.module_from_spec(spec)
|
||||||
spec.loader.exec_module(module)
|
spec.loader.exec_module(module)
|
||||||
version = module.__version__
|
version = module.__version__
|
||||||
supported = False
|
|
||||||
if int(version[0]) >= 2: #enable by default for torch version 2.0 and up
|
if int(version[0]) >= 2: #enable by default for torch version 2.0 and up
|
||||||
supported = cuda_malloc_supported()
|
args.cuda_malloc = cuda_malloc_supported()
|
||||||
if not supported:
|
|
||||||
logging.warning("WARNING: cuda malloc enabled but not supported.")
|
|
||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue