From 861fd58819534e72c548c26c9050dc193342a505 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 13 Aug 2023 12:37:53 -0400
Subject: [PATCH] Add a warning if a card that doesn't support cuda malloc has it enabled.

---
 cuda_malloc.py | 12 ++++++------
 main.py        | 14 ++++++++++++++
 2 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/cuda_malloc.py b/cuda_malloc.py
index d033529c..9527f30e 100644
--- a/cuda_malloc.py
+++ b/cuda_malloc.py
@@ -36,13 +36,13 @@ def get_gpu_names():
     else:
         return set()
 
-def cuda_malloc_supported():
-    blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M",
-                 "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745", "Quadro K620",
-                 "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
-                 "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000",
-                 "GeForce MX110", "GeForce MX130", "GeForce 830M", "GeForce 840M", "GeForce GTX 850M", "GeForce GTX 860M"}
+blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M",
+             "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745", "Quadro K620",
+             "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
+             "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000",
+             "GeForce MX110", "GeForce MX130", "GeForce 830M", "GeForce 840M", "GeForce GTX 850M", "GeForce GTX 860M"}
+def cuda_malloc_supported():
     try:
         names = get_gpu_names()
     except:

diff --git a/main.py b/main.py
index 1571376b..a4038db4 100644
--- a/main.py
+++ b/main.py
@@ -72,6 +72,17 @@ from server import BinaryEventTypes
 from nodes import init_custom_nodes
 import comfy.model_management
 
+def cuda_malloc_warning():
+    device = comfy.model_management.get_torch_device()
+    device_name = comfy.model_management.get_torch_device_name(device)
+    cuda_malloc_warning = False
+    if "cudaMallocAsync" in device_name:
+        for b in cuda_malloc.blacklist:
+            if b in device_name:
+                cuda_malloc_warning = True
+    if cuda_malloc_warning:
+        print("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n")
+
 def prompt_worker(q, server):
     e = execution.PromptExecutor(server)
     while True:
@@ -147,6 +158,9 @@ if __name__ == "__main__":
             load_extra_path_config(config_path)
 
     init_custom_nodes()
+
+    cuda_malloc_warning()
+
     server.add_routes()
     hijack_progress(server)
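
The check added to main.py relies on the device name string carrying both the card model and, when cuda malloc is active, the "cudaMallocAsync" marker: the warning fires only when that marker is present and an entry from cuda_malloc.blacklist also appears as a substring. A minimal standalone sketch of that logic follows; the device-name format and the trimmed blacklist are illustrative assumptions, not part of the patch.

    # Hypothetical device-name strings; in ComfyUI the real value comes from
    # comfy.model_management.get_torch_device_name(device).
    blacklist = {"GeForce GTX 960", "Quadro K620"}  # trimmed subset for illustration

    def needs_warning(device_name: str) -> bool:
        # cuda malloc is considered enabled only if the backend marker is present...
        if "cudaMallocAsync" not in device_name:
            return False
        # ...and the card is flagged when any blacklisted model name is a substring.
        return any(b in device_name for b in blacklist)

    print(needs_warning("cuda:0 NVIDIA GeForce GTX 960 : cudaMallocAsync"))   # True
    print(needs_warning("cuda:0 NVIDIA GeForce RTX 3090 : cudaMallocAsync"))  # False

Because the match is a plain substring test, the warning stays advisory: a user on a flagged card can still disable the allocator with --disable-cuda-malloc, as the printed message suggests.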