import os
import importlib.util
import folder_paths
import time


def execute_prestartup_script():
    """Run any prestartup_script.py found in custom node folders before the main imports."""
    def execute_script(script_path):
        module_name = os.path.splitext(script_path)[0]
        try:
            spec = importlib.util.spec_from_file_location(module_name, script_path)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            return True
        except Exception as e:
            print(f"Failed to execute startup-script: {script_path} / {e}")
        return False

    node_paths = folder_paths.get_folder_paths("custom_nodes")
    node_prestartup_times = []
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            # Skip plain files, disabled nodes, and __pycache__ (compare the entry name, not the joined path).
            if os.path.isfile(module_path) or module_path.endswith(".disabled") or possible_module == "__pycache__":
                continue

            script_path = os.path.join(module_path, "prestartup_script.py")
            if os.path.exists(script_path):
                time_before = time.perf_counter()
                success = execute_script(script_path)
                node_prestartup_times.append((time.perf_counter() - time_before, module_path, success))

    if len(node_prestartup_times) > 0:
        print("\nPrestartup times for custom nodes:")
        for n in sorted(node_prestartup_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (PRESTARTUP FAILED)"
            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
        print()

execute_prestartup_script()


# Main code
import asyncio
import itertools
import shutil
import threading
import gc

from comfy.cli_args import args

if os.name == "nt":
    import logging
    logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())

if __name__ == "__main__":
    if args.cuda_device is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
        print("Set cuda device to:", args.cuda_device)

    if not args.cuda_malloc:
        try:  # if there's a better way to check the torch version without importing it let me know
            version = ""
            torch_spec = importlib.util.find_spec("torch")
            for folder in torch_spec.submodule_search_locations:
                ver_file = os.path.join(folder, "version.py")
                if os.path.isfile(ver_file):
                    spec = importlib.util.spec_from_file_location("torch_version_import", ver_file)
                    module = importlib.util.module_from_spec(spec)
                    spec.loader.exec_module(module)
                    version = module.__version__
            if int(version[0]) >= 2:  # enable by default for torch version 2.0 and up
                args.cuda_malloc = True
        except Exception:
            pass

    if args.cuda_malloc and not args.disable_cuda_malloc:
        # Request PyTorch's asynchronous CUDA allocator, preserving any existing config.
        env_var = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', None)
        if env_var is None:
            env_var = "backend:cudaMallocAsync"
        else:
            env_var += ",backend:cudaMallocAsync"
        os.environ['PYTORCH_CUDA_ALLOC_CONF'] = env_var

import comfy.utils
import yaml

import execution
import server
from server import BinaryEventTypes
from nodes import init_custom_nodes
import comfy.model_management


def prompt_worker(q, server):
    # Runs on a background thread: pull queued prompts and execute them one at a time.
    e = execution.PromptExecutor(server)
    while True:
        item, item_id = q.get()
        execution_start_time = time.perf_counter()
        prompt_id = item[1]
        e.execute(item[2], prompt_id, item[3], item[4])
        q.task_done(item_id, e.outputs_ui)
        if server.client_id is not None:
            server.send_sync("executing", {"node": None, "prompt_id": prompt_id}, server.client_id)

        print("Prompt executed in {:.2f} seconds".format(time.perf_counter() - execution_start_time))
        gc.collect()
        comfy.model_management.soft_empty_cache()


async def run(server, address='', port=8188, verbose=True, call_on_start=None):
    await asyncio.gather(server.start(address, port, verbose, call_on_start), server.publish_loop())
def hijack_progress(server):
    # Forward progress bar updates (and preview images) to the connected client.
    def hook(value, total, preview_image_bytes):
        server.send_sync("progress", {"value": value, "max": total}, server.client_id)
        if preview_image_bytes is not None:
            server.send_sync(BinaryEventTypes.PREVIEW_IMAGE, preview_image_bytes, server.client_id)
    comfy.utils.set_progress_bar_global_hook(hook)


def cleanup_temp():
    temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir, ignore_errors=True)


def load_extra_path_config(yaml_path):
    with open(yaml_path, 'r') as stream:
        config = yaml.safe_load(stream)
    for c in config:
        conf = config[c]
        if conf is None:
            continue
        base_path = None
        if "base_path" in conf:
            base_path = conf.pop("base_path")
        for x in conf:
            for y in conf[x].split("\n"):
                if len(y) == 0:
                    continue
                full_path = y
                if base_path is not None:
                    full_path = os.path.join(base_path, full_path)
                print("Adding extra search path", x, full_path)
                folder_paths.add_model_folder_path(x, full_path)


if __name__ == "__main__":
    cleanup_temp()

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    server = server.PromptServer(loop)
    q = execution.PromptQueue(server)

    extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
    if os.path.isfile(extra_model_paths_config_path):
        load_extra_path_config(extra_model_paths_config_path)

    if args.extra_model_paths_config:
        for config_path in itertools.chain(*args.extra_model_paths_config):
            load_extra_path_config(config_path)

    init_custom_nodes()
    server.add_routes()
    hijack_progress(server)

    threading.Thread(target=prompt_worker, daemon=True, args=(q, server,)).start()

    if args.output_directory:
        output_dir = os.path.abspath(args.output_directory)
        print(f"Setting output directory to: {output_dir}")
        folder_paths.set_output_directory(output_dir)

    if args.quick_test_for_ci:
        exit(0)

    call_on_start = None
    if args.auto_launch:
        def startup_server(address, port):
            import webbrowser
            webbrowser.open(f"http://{address}:{port}")
        call_on_start = startup_server

    try:
        loop.run_until_complete(run(server, address=args.listen, port=args.port, verbose=not args.dont_print_server, call_on_start=call_on_start))
    except KeyboardInterrupt:
        print("\nStopped server")

    cleanup_temp()
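
# For reference, a hypothetical extra_model_paths.yaml layout that
# load_extra_path_config() above would accept: each top-level key names a
# config group, an optional "base_path" is joined onto the newline-separated
# paths listed under each folder type. The group name and paths here are
# illustrative, not part of the code.
#
#   my_models:
#       base_path: /opt/models
#       checkpoints: |
#           sd15
#           sdxl
#       loras: extra_loras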