import comfy.options
comfy.options.enable_args_parsing()

import os
import importlib.util
import folder_paths
import time
from comfy.cli_args import args


def execute_prestartup_script():
    def execute_script(script_path):
        module_name = os.path.splitext(script_path)[0]
        try:
            spec = importlib.util.spec_from_file_location(module_name, script_path)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            return True
        except Exception as e:
            print(f"Failed to execute startup-script: {script_path} / {e}")
        return False

    if args.disable_all_custom_nodes:
        return

    node_paths = folder_paths.get_folder_paths("custom_nodes")
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)

        node_prestartup_times = []

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) or module_path.endswith(".disabled") or module_path == "__pycache__":
                continue

            script_path = os.path.join(module_path, "prestartup_script.py")
            if os.path.exists(script_path):
                time_before = time.perf_counter()
                success = execute_script(script_path)
                node_prestartup_times.append((time.perf_counter() - time_before, module_path, success))
    if len(node_prestartup_times) > 0:
        print("\nPrestartup times for custom nodes:")
        for n in sorted(node_prestartup_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (PRESTARTUP FAILED)"
            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
        print()
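
# Prestartup scripts let a custom node package run code before the heavy imports
# below. A minimal sketch of custom_nodes/<package>/prestartup_script.py
# (hypothetical example, not shipped with this repo):
#
#     import os
#     os.environ.setdefault("MY_NODE_CACHE", "/tmp/my_node_cache")  # assumed var name
#     print("my_custom_node: prestartup complete")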

execute_prestartup_script()


# Main code
import asyncio
import itertools
import shutil
import threading
import gc

import logging

if os.name == "nt":
    logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())

if __name__ == "__main__":
    if args.cuda_device is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
        logging.info("Set cuda device to: {}".format(args.cuda_device))

    if args.deterministic:
        if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ:
            os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"
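        # ":4096:8" is the workspace size PyTorch's determinism docs specify for
        # cuBLAS; it must be in the environment before cuBLAS initializes, which is
        # presumably why it is set here, ahead of the torch-importing modules below.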

    import cuda_malloc

if args.windows_standalone_build:
    try:
        import fix_torch
    except:
        pass

import comfy.utils
import yaml

import execution
import server
from server import BinaryEventTypes
import nodes
import comfy.model_management


def cuda_malloc_warning():
    device = comfy.model_management.get_torch_device()
    device_name = comfy.model_management.get_torch_device_name(device)
    cuda_malloc_warning = False
    if "cudaMallocAsync" in device_name:
        for b in cuda_malloc.blacklist:
            if b in device_name:
                cuda_malloc_warning = True
        if cuda_malloc_warning:
            logging.warning("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n")
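
# prompt_worker is the single consumer of the PromptQueue. It executes one prompt
# at a time and defers gc.collect()/cache cleanup until the queue has been idle
# for gc_collect_interval seconds, so cleanup never delays a busy queue.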
def prompt_worker(q, server):
    e = execution.PromptExecutor(server)
    last_gc_collect = 0
    need_gc = False
    gc_collect_interval = 10.0

    while True:
        timeout = 1000.0
        if need_gc:
            timeout = max(gc_collect_interval - (current_time - last_gc_collect), 0.0)

        queue_item = q.get(timeout=timeout)
        if queue_item is not None:
            item, item_id = queue_item
            execution_start_time = time.perf_counter()
            prompt_id = item[1]
            server.last_prompt_id = prompt_id

            e.execute(item[2], prompt_id, item[3], item[4])
            need_gc = True
            q.task_done(item_id,
                        e.outputs_ui,
                        status=execution.PromptQueue.ExecutionStatus(
                            status_str='success' if e.success else 'error',
                            completed=e.success,
                            messages=e.status_messages))
            if server.client_id is not None:
                server.send_sync("executing", {"node": None, "prompt_id": prompt_id}, server.client_id)

            current_time = time.perf_counter()
            execution_time = current_time - execution_start_time
            logging.info("Prompt executed in {:.2f} seconds".format(execution_time))

        flags = q.get_flags()
        free_memory = flags.get("free_memory", False)

        if flags.get("unload_models", free_memory):
            comfy.model_management.unload_all_models()
            need_gc = True
            last_gc_collect = 0

        if free_memory:
            e.reset()
            need_gc = True
            last_gc_collect = 0

        if need_gc:
            current_time = time.perf_counter()
            if (current_time - last_gc_collect) > gc_collect_interval:
                comfy.model_management.cleanup_models()
                gc.collect()
                comfy.model_management.soft_empty_cache()
                last_gc_collect = current_time
                need_gc = False
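
# Judging from the indexing in prompt_worker, a queue item looks like the tuple
# (number, prompt_id, prompt, extra_data, outputs_to_execute); only indices 1-4
# are consumed above. A hypothetical producer would enqueue with something like:
#
#     q.put((number, prompt_id, prompt, extra_data, outputs_to_execute))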

async def run(server, address='', port=8188, verbose=True, call_on_start=None):
    await asyncio.gather(server.start(address, port, verbose, call_on_start), server.publish_loop())


def hijack_progress(server):
    def hook(value, total, preview_image):
        comfy.model_management.throw_exception_if_processing_interrupted()
        progress = {"value": value, "max": total, "prompt_id": server.last_prompt_id, "node": server.last_node_id}
        server.send_sync("progress", progress, server.client_id)
        if preview_image is not None:
            server.send_sync(BinaryEventTypes.UNENCODED_PREVIEW_IMAGE, preview_image, server.client_id)
    comfy.utils.set_progress_bar_global_hook(hook)
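
# Anything that updates comfy.utils' global progress bar should end up in the hook
# above, so each reported step both streams progress (and previews) to the client
# and gives the server a chance to raise if the user interrupted processing.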

def cleanup_temp():
    temp_dir = folder_paths.get_temp_directory()
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir, ignore_errors=True)


def load_extra_path_config(yaml_path):
    with open(yaml_path, 'r') as stream:
        config = yaml.safe_load(stream)
    for c in config:
        conf = config[c]
        if conf is None:
            continue
        base_path = None
        if "base_path" in conf:
            base_path = conf.pop("base_path")
        for x in conf:
            for y in conf[x].split("\n"):
                if len(y) == 0:
                    continue
                full_path = y
                if base_path is not None:
                    full_path = os.path.join(base_path, full_path)
                logging.info("Adding extra search path {} {}".format(x, full_path))
                folder_paths.add_model_folder_path(x, full_path)
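
# load_extra_path_config expects YAML shaped roughly like this (hypothetical
# example; the repo ships extra_model_paths.yaml.example as a template):
#
#     my_storage:
#         base_path: /mnt/models
#         checkpoints: |
#             checkpoints
#             more_checkpoints
#         vae: vae
#
# Every key other than "base_path" names a model folder type; a value may hold
# several newline-separated paths, each joined onto base_path when one is set.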


if __name__ == "__main__":
    if args.temp_directory:
        temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp")
        logging.info(f"Setting temp directory to: {temp_dir}")
        folder_paths.set_temp_directory(temp_dir)
    cleanup_temp()

    if args.windows_standalone_build:
        try:
            import new_updater
            new_updater.update_windows_updater()
        except:
            pass

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    server = server.PromptServer(loop)
    q = execution.PromptQueue(server)
2023-04-16 05:36:15 +00:00
extra_model_paths_config_path = os . path . join ( os . path . dirname ( os . path . realpath ( __file__ ) ) , " extra_model_paths.yaml " )
if os . path . isfile ( extra_model_paths_config_path ) :
load_extra_path_config ( extra_model_paths_config_path )
if args . extra_model_paths_config :
for config_path in itertools . chain ( * args . extra_model_paths_config ) :
load_extra_path_config ( config_path )

    nodes.init_extra_nodes(init_custom_nodes=not args.disable_all_custom_nodes)

    cuda_malloc_warning()

    server.add_routes()
    hijack_progress(server)

    threading.Thread(target=prompt_worker, daemon=True, args=(q, server,)).start()
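    # The worker blocks in q.get() on a daemon thread, so it exits with the process;
    # the asyncio event loop below keeps the main thread for the web server itself.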

    if args.output_directory:
        output_dir = os.path.abspath(args.output_directory)
        logging.info(f"Setting output directory to: {output_dir}")
        folder_paths.set_output_directory(output_dir)

    # These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc. nodes
    folder_paths.add_model_folder_path("checkpoints", os.path.join(folder_paths.get_output_directory(), "checkpoints"))
    folder_paths.add_model_folder_path("clip", os.path.join(folder_paths.get_output_directory(), "clip"))
    folder_paths.add_model_folder_path("vae", os.path.join(folder_paths.get_output_directory(), "vae"))

    if args.input_directory:
        input_dir = os.path.abspath(args.input_directory)
        logging.info(f"Setting input directory to: {input_dir}")
        folder_paths.set_input_directory(input_dir)

    if args.quick_test_for_ci:
        exit(0)

    call_on_start = None
    if args.auto_launch:
        def startup_server(scheme, address, port):
            import webbrowser
            if os.name == 'nt' and address == '0.0.0.0':
                address = '127.0.0.1'
            webbrowser.open(f"{scheme}://{address}:{port}")
        call_on_start = startup_server

    try:
        loop.run_until_complete(server.setup())
        loop.run_until_complete(run(server, address=args.listen, port=args.port, verbose=not args.dont_print_server, call_on_start=call_on_start))
    except KeyboardInterrupt:
        logging.info("\nStopped server")

    cleanup_temp()