Change log levels.

The logging level now defaults to INFO; --verbose sets it to DEBUG.

parent dc6d4151a2
commit 0ed72befe1
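
For illustration only, here is a minimal sketch (not code from this repository) of how the new default interacts with --verbose and with the log calls touched by this commit; the argparse wiring and the logging.basicConfig call are assumptions made for the sketch:

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument("--verbose", action="store_true", help="enable debug logging")
args = parser.parse_args()

logging_level = logging.INFO       # new default: INFO and above are shown
if args.verbose:
    logging_level = logging.DEBUG  # --verbose also shows DEBUG messages

logging.basicConfig(level=logging_level)  # assumed root-logger setup for this sketch

logging.debug("unexpected controlnet keys: ...")        # hidden unless --verbose
logging.info("Total VRAM 8192 MB, total RAM 32768 MB")  # visible by default
logging.warning("missing controlnet keys: ...")         # always visible
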
@@ -129,7 +129,7 @@ if args.disable_auto_launch:
     args.auto_launch = False

 import logging
-logging_level = logging.WARNING
+logging_level = logging.INFO
 if args.verbose:
     logging_level = logging.DEBUG

@@ -432,7 +432,7 @@ def load_controlnet(ckpt_path, model=None):
         logging.warning("missing controlnet keys: {}".format(missing))

     if len(unexpected) > 0:
-        logging.info("unexpected controlnet keys: {}".format(unexpected))
+        logging.debug("unexpected controlnet keys: {}".format(unexpected))

     global_average_pooling = False
     filename = os.path.splitext(ckpt_path)[0]
@@ -545,6 +545,6 @@ def load_t2i_adapter(t2i_data):
         logging.warning("t2i missing {}".format(missing))

     if len(unexpected) > 0:
-        logging.info("t2i unexpected {}".format(unexpected))
+        logging.debug("t2i unexpected {}".format(unexpected))

     return T2IAdapter(model_ad, model_ad.input_channels, compression_ratio, upscale_algorithm)
@@ -178,7 +178,7 @@ def convert_vae_state_dict(vae_state_dict):
     for k, v in new_state_dict.items():
         for weight_name in weights_to_convert:
             if f"mid.attn_1.{weight_name}.weight" in k:
-                logging.info(f"Reshaping {k} for SD format")
+                logging.debug(f"Reshaping {k} for SD format")
                 new_state_dict[k] = reshape_weight_for_sd(v)
     return new_state_dict

@@ -67,8 +67,8 @@ class BaseModel(torch.nn.Module):
         if self.adm_channels is None:
             self.adm_channels = 0
         self.inpaint_model = False
-        logging.warning("model_type {}".format(model_type.name))
-        logging.info("adm {}".format(self.adm_channels))
+        logging.info("model_type {}".format(model_type.name))
+        logging.debug("adm {}".format(self.adm_channels))

     def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, transformer_options={}, **kwargs):
         sigma = t
@@ -30,7 +30,7 @@ lowvram_available = True
 xpu_available = False

 if args.deterministic:
-    logging.warning("Using deterministic algorithms for pytorch")
+    logging.info("Using deterministic algorithms for pytorch")
     torch.use_deterministic_algorithms(True, warn_only=True)

 directml_enabled = False
@@ -42,7 +42,7 @@ if args.directml is not None:
         directml_device = torch_directml.device()
     else:
         directml_device = torch_directml.device(device_index)
-    logging.warning("Using directml with device: {}".format(torch_directml.device_name(device_index)))
+    logging.info("Using directml with device: {}".format(torch_directml.device_name(device_index)))
     # torch_directml.disable_tiled_resources(True)
     lowvram_available = False #TODO: need to find a way to get free memory in directml before this can be enabled by default.

@@ -118,7 +118,7 @@ def get_total_memory(dev=None, torch_total_too=False):

 total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
 total_ram = psutil.virtual_memory().total / (1024 * 1024)
-logging.warning("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
+logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
 if not args.normalvram and not args.cpu:
     if lowvram_available and total_vram <= 4096:
         logging.warning("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
@@ -144,7 +144,7 @@ else:
             pass
         try:
             XFORMERS_VERSION = xformers.version.__version__
-            logging.warning("xformers version: {}".format(XFORMERS_VERSION))
+            logging.info("xformers version: {}".format(XFORMERS_VERSION))
             if XFORMERS_VERSION.startswith("0.0.18"):
                 logging.warning("\nWARNING: This version of xformers has a major bug where you will get black images when generating high resolution images.")
                 logging.warning("Please downgrade or upgrade xformers to a different version.\n")
@@ -212,11 +212,11 @@ elif args.highvram or args.gpu_only:
 FORCE_FP32 = False
 FORCE_FP16 = False
 if args.force_fp32:
-    logging.warning("Forcing FP32, if this improves things please report it.")
+    logging.info("Forcing FP32, if this improves things please report it.")
     FORCE_FP32 = True

 if args.force_fp16:
-    logging.warning("Forcing FP16.")
+    logging.info("Forcing FP16.")
     FORCE_FP16 = True

 if lowvram_available:
@@ -230,12 +230,12 @@ if cpu_state != CPUState.GPU:
 if cpu_state == CPUState.MPS:
     vram_state = VRAMState.SHARED

-logging.warning(f"Set vram state to: {vram_state.name}")
+logging.info(f"Set vram state to: {vram_state.name}")

 DISABLE_SMART_MEMORY = args.disable_smart_memory

 if DISABLE_SMART_MEMORY:
-    logging.warning("Disabling smart memory management")
+    logging.info("Disabling smart memory management")

 def get_torch_device_name(device):
     if hasattr(device, 'type'):
@@ -253,11 +253,11 @@ def get_torch_device_name(device):
         return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device))

 try:
-    logging.warning("Device: {}".format(get_torch_device_name(get_torch_device())))
+    logging.info("Device: {}".format(get_torch_device_name(get_torch_device())))
 except:
     logging.warning("Could not pick default device.")

-logging.warning("VAE dtype: {}".format(VAE_DTYPE))
+logging.info("VAE dtype: {}".format(VAE_DTYPE))

 current_loaded_models = []

@@ -300,7 +300,7 @@ class LoadedModel:
                 raise e

         if lowvram_model_memory > 0:
-            logging.warning("loading in lowvram mode {}".format(lowvram_model_memory/(1024 * 1024)))
+            logging.info("loading in lowvram mode {}".format(lowvram_model_memory/(1024 * 1024)))
             mem_counter = 0
             for m in self.real_model.modules():
                 if hasattr(m, "comfy_cast_weights"):
@@ -347,7 +347,7 @@ def unload_model_clones(model):
             to_unload = [i] + to_unload

     for i in to_unload:
-        logging.warning("unload clone {}".format(i))
+        logging.debug("unload clone {}".format(i))
         current_loaded_models.pop(i).model_unload()

 def free_memory(memory_required, device, keep_loaded=[]):
@@ -389,7 +389,7 @@ def load_models_gpu(models, memory_required=0):
             models_already_loaded.append(loaded_model)
         else:
             if hasattr(x, "model"):
-                logging.warning(f"Requested to load {x.model.__class__.__name__}")
+                logging.info(f"Requested to load {x.model.__class__.__name__}")
             models_to_load.append(loaded_model)

     if len(models_to_load) == 0:
@@ -399,7 +399,7 @@ def load_models_gpu(models, memory_required=0):
             free_memory(extra_mem, d, models_already_loaded)
         return

-    logging.warning(f"Loading {len(models_to_load)} new model{'s' if len(models_to_load) > 1 else ''}")
+    logging.info(f"Loading {len(models_to_load)} new model{'s' if len(models_to_load) > 1 else ''}")

     total_memory_required = {}
     for loaded_model in models_to_load:
comfy/sd.py (12 changed lines)
@@ -229,7 +229,7 @@ class VAE:
             logging.warning("Missing VAE keys {}".format(m))

             if len(u) > 0:
-                logging.info("Leftover VAE keys {}".format(u))
+                logging.debug("Leftover VAE keys {}".format(u))

         if device is None:
             device = model_management.vae_device()
@@ -397,7 +397,7 @@ def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DI
             logging.warning("clip missing: {}".format(m))

         if len(u) > 0:
-            logging.info("clip unexpected: {}".format(u))
+            logging.debug("clip unexpected: {}".format(u))
     return clip

 def load_gligen(ckpt_path):
@@ -538,18 +538,18 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
                     logging.warning("clip missing: {}".format(m))

                 if len(u) > 0:
-                    logging.info("clip unexpected {}:".format(u))
+                    logging.debug("clip unexpected {}:".format(u))
             else:
                 logging.warning("no CLIP/text encoder weights in checkpoint, the text encoder model will not be loaded.")

     left_over = sd.keys()
     if len(left_over) > 0:
-        logging.info("left over keys: {}".format(left_over))
+        logging.debug("left over keys: {}".format(left_over))

     if output_model:
         model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device(), current_device=inital_load_device)
         if inital_load_device != torch.device("cpu"):
-            logging.warning("loaded straight to GPU")
+            logging.info("loaded straight to GPU")
             model_management.load_model_gpu(model_patcher)

     return (model_patcher, clip, vae, clipvision)
@@ -589,7 +589,7 @@ def load_unet_state_dict(sd): #load unet in diffusers format
     model.load_model_weights(new_sd, "")
     left_over = sd.keys()
     if len(left_over) > 0:
-        logging.warning("left over keys in unet: {}".format(left_over))
+        logging.info("left over keys in unet: {}".format(left_over))
     return comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=offload_device)

 def load_unet(unet_path):
@@ -22,7 +22,7 @@ def load_torch_file(ckpt, safe_load=False, device=None):
     else:
         pl_sd = torch.load(ckpt, map_location=device, pickle_module=comfy.checkpoint_pickle)
         if "global_step" in pl_sd:
-            logging.info(f"Global Step: {pl_sd['global_step']}")
+            logging.debug(f"Global Step: {pl_sd['global_step']}")
         if "state_dict" in pl_sd:
             sd = pl_sd["state_dict"]
         else:
nodes.py (6 changed lines)
@@ -1925,14 +1925,14 @@ def load_custom_nodes():
             node_import_times.append((time.perf_counter() - time_before, module_path, success))

     if len(node_import_times) > 0:
-        logging.warning("\nImport times for custom nodes:")
+        logging.info("\nImport times for custom nodes:")
         for n in sorted(node_import_times):
             if n[2]:
                 import_message = ""
             else:
                 import_message = " (IMPORT FAILED)"
-            logging.warning("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
-        logging.warning("")
+            logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
+        logging.info("")

 def init_custom_nodes():
     extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
server.py (15 changed lines)
@@ -17,6 +17,7 @@ from io import BytesIO

 import aiohttp
 from aiohttp import web
+import logging

 import mimetypes
 from comfy.cli_args import args
@@ -33,7 +34,7 @@ async def send_socket_catch_exception(function, message):
     try:
         await function(message)
     except (aiohttp.ClientError, aiohttp.ClientPayloadError, ConnectionResetError) as err:
-        print("send error:", err)
+        logging.warning("send error: {}".format(err))

 @web.middleware
 async def cache_control(request: web.Request, handler):
@@ -111,7 +112,7 @@ class PromptServer():

             async for msg in ws:
                 if msg.type == aiohttp.WSMsgType.ERROR:
-                    print('ws connection closed with exception %s' % ws.exception())
+                    logging.warning('ws connection closed with exception %s' % ws.exception())
         finally:
             self.sockets.pop(sid, None)
         return ws
@@ -446,7 +447,7 @@ class PromptServer():

         @routes.post("/prompt")
         async def post_prompt(request):
-            print("got prompt")
+            logging.info("got prompt")
             resp_code = 200
             out_string = ""
             json_data = await request.json()
@@ -478,7 +479,7 @@ class PromptServer():
                     response = {"prompt_id": prompt_id, "number": number, "node_errors": valid[3]}
                     return web.json_response(response)
                 else:
-                    print("invalid prompt:", valid[1])
+                    logging.warning("invalid prompt: {}".format(valid[1]))
                     return web.json_response({"error": valid[1], "node_errors": valid[3]}, status=400)
             else:
                 return web.json_response({"error": "no prompt", "node_errors": []}, status=400)
@@ -626,8 +627,8 @@ class PromptServer():
         await site.start()

         if verbose:
-            print("Starting server\n")
-            print("To see the GUI go to: http://{}:{}".format(address, port))
+            logging.info("Starting server\n")
+            logging.info("To see the GUI go to: http://{}:{}".format(address, port))
         if call_on_start is not None:
             call_on_start(address, port)

@@ -639,7 +640,7 @@ class PromptServer():
             try:
                 json_data = handler(json_data)
             except Exception as e:
-                print(f"[ERROR] An error occurred during the on_prompt_handler processing")
+                logging.warning(f"[ERROR] An error occurred during the on_prompt_handler processing")
                 traceback.print_exc()

         return json_data