Pull some small changes from the other repo.

comfyanonymous 2023-10-11 20:35:50 -04:00
parent ac7d8cfa87
commit 20d3852aa1
6 changed files with 22 additions and 13 deletions

comfy/model_management.py

@@ -354,6 +354,8 @@ def load_models_gpu(models, memory_required=0):
             current_loaded_models.insert(0, current_loaded_models.pop(index))
             models_already_loaded.append(loaded_model)
         else:
+            if hasattr(x, "model"):
+                print(f"Requested to load {x.model.__class__.__name__}")
             models_to_load.append(loaded_model)
 
     if len(models_to_load) == 0:
@@ -363,7 +365,7 @@ def load_models_gpu(models, memory_required=0):
                 free_memory(extra_mem, d, models_already_loaded)
         return
 
-    print("loading new")
+    print(f"Loading {len(models_to_load)} new model{'s' if len(models_to_load) > 1 else ''}")
 
     total_memory_required = {}
     for loaded_model in models_to_load:
@@ -405,7 +407,6 @@ def load_model_gpu(model):
 
 def cleanup_models():
     to_delete = []
     for i in range(len(current_loaded_models)):
-        print(sys.getrefcount(current_loaded_models[i].model))
         if sys.getrefcount(current_loaded_models[i].model) <= 2:
             to_delete = [i] + to_delete
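
Note: the deleted debug print was inspecting the invariant cleanup_models relies on. In CPython, sys.getrefcount counts the temporary reference created by the call itself, so a value of 2 means the current_loaded_models entry is the only remaining holder and the model can be dropped. A minimal standalone sketch of that behavior (CPython-specific, not ComfyUI code):

import sys

models = [object()]
print(sys.getrefcount(models[0]))  # 2: the list slot + getrefcount's own argument

extra_ref = models[0]              # take one more reference
print(sys.getrefcount(models[0]))  # 3: list slot + extra_ref + the argument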

comfy/utils.py

@@ -408,6 +408,10 @@ def tiled_scale(samples, function, tile_x=64, tile_y=64, overlap = 8, upscale_am
         output[b:b+1] = out/out_div
     return output
 
+PROGRESS_BAR_ENABLED = True
+def set_progress_bar_enabled(enabled):
+    global PROGRESS_BAR_ENABLED
+    PROGRESS_BAR_ENABLED = enabled
 
 PROGRESS_BAR_HOOK = None
 def set_progress_bar_global_hook(function):
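
Note: this new flag is what the sampler hunks below read as disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED. A sketch of how an embedding script might use it (run_my_workflow is a placeholder, not a real ComfyUI API):

import comfy.utils

# Silence the terminal progress bar while running headless, then restore it.
comfy.utils.set_progress_bar_enabled(False)
try:
    run_my_workflow()  # placeholder for whatever queues and executes the prompt
finally:
    comfy.utils.set_progress_bar_enabled(True)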

comfy_extras/nodes_custom_sampler.py

@@ -3,6 +3,7 @@ import comfy.sample
 from comfy.k_diffusion import sampling as k_diffusion_sampling
 import latent_preview
 import torch
+import comfy.utils
 
 
 class BasicScheduler:
@@ -219,7 +220,7 @@ class SamplerCustom:
         x0_output = {}
         callback = latent_preview.prepare_callback(model, sigmas.shape[-1] - 1, x0_output)
 
-        disable_pbar = False
+        disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
         samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=noise_seed)
 
         out = latent.copy()

execution.py

@@ -2,6 +2,7 @@ import os
 import sys
 import copy
 import json
+import logging
 import threading
 import heapq
 import traceback
@@ -156,7 +157,7 @@ def recursive_execute(server, prompt, outputs, current_item, extra_data, execute
             if server.client_id is not None:
                 server.send_sync("executed", { "node": unique_id, "output": output_ui, "prompt_id": prompt_id }, server.client_id)
     except comfy.model_management.InterruptProcessingException as iex:
-        print("Processing interrupted")
+        logging.info("Processing interrupted")
 
         # skip formatting inputs/outputs
         error_details = {
@@ -177,8 +178,8 @@ def recursive_execute(server, prompt, outputs, current_item, extra_data, execute
         for node_id, node_outputs in outputs.items():
             output_data_formatted[node_id] = [[format_value(x) for x in l] for l in node_outputs]
 
-        print("!!! Exception during processing !!!")
-        print(traceback.format_exc())
+        logging.error("!!! Exception during processing !!!")
+        logging.error(traceback.format_exc())
 
         error_details = {
             "node_id": unique_id,
@@ -636,11 +637,11 @@ def validate_prompt(prompt):
         if valid is True:
             good_outputs.add(o)
         else:
-            print(f"Failed to validate prompt for output {o}:")
+            logging.error(f"Failed to validate prompt for output {o}:")
             if len(reasons) > 0:
-                print("* (prompt):")
+                logging.error("* (prompt):")
                 for reason in reasons:
-                    print(f"  - {reason['message']}: {reason['details']}")
+                    logging.error(f"  - {reason['message']}: {reason['details']}")
             errors += [(o, reasons)]
             for node_id, result in validated.items():
                 valid = result[0]
@@ -656,11 +657,11 @@ def validate_prompt(prompt):
                             "dependent_outputs": [],
                             "class_type": class_type
                         }
-                        print(f"* {class_type} {node_id}:")
+                        logging.error(f"* {class_type} {node_id}:")
                         for reason in reasons:
-                            print(f"  - {reason['message']}: {reason['details']}")
+                            logging.error(f"  - {reason['message']}: {reason['details']}")
                     node_errors[node_id]["dependent_outputs"].append(o)
-            print("Output will be ignored")
+            logging.error("Output will be ignored")
 
     if len(good_outputs) == 0:
         errors_list = []
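
Note: with these call sites moved from print to the stdlib root logger, a host application can filter or redirect them without patching ComfyUI. A minimal sketch using only the standard logging module:

import logging

# Keep INFO-level notices such as "Processing interrupted" visible
# and send everything to a file instead of stdout.
logging.basicConfig(filename="comfyui.log", level=logging.INFO)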

folder_paths.py

@@ -29,6 +29,8 @@ folder_names_and_paths["custom_nodes"] = ([os.path.join(base_path, "custom_nodes
 folder_names_and_paths["hypernetworks"] = ([os.path.join(models_dir, "hypernetworks")], supported_pt_extensions)
 
+folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers")], {""})
+
 output_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
 temp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
 input_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
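
Note: each folder_names_and_paths entry pairs a list of search paths with a set of allowed file extensions, so the {""} registered here should match only extensionless files under models/classifiers. An illustrative filter showing that reading (extension_ok is a made-up helper, not ComfyUI's):

import os

def extension_ok(filename, extensions):
    # {""} accepts names like "my_classifier" that have no suffix;
    # a set like {".pt", ".safetensors"} would accept weight files.
    return os.path.splitext(filename)[-1] in extensions

print(extension_ok("my_classifier", {""}))        # True
print(extension_ok("model.safetensors", {""}))    # False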

nodes.py

@@ -1202,7 +1202,7 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
         noise_mask = latent["noise_mask"]
 
     callback = latent_preview.prepare_callback(model, steps)
-    disable_pbar = False
+    disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
     samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                   denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                   force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)