import torch

import comfy.model_management
import comfy.conds
import comfy.utils  # provides repeat_to_batch_size(), used in prepare_mask below
import comfy.hooks

from typing import TYPE_CHECKING, Dict, List

if TYPE_CHECKING:
    from comfy.model_patcher import ModelPatcher
    from comfy.model_base import BaseModel
    from comfy.controlnet import ControlBase


def prepare_mask(noise_mask, shape, device):
    """ensures noise mask is of proper dimensions"""
    noise_mask = torch.nn.functional.interpolate(noise_mask.reshape((-1, 1, noise_mask.shape[-2], noise_mask.shape[-1])), size=(shape[2], shape[3]), mode="bilinear")
    noise_mask = torch.cat([noise_mask] * shape[1], dim=1)
    noise_mask = comfy.utils.repeat_to_batch_size(noise_mask, shape[0])
    noise_mask = noise_mask.to(device)
    return noise_mask
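
# Illustrative example (shapes only, not from the original module): a mask of shape
# (H, W) or (1, H, W) combined with a latent `shape` of (B, C, h, w) comes back as a
# (B, C, h, w) tensor on `device`, resized with bilinear interpolation.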


def get_models_from_cond(cond, model_type):
    """Collects every object stored under `model_type` across a list of conds,
    flattening both single values and lists into one list."""
    models = []
    for c in cond:
        if model_type in c:
            if isinstance(c[model_type], list):
                models += c[model_type]
            else:
                models += [c[model_type]]
    return models
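
# Illustrative use (keys as used elsewhere in this module):
#     get_models_from_cond(conds["positive"], "control")  # -> list of attached controlnets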


def get_hooks_from_cond(cond, filter_types: List[comfy.hooks.EnumHookType]=None):
    """Collects the hooks attached to a list of conds, optionally filtered by hook type."""
    # A dict with None values acts as an ordered set: hooks are deduplicated while
    # keeping the order in which they were first encountered.
    hooks: Dict[comfy.hooks.Hook, None] = {}
    for c in cond:
        if 'hooks' in c:
            for hook in c['hooks'].hooks:
                hook: comfy.hooks.Hook
                if not filter_types or hook.hook_type in filter_types:
                    hooks[hook] = None
    return hooks


def convert_cond(cond):
    """Converts conditioning from the [cross_attn, options] pair format into the flat
    dict format used internally."""
    out = []
    for c in cond:
        temp = c[1].copy()
        model_conds = temp.get("model_conds", {})
        if c[0] is not None:
            model_conds["c_crossattn"] = comfy.conds.CONDCrossAttn(c[0]) #TODO: remove
            temp["cross_attn"] = c[0]
        temp["model_conds"] = model_conds
        out.append(temp)
    return out
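
# Sketch of the conversion (key names are illustrative, not from the original source):
#     [cross_attn_tensor, {"pooled_output": p}]
# becomes
#     {"pooled_output": p, "cross_attn": cross_attn_tensor,
#      "model_conds": {"c_crossattn": comfy.conds.CONDCrossAttn(cross_attn_tensor)}}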


def get_additional_models(conds, dtype):
    """Gathers the additional models referenced by the conditioning (controlnets,
    gligen, extra models, hook models) and the extra inference memory required by
    the controlnets."""
    cnets: List[ControlBase] = []
    gligen = []
    add_models = []
    hooks: Dict[comfy.hooks.AddModelHook, None] = {}

    for k in conds:
        cnets += get_models_from_cond(conds[k], "control")
        gligen += get_models_from_cond(conds[k], "gligen")
        add_models += get_models_from_cond(conds[k], "additional_models")
        hooks.update(get_hooks_from_cond(conds[k], [comfy.hooks.EnumHookType.AddModel]))

    control_nets = set(cnets)

    inference_memory = 0
    control_models = []
    for m in control_nets:
        control_models += m.get_models()
        inference_memory += m.inference_memory_requirements(dtype)

    gligen = [x[1] for x in gligen]
    hook_models = [x.model for x in hooks]
    models = control_models + gligen + add_models + hook_models

    return models, inference_memory
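
# The returned models are handed to comfy.model_management.load_models_gpu together
# with the base model in prepare_sampling below; inference_memory is added on top of
# the base model's own memory estimate.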


def cleanup_additional_models(models):
    """cleanup additional models that were loaded"""
    for m in models:
        if hasattr(m, 'cleanup'):
            m.cleanup()


def prepare_sampling(model: 'ModelPatcher', noise_shape, conds):
    """Loads the model and every additional model referenced by the conds, then
    returns the unwrapped model ready for sampling."""
    device = model.load_device
    real_model: 'BaseModel' = None
    models, inference_memory = get_additional_models(conds, model.model_dtype())
    models += model.get_all_additional_models() # TODO: does this require inference_memory update?
    # Memory is estimated for a doubled batch (cond and uncond are typically run
    # together); the single-batch estimate is the minimum needed to run at all.
    memory_required = model.memory_required([noise_shape[0] * 2] + list(noise_shape[1:])) + inference_memory
    minimum_memory_required = model.memory_required([noise_shape[0]] + list(noise_shape[1:])) + inference_memory
    comfy.model_management.load_models_gpu([model] + models, memory_required=memory_required, minimum_memory_required=minimum_memory_required)
    real_model = model.model
    real_model.current_patcher = model

    return real_model, conds, models


def cleanup_models(conds, models):
    """Cleans up the additional models loaded for sampling, including any controlnets
    referenced by the conds."""
    cleanup_additional_models(models)

    control_cleanup = []
    for k in conds:
        control_cleanup += get_models_from_cond(conds[k], "control")

    cleanup_additional_models(set(control_cleanup))
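
# Typical call pattern (sketch; variable names are illustrative):
#     real_model, conds, loaded = prepare_sampling(model, noise.shape, conds)
#     try:
#         ...  # run the sampler against real_model
#     finally:
#         cleanup_models(conds, loaded)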


def prepare_model_patcher(model: 'ModelPatcher', conds):
    """Registers the weight patches for every hook found in the conds on the model."""
    # check for hooks in conds - if not registered, see if can be applied
    hooks = {}
    for k in conds:
        hooks.update(get_hooks_from_cond(conds[k]))
    model.register_all_hook_patches(hooks, comfy.hooks.EnumWeightTarget.Model)