Add a CLIPLoader node to load standalone CLIP weights.
Put them in models/clip.
parent 56d802e1f3
commit 1de5aa6a59

comfy/sd.py: 16 lines changed
@@ -274,9 +274,15 @@ class CLIP:
         n.tokenizer = self.tokenizer
         return n
 
+    def load_from_state_dict(self, sd):
+        self.cond_stage_model.transformer.load_state_dict(sd, strict=False)
+
     def add_patches(self, patches, strength=1.0):
         return self.patcher.add_patches(patches, strength)
 
+    def clip_layer(self, layer_idx):
+        return self.cond_stage_model.clip_layer(layer_idx)
+
     def encode(self, text):
         tokens = self.tokenizer.tokenize_with_weights(text)
         try:
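Note on the new methods: load_from_state_dict passes strict=False so that checkpoints containing only text-encoder weights, or carrying extra keys such as a vision tower or logit_scale, still load onto the transformer without raising. A standalone sketch of that behavior, using a hypothetical toy module rather than ComfyUI's transformer:

import torch
import torch.nn as nn

# Toy stand-in for the text transformer; not ComfyUI's actual module.
class TextEncoder(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 4)

model = TextEncoder()
state = {
    "fc.weight": torch.zeros(4, 4),
    "fc.bias": torch.zeros(4),
    "logit_scale": torch.tensor(1.0),  # extra key a strict load would reject
}

# With strict=True this would raise on the unexpected key; with strict=False
# the mismatches are simply reported back to the caller.
missing, unexpected = model.load_state_dict(state, strict=False)
print(missing, unexpected)  # [] ['logit_scale']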
@@ -317,6 +323,16 @@ class VAE:
         samples = samples.cpu()
         return samples
 
+def load_clip(ckpt_path, embedding_directory=None):
+    clip_data = load_torch_file(ckpt_path)
+    config = {}
+    if "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data:
+        config['target'] = 'ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
+    else:
+        config['target'] = 'ldm.modules.encoders.modules.FrozenCLIPEmbedder'
+    clip = CLIP(config=config, embedding_directory=embedding_directory)
+    clip.load_from_state_dict(clip_data)
+    return clip
 
 def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=None):
     config = OmegaConf.load(config_path)
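The key check in load_clip works because SD1.x ships CLIP ViT-L, whose text model has 12 encoder layers (indices 0-11), while SD2.x ships OpenCLIP ViT-H with 24 layers (0-23); a weight for layer 22 can therefore only come from the larger model. A sketch of the same heuristic in isolation (a hypothetical helper, assuming the checkpoint has already been loaded into a dict of tensors):

# Hypothetical helper mirroring the detection in load_clip above.
def detect_text_encoder(clip_data):
    # Only the 24-layer OpenCLIP ViT-H text model (SD2.x) has a layer 22.
    if "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data:
        return 'ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
    # Otherwise assume the 12-layer CLIP ViT-L text model (SD1.x).
    return 'ldm.modules.encoders.modules.FrozenCLIPEmbedder'

assert detect_text_encoder({"text_model.encoder.layers.22.mlp.fc1.weight": 0}).endswith("FrozenOpenCLIPEmbedder")
assert detect_text_encoder({}).endswith("FrozenCLIPEmbedder")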
comfy/sd2_clip.py: 17 lines changed
@@ -8,15 +8,26 @@ class SD2ClipModel(sd1_clip.SD1ClipModel):
         super().__init__(device=device, freeze=freeze, textmodel_json_config=textmodel_json_config)
         self.empty_tokens = [[49406] + [49407] + [0] * 75]
         if layer == "last":
-            layer_idx = -1
+            pass
         elif layer == "penultimate":
-            layer_idx = -2
+            layer_idx = -1
+            self.clip_layer(layer_idx)
         elif self.layer == "hidden":
             assert layer_idx is not None
             assert abs(layer_idx) < 24
+            self.clip_layer(layer_idx)
         else:
             raise NotImplementedError()
-        self.clip_layer(layer_idx)
+
+    def clip_layer(self, layer_idx):
+        if layer_idx < 0:
+            layer_idx -= 1 #The real last layer of SD2.x clip is the penultimate one. The last one might contain garbage.
+        if abs(layer_idx) >= 24:
+            self.layer = "hidden"
+            self.layer_idx = -2
+        else:
+            self.layer = "hidden"
+            self.layer_idx = layer_idx
 
 class SD2Tokenizer(sd1_clip.SD1Tokenizer):
     def __init__(self, tokenizer_path=None):
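The net effect of the rewritten constructor plus the new clip_layer is that user-facing negative indices are shifted one layer deeper (the true last hidden state of the SD2.x text encoder is skipped as unreliable) and out-of-range requests fall back to the penultimate layer. A standalone re-implementation of the mapping, for illustration only, with a few worked values:

def sd2_effective_layer(layer_idx):
    if layer_idx < 0:
        layer_idx -= 1  # skip the real last layer; it may contain garbage
    if abs(layer_idx) >= 24:
        return -2       # out of range: clamp to the penultimate layer
    return layer_idx

assert sd2_effective_layer(-1) == -2   # "last" for the caller is really penultimate
assert sd2_effective_layer(-2) == -3
assert sd2_effective_layer(-24) == -2  # -24 shifts to -25, which gets clamped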
nodes.py: 26 lines changed
@@ -17,7 +17,7 @@ import comfy.samplers
 import comfy.sd
 
 supported_ckpt_extensions = ['.ckpt']
-supported_pt_extensions = ['.ckpt', '.pt']
+supported_pt_extensions = ['.ckpt', '.pt', '.bin']
 try:
     import safetensors.torch
     supported_ckpt_extensions += ['.safetensors']
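The .bin extension is added because standalone text encoders are often distributed as pytorch_model.bin. For context, the extension lists are consumed by filter_files_extensions when the loader nodes build their file dropdowns; a hypothetical re-implementation, shown only to illustrate the contract:

import os

# Hypothetical re-implementation of nodes.py's filter_files_extensions.
def filter_files_extensions(files, extensions):
    return sorted(f for f in files if os.path.splitext(f)[1].lower() in extensions)

print(filter_files_extensions(["clip_h.bin", "notes.txt", "sd21.ckpt"],
                              ['.ckpt', '.pt', '.bin']))
# ['clip_h.bin', 'sd21.ckpt']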
@@ -114,6 +114,7 @@ class CheckpointLoader:
     models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
     config_dir = os.path.join(models_dir, "configs")
     ckpt_dir = os.path.join(models_dir, "checkpoints")
+    embedding_directory = os.path.join(models_dir, "embeddings")
 
     @classmethod
     def INPUT_TYPES(s):
@@ -127,8 +128,7 @@ class CheckpointLoader:
     def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
         config_path = os.path.join(self.config_dir, config_name)
         ckpt_path = os.path.join(self.ckpt_dir, ckpt_name)
-        embedding_directory = os.path.join(self.models_dir, "embeddings")
-        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=embedding_directory)
+        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=self.embedding_directory)
 
 class LoraLoader:
     models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
@@ -168,6 +168,25 @@ class VAELoader:
         vae = comfy.sd.VAE(ckpt_path=vae_path)
         return (vae,)
 
+class CLIPLoader:
+    models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
+    clip_dir = os.path.join(models_dir, "clip")
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "clip_name": (filter_files_extensions(os.listdir(s.clip_dir), supported_pt_extensions), ),
+                              "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
+                             }}
+    RETURN_TYPES = ("CLIP",)
+    FUNCTION = "load_clip"
+
+    CATEGORY = "loaders"
+
+    def load_clip(self, clip_name, stop_at_clip_layer):
+        clip_path = os.path.join(self.clip_dir, clip_name)
+        clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=CheckpointLoader.embedding_directory)
+        clip.clip_layer(stop_at_clip_layer)
+        return (clip,)
+
 class EmptyLatentImage:
     def __init__(self, device="cpu"):
         self.device = device
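Put together, the node resolves a filename under models/clip, builds a CLIP object via comfy.sd.load_clip (reusing CheckpointLoader's embedding directory), and applies the layer setting before returning. A minimal sketch of driving it outside the graph executor; it assumes ComfyUI's nodes.py is importable and that sd2_clip.bin is a hypothetical file placed in models/clip:

import nodes

# Direct invocation for illustration; normally the graph executor calls the
# method named by FUNCTION with the inputs described by INPUT_TYPES.
loader = nodes.CLIPLoader()
(clip,) = loader.load_clip(clip_name="sd2_clip.bin", stop_at_clip_layer=-2)

# The returned CLIP behaves like the one produced by CheckpointLoader:
cond = clip.encode("a photograph of an astronaut riding a horse")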
@@ -549,6 +568,7 @@ NODE_CLASS_MAPPINGS = {
     "LatentFlip": LatentFlip,
     "LatentCrop": LatentCrop,
     "LoraLoader": LoraLoader,
+    "CLIPLoader": CLIPLoader,
 }