Embeddings/textual inversion support for SD2.x
parent 1de5aa6a59
commit b1a7c9ebf6
@@ -252,18 +252,15 @@ class CLIP:
         else:
             params = {}
 
-        tokenizer_params = {}
-
         if self.target_clip == "ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder":
             clip = sd2_clip.SD2ClipModel
             tokenizer = sd2_clip.SD2Tokenizer
         elif self.target_clip == "ldm.modules.encoders.modules.FrozenCLIPEmbedder":
             clip = sd1_clip.SD1ClipModel
             tokenizer = sd1_clip.SD1Tokenizer
-            tokenizer_params['embedding_directory'] = embedding_directory
 
         self.cond_stage_model = clip(**(params))
-        self.tokenizer = tokenizer(**(tokenizer_params))
+        self.tokenizer = tokenizer(embedding_directory=embedding_directory)
         self.patcher = ModelPatcher(self.cond_stage_model)
 
     def clone(self):
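With this change, both the SD1 and SD2 branches construct their tokenizer with the same embedding_directory keyword, so the per-branch tokenizer_params dict is no longer needed. The sketch below shows one way a tokenizer that receives embedding_directory could resolve a textual-inversion embedding by name; the function name and the checkpoint layouts handled ("string_to_param", "emb_params", bare tensor) are assumptions for illustration, not the actual sd1_clip/sd2_clip implementation.

# A minimal sketch (assumptions, not the ComfyUI implementation) of resolving
# a textual-inversion embedding from embedding_directory by name.
import os
import torch

def load_textual_inversion(embedding_directory, name):
    """Return the embedding tensor for `name`, or None if no file matches."""
    for ext in (".pt", ".bin"):
        path = os.path.join(embedding_directory, name + ext)
        if not os.path.isfile(path):
            continue
        data = torch.load(path, map_location="cpu")
        if isinstance(data, dict) and "string_to_param" in data:
            # AUTOMATIC1111-style embedding: one tensor per trained token
            return next(iter(data["string_to_param"].values()))
        if isinstance(data, dict) and "emb_params" in data:
            return data["emb_params"]
        return data  # assume the file holds a bare tensor
    return None

# Hypothetical usage: vec = load_textual_inversion("models/embeddings", "myconcept")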
@@ -30,7 +30,5 @@ class SD2ClipModel(sd1_clip.SD1ClipModel):
         self.layer_idx = layer_idx
 
 class SD2Tokenizer(sd1_clip.SD1Tokenizer):
-    def __init__(self, tokenizer_path=None):
-        super().__init__(tokenizer_path, pad_with_end=False)
-
-
+    def __init__(self, tokenizer_path=None, embedding_directory=None):
+        super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory)
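The SD2Tokenizer change is a pass-through: it accepts embedding_directory and forwards it to the SD1 base class, keeping pad_with_end=False as the only SD2-specific difference. Below is a self-contained sketch of that subclassing pattern; the base-class body is a stand-in for illustration, not the real sd1_clip.SD1Tokenizer.

# Sketch of the subclassing pattern in the hunk above: the SD2 tokenizer only
# changes constructor defaults and forwards embedding_directory to the base class.
class SD1Tokenizer:
    def __init__(self, tokenizer_path=None, pad_with_end=True, embedding_directory=None):
        self.tokenizer_path = tokenizer_path
        self.pad_with_end = pad_with_end                # SD1 default behaviour
        self.embedding_directory = embedding_directory  # where textual inversions are looked up

class SD2Tokenizer(SD1Tokenizer):
    def __init__(self, tokenizer_path=None, embedding_directory=None):
        # SD2 keeps pad_with_end=False (as in the diff) and simply passes
        # embedding_directory through unchanged.
        super().__init__(tokenizer_path, pad_with_end=False,
                         embedding_directory=embedding_directory)

# Both tokenizers can now be constructed uniformly, which is what lets the
# CLIP class above drop its tokenizer_params special-casing:
tok = SD2Tokenizer(embedding_directory="models/embeddings")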