from comfy import sd1_clip
import comfy.text_encoders.t5
import comfy.model_management
from transformers import T5TokenizerFast
import torch
import os


class T5XXLModel(sd1_clip.SDClipModel):
    # SDClipModel configured as a T5-XXL text encoder, using the JSON config
    # shipped next to this file.
    def __init__(self, device="cpu", layer="last", layer_idx=None, dtype=None):
        textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_config_xxl.json")
        super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config,
                         dtype=dtype, special_tokens={"end": 1, "pad": 0}, model_class=comfy.text_encoders.t5.T5)


class T5XXLTokenizer(sd1_clip.SDTokenizer):
    # T5 tokenizer with 4096-dim embeddings; pads up to at least 256 tokens
    # and leaves the maximum length effectively unbounded.
    def __init__(self, embedding_directory=None, tokenizer_data={}):
        tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "t5_tokenizer")
        super().__init__(tokenizer_path, pad_with_end=False, embedding_size=4096, embedding_key='t5xxl',
                         tokenizer_class=T5TokenizerFast, has_start_token=False, pad_to_max_length=False,
                         max_length=99999999, min_length=256)


class FluxTokenizer:
    # Runs the same prompt through both tokenizers Flux uses: CLIP-L and T5-XXL.
    def __init__(self, embedding_directory=None, tokenizer_data={}):
        self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory)
        self.t5xxl = T5XXLTokenizer(embedding_directory=embedding_directory)

    def tokenize_with_weights(self, text: str, return_word_ids=False):
        out = {}
        out["l"] = self.clip_l.tokenize_with_weights(text, return_word_ids)
        out["t5xxl"] = self.t5xxl.tokenize_with_weights(text, return_word_ids)
        return out

    def untokenize(self, token_weight_pair):
        return self.clip_l.untokenize(token_weight_pair)

    def state_dict(self):
        return {}


class FluxClipModel(torch.nn.Module):
    # Dual text encoder for Flux: T5-XXL provides the sequence conditioning,
    # CLIP-L provides the pooled output.
    def __init__(self, dtype_t5=None, device="cpu", dtype=None):
        super().__init__()
        dtype_t5 = comfy.model_management.pick_weight_dtype(dtype_t5, dtype, device)
        self.clip_l = sd1_clip.SDClipModel(device=device, dtype=dtype, return_projected_pooled=False)
        self.t5xxl = T5XXLModel(device=device, dtype=dtype_t5)
        self.dtypes = {dtype, dtype_t5}

    def set_clip_options(self, options):
        self.clip_l.set_clip_options(options)
        self.t5xxl.set_clip_options(options)

    def reset_clip_options(self):
        self.clip_l.reset_clip_options()
        self.t5xxl.reset_clip_options()

    def encode_token_weights(self, token_weight_pairs):
        token_weight_pairs_l = token_weight_pairs["l"]
        token_weight_pairs_t5 = token_weight_pairs["t5xxl"]
        # Only the T5 hidden states and the CLIP-L pooled vector are used;
        # the T5 pooled output and CLIP-L hidden states are discarded.
        t5_out, t5_pooled = self.t5xxl.encode_token_weights(token_weight_pairs_t5)
        l_out, l_pooled = self.clip_l.encode_token_weights(token_weight_pairs_l)
        return t5_out, l_pooled

    def load_sd(self, sd):
        # Dispatch the state dict by probing for a key that only exists in the
        # CLIP text model architecture.
        if "text_model.encoder.layers.1.mlp.fc1.weight" in sd:
            return self.clip_l.load_sd(sd)
        else:
            return self.t5xxl.load_sd(sd)


def flux_clip(dtype_t5=None):
    # Factory that bakes dtype_t5 into a subclass, so callers can instantiate
    # it later with only device/dtype.
    class FluxClipModel_(FluxClipModel):
        def __init__(self, device="cpu", dtype=None):
            super().__init__(dtype_t5=dtype_t5, device=device, dtype=dtype)
    return FluxClipModel_
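
# A minimal usage sketch (illustrative, not part of the module): the prompt
# string and dtype choices below are assumptions, and in practice the encoder
# weights are loaded from checkpoint files via load_sd() before encoding.
#
#   clip_class = flux_clip(dtype_t5=torch.float16)
#   clip = clip_class(device="cpu", dtype=torch.float16)
#   tokenizer = FluxTokenizer()
#   tokens = tokenizer.tokenize_with_weights("a photo of a cat")
#   t5_out, l_pooled = clip.encode_token_weights(tokens)
#   # t5_out: T5-XXL hidden states used as the main conditioning
#   # l_pooled: pooled CLIP-L vector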