Alternative fix for #5767

This commit is contained in:
comfyanonymous 2024-11-26 17:51:40 -05:00
parent 24dc581dc3
commit 497db6212f
1 changed file with 7 additions and 1 deletion

View File

@ -712,7 +712,13 @@ class Flux(BaseModel):
super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.flux.model.Flux)
def concat_cond(self, **kwargs):
try:
#Handle Flux control loras dynamically changing the img_in weight.
num_channels = self.diffusion_model.img_in.weight.shape[1] // (self.diffusion_model.patch_size * self.diffusion_model.patch_size)
except:
#Some cases like tensorrt might not have the weights accessible
num_channels = self.model_config.unet_config["in_channels"]
out_channels = self.model_config.unet_config["out_channels"]
if num_channels <= out_channels: