2023-01-03 06:53:32 +00:00
import torch
import os
import sys
import json
2023-01-23 02:42:22 +00:00
import hashlib
2023-02-17 16:19:49 +00:00
import traceback
2023-05-02 04:53:15 +00:00
import math
2023-05-13 15:54:45 +00:00
import time
2023-07-11 21:35:55 +00:00
import random
2024-03-11 04:56:41 +00:00
import logging
2023-01-03 06:53:32 +00:00
2024-05-07 09:41:06 +00:00
from PIL import Image , ImageOps , ImageSequence , ImageFile
2023-01-03 06:53:32 +00:00
from PIL . PngImagePlugin import PngInfo
2024-05-04 07:32:41 +00:00
2023-01-03 06:53:32 +00:00
import numpy as np
2023-05-18 03:04:40 +00:00
import safetensors . torch
2023-01-03 06:53:32 +00:00
2024-03-19 15:17:37 +00:00
sys . path . insert ( 0 , os . path . join ( os . path . dirname ( os . path . realpath ( __file__ ) ) , " comfy " ) )
2023-05-28 05:52:09 +00:00
import comfy . diffusers_load
2023-01-03 06:53:32 +00:00
import comfy . samplers
2023-04-23 18:02:08 +00:00
import comfy . sample
2023-01-03 06:53:32 +00:00
import comfy . sd
2023-02-16 15:38:08 +00:00
import comfy . utils
2023-08-25 21:25:39 +00:00
import comfy . controlnet
2023-02-16 15:38:08 +00:00
2023-04-02 03:19:15 +00:00
import comfy . clip_vision
2023-03-05 23:39:25 +00:00
2023-04-15 22:55:17 +00:00
import comfy . model_management
2023-07-28 16:31:41 +00:00
from comfy . cli_args import args
2023-02-15 14:48:10 +00:00
import importlib
2023-01-03 06:53:32 +00:00
2023-03-17 21:57:57 +00:00
import folder_paths
2023-06-06 05:26:52 +00:00
import latent_preview
2024-04-07 18:27:40 +00:00
import node_helpers
2023-06-05 23:39:56 +00:00
2023-03-02 19:42:03 +00:00
def before_node_execution():
    """Raise comfy's interrupt exception if the user has requested that processing stop.

    Called before each node executes so an interrupt takes effect between nodes.
    """
    comfy.model_management.throw_exception_if_processing_interrupted()
2023-03-02 19:42:03 +00:00
2023-03-02 20:24:51 +00:00
def interrupt_processing(value=True):
    """Set (value=True) or clear (value=False) the global interrupt flag for the current run."""
    comfy.model_management.interrupt_current_processing(value)
2023-03-02 19:42:03 +00:00
2024-03-26 08:00:53 +00:00
# Upper bound (in pixels) for width/height/x/y INT inputs on the nodes below.
MAX_RESOLUTION = 16384
2023-03-22 16:22:48 +00:00
2023-01-03 06:53:32 +00:00
class CLIPTextEncode:
    """Node that turns a text prompt into CONDITIONING via a CLIP model."""

    @classmethod
    def INPUT_TYPES(s):
        text_spec = ("STRING", {"multiline": True, "dynamicPrompts": True, "tooltip": "The text to be encoded."})
        clip_spec = ("CLIP", {"tooltip": "The CLIP model used for encoding the text."})
        return {"required": {"text": text_spec, "clip": clip_spec}}

    RETURN_TYPES = ("CONDITIONING",)
    OUTPUT_TOOLTIPS = ("A conditioning containing the embedded text used to guide the diffusion model.",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"
    DESCRIPTION = "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images."

    def encode(self, clip, text):
        """Tokenize `text` and encode it; the main embedding is split out and the
        remaining outputs (pooled output etc.) ride along as the extras dict."""
        token_map = clip.tokenize(text)
        encoded = clip.encode_from_tokens(token_map, return_pooled=True, return_dict=True)
        embedding = encoded.pop("cond")
        return ([[embedding, encoded]],)
2023-01-26 17:06:48 +00:00
class ConditioningCombine:
    """Merge two conditioning lists so that both apply simultaneously."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_1": ("CONDITIONING",), "conditioning_2": ("CONDITIONING",)}}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "combine"

    CATEGORY = "conditioning"

    def combine(self, conditioning_1, conditioning_2):
        # Simple list concatenation: every cond from both inputs is kept.
        merged = list(conditioning_1) + list(conditioning_2)
        return (merged,)
2023-04-30 21:33:15 +00:00
class ConditioningAverage:
    """Linearly blend `conditioning_from` into each entry of `conditioning_to`.

    At conditioning_to_strength = 1.0 the output equals conditioning_to; at 0.0
    it equals (the first cond of) conditioning_from. Pooled outputs are blended
    with the same weight when both sides have one.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING",), "conditioning_from": ("CONDITIONING",),
                             "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "addWeighted"

    CATEGORY = "conditioning"

    def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength):
        if len(conditioning_from) > 1:
            logging.warning("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        src_cond = conditioning_from[0][0]
        src_pooled = conditioning_from[0][1].get("pooled_output", None)
        w = conditioning_to_strength

        blended = []
        for dst_cond, dst_extras in conditioning_to:
            dst_pooled = dst_extras.get("pooled_output", src_pooled)

            # Match token lengths: trim the source, then zero-pad if still short.
            src = src_cond[:, :dst_cond.shape[1]]
            if src.shape[1] < dst_cond.shape[1]:
                pad = torch.zeros((1, dst_cond.shape[1] - src.shape[1], dst_cond.shape[2]))
                src = torch.cat([src, pad], dim=1)

            mixed = torch.mul(dst_cond, w) + torch.mul(src, (1.0 - w))

            extras = dst_extras.copy()
            if src_pooled is not None and dst_pooled is not None:
                extras["pooled_output"] = torch.mul(dst_pooled, w) + torch.mul(src_pooled, (1.0 - w))
            elif src_pooled is not None:
                extras["pooled_output"] = src_pooled

            blended.append([mixed, extras])
        return (blended,)
2023-07-05 21:40:22 +00:00
class ConditioningConcat:
    """Append the first cond of `conditioning_from` to every cond in `conditioning_to`
    along the token dimension."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "conditioning_to": ("CONDITIONING",),
            "conditioning_from": ("CONDITIONING",),
        }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "concat"

    CATEGORY = "conditioning"

    def concat(self, conditioning_to, conditioning_from):
        if len(conditioning_from) > 1:
            logging.warning("Warning: ConditioningConcat conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")

        source = conditioning_from[0][0]
        # Token-axis concat; the destination's extras dict is carried over unchanged.
        joined = [[torch.cat((entry[0], source), 1), entry[1].copy()] for entry in conditioning_to]
        return (joined,)
2023-01-26 17:06:48 +00:00
class ConditioningSetArea:
    """Restrict conditioning to a rectangular pixel region at a given strength."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING",),
                             "width": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                             "height": ("INT", {"default": 64, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                             "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        # Pixel coordinates are converted to latent coordinates (factor of 8).
        values = {
            "area": (height // 8, width // 8, y // 8, x // 8),
            "strength": strength,
            "set_area_to_bounds": False,
        }
        return (node_helpers.conditioning_set_values(conditioning, values),)
2023-01-03 06:53:32 +00:00
2023-09-06 07:26:55 +00:00
class ConditioningSetAreaPercentage:
    """Restrict conditioning to a region given as fractions of the image size."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING",),
                             "width": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
                             "height": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
                             "x": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
                             "y": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, width, height, x, y, strength):
        # The "percentage" tag tells downstream code the area is fractional.
        values = {
            "area": ("percentage", height, width, y, x),
            "strength": strength,
            "set_area_to_bounds": False,
        }
        return (node_helpers.conditioning_set_values(conditioning, values),)
2024-01-29 05:24:53 +00:00
class ConditioningSetAreaStrength:
    """Override only the strength value on every cond in the conditioning."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING",),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, strength):
        updated = node_helpers.conditioning_set_values(conditioning, {"strength": strength})
        return (updated,)
2023-04-25 07:15:25 +00:00
class ConditioningSetMask:
    """Limit conditioning to a masked region, optionally clamping the area to the
    mask's bounding box."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING",),
                             "mask": ("MASK",),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             "set_cond_area": (["default", "mask bounds"],),
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning"

    def append(self, conditioning, mask, set_cond_area, strength):
        use_bounds = set_cond_area != "default"
        # Masks need an explicit batch dimension downstream.
        if mask.dim() < 3:
            mask = mask.unsqueeze(0)

        updated = node_helpers.conditioning_set_values(conditioning, {"mask": mask,
                                                                      "set_area_to_bounds": use_bounds,
                                                                      "mask_strength": strength})
        return (updated,)
2023-06-28 03:30:52 +00:00
class ConditioningZeroOut:
    """Replace every conditioning tensor (and its pooled output, if present)
    with zeros of the same shape. Inputs are not mutated."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING",)}}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "zero_out"

    CATEGORY = "advanced/conditioning"

    def zero_out(self, conditioning):
        zeroed = []
        for cond, extras in conditioning:
            meta = extras.copy()
            pooled = meta.get("pooled_output", None)
            if pooled is not None:
                meta["pooled_output"] = torch.zeros_like(pooled)
            zeroed.append([torch.zeros_like(cond), meta])
        return (zeroed,)
2023-07-24 13:25:02 +00:00
class ConditioningSetTimestepRange:
    """Limit conditioning to a fraction of the sampling schedule (start..end in [0, 1])."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING",),
                             "start": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "set_range"

    CATEGORY = "advanced/conditioning"

    def set_range(self, conditioning, start, end):
        values = {"start_percent": start, "end_percent": end}
        return (node_helpers.conditioning_set_values(conditioning, values),)
2023-01-03 06:53:32 +00:00
class VAEDecode:
    """Decode a latent back into a pixel-space image via the supplied VAE."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "samples": ("LATENT", {"tooltip": "The latent to be decoded."}),
                "vae": ("VAE", {"tooltip": "The VAE model used for decoding the latent."})
            }
        }
    RETURN_TYPES = ("IMAGE",)
    OUTPUT_TOOLTIPS = ("The decoded image.",)
    FUNCTION = "decode"

    CATEGORY = "latent"
    DESCRIPTION = "Decodes latent images back into pixel space images."

    def decode(self, vae, samples):
        images = vae.decode(samples["samples"])
        return (images,)
2023-01-03 06:53:32 +00:00
2023-02-24 07:10:10 +00:00
class VAEDecodeTiled:
    """Tiled VAE decode for latents too large to decode in one pass."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT",), "vae": ("VAE",),
                             "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64})
                             }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "decode"

    CATEGORY = "_for_testing"

    def decode(self, vae, samples, tile_size):
        # tile_size is given in pixels; the latent grid is 8x smaller.
        latent_tile = tile_size // 8
        return (vae.decode_tiled(samples["samples"], tile_x=latent_tile, tile_y=latent_tile), )
2023-02-24 07:10:10 +00:00
2023-01-03 06:53:32 +00:00
class VAEEncode:
    """Encode an image into latent space via the supplied VAE."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"pixels": ("IMAGE",), "vae": ("VAE",)}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent"

    def encode(self, vae, pixels):
        # Only the RGB channels are encoded; alpha (if any) is dropped.
        latent = vae.encode(pixels[:, :, :, :3])
        return ({"samples": latent},)
2023-01-03 06:53:32 +00:00
2023-03-11 20:28:15 +00:00
class VAEEncodeTiled:
    """Tiled VAE encode for images too large to encode in one pass."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"pixels": ("IMAGE",), "vae": ("VAE",),
                             "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64})
                             }}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "_for_testing"

    def encode(self, vae, pixels, tile_size):
        # Only RGB channels are encoded; tile size here is in pixels.
        rgb = pixels[:, :, :, :3]
        return ({"samples": vae.encode_tiled(rgb, tile_x=tile_size, tile_y=tile_size)},)
2023-05-02 18:16:27 +00:00
2023-02-16 01:44:51 +00:00
class VAEEncodeForInpaint:
    """Encode an image for inpainting: crops to the VAE's downscale grid, grows the
    mask slightly, and neutralizes masked pixels before encoding."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"pixels": ("IMAGE", ), "vae": ("VAE", ), "mask": ("MASK", ), "grow_mask_by": ("INT", {"default": 6, "min": 0, "max": 64, "step": 1}),}}
    RETURN_TYPES = ("LATENT",)
    FUNCTION = "encode"

    CATEGORY = "latent/inpaint"

    def encode(self, vae, pixels, mask, grow_mask_by=6):
        # Largest dimensions divisible by the VAE's downscale factor.
        x = (pixels.shape[1] // vae.downscale_ratio) * vae.downscale_ratio
        y = (pixels.shape[2] // vae.downscale_ratio) * vae.downscale_ratio
        # Resample the mask to pixel resolution as (N, 1, H, W).
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        pixels = pixels.clone()  # the in-place edits below must not touch the caller's tensor
        if pixels.shape[1] != x or pixels.shape[2] != y:
            # Center-crop both image and mask onto the aligned grid.
            x_offset = (pixels.shape[1] % vae.downscale_ratio) // 2
            y_offset = (pixels.shape[2] % vae.downscale_ratio) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
            mask = mask[:, :, x_offset:x + x_offset, y_offset:y + y_offset]

        #grow mask by a few pixels to keep things seamless in latent space
        if grow_mask_by == 0:
            mask_erosion = mask
        else:
            # Dilate via convolution with an all-ones kernel, then clamp to [0, 1].
            kernel_tensor = torch.ones((1, 1, grow_mask_by, grow_mask_by))
            padding = math.ceil((grow_mask_by - 1) / 2)
            mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=padding), 0, 1)

        # m is 1 where pixels are kept, 0 where they are masked out.
        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            # Shift to [-0.5, 0.5], zero the masked region, shift back: masked
            # pixels end up at mid-gray (0.5) so the VAE sees neutral content.
            pixels[:, :, :, i] -= 0.5
            pixels[:, :, :, i] *= m
            pixels[:, :, :, i] += 0.5
        t = vae.encode(pixels)

        # noise_mask uses the grown (un-cropped-to :x,:y beyond the slice) mask.
        return ({"samples": t, "noise_mask": (mask_erosion[:, :, :x, :y].round())}, )
2023-01-03 06:53:32 +00:00
2024-01-11 08:15:27 +00:00
class InpaintModelConditioning:
    """Prepare positive/negative conditioning plus a latent for inpainting models:
    the masked image is encoded as a concat latent attached to both conditionings,
    while the untouched image provides the starting latent with a noise mask."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "vae": ("VAE", ),
                             "pixels": ("IMAGE", ),
                             "mask": ("MASK", ),
                             }}

    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")
    FUNCTION = "encode"

    CATEGORY = "conditioning/inpaint"

    def encode(self, positive, negative, pixels, vae, mask):
        # Largest dimensions divisible by 8 (latent grid alignment).
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8
        # Resample the mask to pixel resolution as (N, 1, H, W).
        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")

        orig_pixels = pixels
        pixels = orig_pixels.clone()  # keep an unmodified copy for the output latent
        if pixels.shape[1] != x or pixels.shape[2] != y:
            # Center-crop image and mask onto the 8-aligned grid.
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]
            mask = mask[:, :, x_offset:x + x_offset, y_offset:y + y_offset]

        # Neutralize masked pixels to mid-gray before encoding the concat latent.
        m = (1.0 - mask.round()).squeeze(1)
        for i in range(3):
            pixels[:, :, :, i] -= 0.5
            pixels[:, :, :, i] *= m
            pixels[:, :, :, i] += 0.5
        concat_latent = vae.encode(pixels)
        orig_latent = vae.encode(orig_pixels)

        out_latent = {}
        out_latent["samples"] = orig_latent
        out_latent["noise_mask"] = mask

        out = []
        for conditioning in [positive, negative]:
            c = node_helpers.conditioning_set_values(conditioning, {"concat_latent_image": concat_latent,
                                                                    "concat_mask": mask})
            out.append(c)
        return (out[0], out[1], out_latent)
2023-05-18 03:40:28 +00:00
class SaveLatent:
    """Save a latent tensor to a .latent file (safetensors format) in the output
    directory, optionally embedding the workflow prompt as metadata."""

    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT", ),
                             "filename_prefix": ("STRING", {"default": "latents/ComfyUI"})},
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }
    RETURN_TYPES = ()
    FUNCTION = "save"

    OUTPUT_NODE = True

    CATEGORY = "_for_testing"

    def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        # Resolve a non-colliding output path and a zero-padded counter.
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)

        # support save metadata for latent sharing
        prompt_info = ""
        if prompt is not None:
            prompt_info = json.dumps(prompt)

        metadata = None
        if not args.disable_metadata:
            # The prompt plus any extra workflow info are stored as JSON strings.
            metadata = {"prompt": prompt_info}
            if extra_pnginfo is not None:
                for x in extra_pnginfo:
                    metadata[x] = json.dumps(extra_pnginfo[x])

        file = f"{filename}_{counter:05}_.latent"

        # UI result entry so the frontend can show/link the saved file.
        results = list()
        results.append({
            "filename": file,
            "subfolder": subfolder,
            "type": "output"
        })

        file = os.path.join(full_output_folder, file)

        output = {}
        output["latent_tensor"] = samples["samples"]
        # Empty marker tensor flags the current (unscaled) latent file format.
        output["latent_format_version_0"] = torch.tensor([])

        comfy.utils.save_torch_file(output, file, metadata=metadata)
        return {"ui": {"latents": results}}
2023-05-18 03:40:28 +00:00
class LoadLatent:
    """Load a .latent file from the input directory back into a LATENT."""

    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith(".latent")]
        return {"required": {"latent": [sorted(files), ]}, }

    CATEGORY = "_for_testing"

    RETURN_TYPES = ("LATENT", )
    FUNCTION = "load"

    def load(self, latent):
        latent_path = folder_paths.get_annotated_filepath(latent)
        latent = safetensors.torch.load_file(latent_path, device="cpu")
        # Files without the format-version marker predate the scaled format and
        # must be un-scaled by the SD1 latent factor (0.18215).
        multiplier = 1.0
        if "latent_format_version_0" not in latent:
            multiplier = 1.0 / 0.18215
        samples = {"samples": latent["latent_tensor"].float() * multiplier}
        return (samples, )

    @classmethod
    def IS_CHANGED(s, latent):
        # Content hash so the node re-executes when the file changes on disk.
        image_path = folder_paths.get_annotated_filepath(latent)
        m = hashlib.sha256()
        with open(image_path, 'rb') as f:
            m.update(f.read())
        return m.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, latent):
        if not folder_paths.exists_annotated_filepath(latent):
            return "Invalid latent file: {}".format(latent)
        return True
2023-05-18 03:40:28 +00:00
2023-01-03 06:53:32 +00:00
class CheckpointLoader:
    """Load a checkpoint using an explicitly selected config file (advanced use)."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"config_name": (folder_paths.get_filename_list("configs"),),
                             "ckpt_name": (folder_paths.get_filename_list("checkpoints"),)}}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders"

    def load_checkpoint(self, config_name, ckpt_name):
        config_path = folder_paths.get_full_path("configs", config_name)
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        embeddings = folder_paths.get_folder_paths("embeddings")
        return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=embeddings)
2023-01-03 06:53:32 +00:00
2023-03-03 08:37:35 +00:00
class CheckpointLoaderSimple:
    """Load a checkpoint, auto-detecting its configuration from the weights."""

    @classmethod
    def INPUT_TYPES(s):
        ckpt_spec = (folder_paths.get_filename_list("checkpoints"), {"tooltip": "The name of the checkpoint (model) to load."})
        return {"required": {"ckpt_name": ckpt_spec}}

    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    OUTPUT_TOOLTIPS = ("The model used for denoising latents.",
                       "The CLIP model used for encoding text prompts.",
                       "The VAE model used for encoding and decoding images to and from latent space.")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"
    DESCRIPTION = "Loads a diffusion model checkpoint, diffusion models are used to denoise latents."

    def load_checkpoint(self, ckpt_name):
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        loaded = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        # Only (MODEL, CLIP, VAE) are exposed; any extra outputs are dropped.
        return loaded[:3]
2023-03-03 08:37:35 +00:00
2023-04-06 06:57:31 +00:00
class DiffusersLoader:
    """Load a model stored in diffusers directory layout (deprecated)."""

    @classmethod
    def INPUT_TYPES(cls):
        # Offer every directory under the configured "diffusers" paths that
        # contains a model_index.json, as a path relative to its search root.
        paths = []
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                for root, subdir, files in os.walk(search_path, followlinks=True):
                    if "model_index.json" in files:
                        paths.append(os.path.relpath(root, start=search_path))

        return {"required": {"model_path": (paths, ), }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE")
    FUNCTION = "load_checkpoint"

    CATEGORY = "advanced/loaders/deprecated"

    def load_checkpoint(self, model_path, output_vae=True, output_clip=True):
        # Resolve the relative model_path against the first search root that
        # contains it; if none match, model_path is passed through unchanged.
        for search_path in folder_paths.get_folder_paths("diffusers"):
            if os.path.exists(search_path):
                path = os.path.join(search_path, model_path)
                if os.path.exists(path):
                    model_path = path
                    break

        return comfy.diffusers_load.load_diffusers(model_path, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))
2023-04-06 06:57:31 +00:00
2023-04-02 03:19:15 +00:00
class unCLIPCheckpointLoader:
    """Load an unCLIP checkpoint, additionally returning its CLIP vision model."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"ckpt_name": (folder_paths.get_filename_list("checkpoints"),),
                             }}
    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
    FUNCTION = "load_checkpoint"

    CATEGORY = "loaders"

    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
        """Load the checkpoint named `ckpt_name`.

        output_vae / output_clip select whether the VAE and CLIP are loaded.
        Fix: these parameters were previously accepted but ignored (True was
        hard-coded in the call); they are now passed through. Defaults keep
        the old behavior for existing callers.
        """
        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=output_vae, output_clip=output_clip, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
        return out
2023-03-03 18:04:36 +00:00
class CLIPSetLastLayer:
    """Clone a CLIP model and stop it at an earlier layer (clip skip)."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"clip": ("CLIP",),
                             "stop_at_clip_layer": ("INT", {"default": -1, "min": -24, "max": -1, "step": 1}),
                             }}
    RETURN_TYPES = ("CLIP",)
    FUNCTION = "set_last_layer"

    CATEGORY = "conditioning"

    def set_last_layer(self, clip, stop_at_clip_layer):
        # Clone first so the caller's CLIP object is left untouched.
        trimmed = clip.clone()
        trimmed.clip_layer(stop_at_clip_layer)
        return (trimmed,)
2023-02-03 07:06:34 +00:00
class LoraLoader:
    """Apply a LoRA to a diffusion model and a CLIP model.

    Caches the most recently loaded LoRA file per node instance so switching
    strengths (or re-running) doesn't reload the file from disk.
    """

    def __init__(self):
        # (lora_path, state_dict) of the last LoRA loaded by this node, or None.
        self.loaded_lora = None

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model": ("MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}),
                "clip": ("CLIP", {"tooltip": "The CLIP model the LoRA will be applied to."}),
                "lora_name": (folder_paths.get_filename_list("loras"), {"tooltip": "The name of the LoRA."}),
                "strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}),
                "strength_clip": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01, "tooltip": "How strongly to modify the CLIP model. This value can be negative."}),
            }
        }

    RETURN_TYPES = ("MODEL", "CLIP")
    OUTPUT_TOOLTIPS = ("The modified diffusion model.", "The modified CLIP model.")
    FUNCTION = "load_lora"

    CATEGORY = "loaders"
    DESCRIPTION = "LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together."

    def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
        # Both strengths zero means no change at all: skip loading entirely.
        if strength_model == 0 and strength_clip == 0:
            return (model, clip)

        lora_path = folder_paths.get_full_path("loras", lora_name)
        lora = None
        if self.loaded_lora is not None:
            if self.loaded_lora[0] == lora_path:
                # Cache hit: reuse the already-loaded state dict.
                lora = self.loaded_lora[1]
            else:
                # Different file selected: drop the old cache entry before
                # loading the new one so the old weights can be freed first.
                temp = self.loaded_lora
                self.loaded_lora = None
                del temp

        if lora is None:
            lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
            self.loaded_lora = (lora_path, lora)

        model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
        return (model_lora, clip_lora)
2023-11-25 07:26:50 +00:00
class LoraLoaderModelOnly(LoraLoader):
    """LoraLoader variant that applies a LoRA to the diffusion model only (no CLIP)."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL",),
                             "lora_name": (folder_paths.get_filename_list("loras"),),
                             "strength_model": ("FLOAT", {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}),
                             }}
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_lora_model_only"

    def load_lora_model_only(self, model, lora_name, strength_model):
        # Delegate to the parent with no CLIP and zero CLIP strength.
        patched_model = self.load_lora(model, None, lora_name, strength_model, 0)[0]
        return (patched_model,)
2023-01-03 06:53:32 +00:00
class VAELoader:
    """Load a VAE from the "vae" folder, or assemble a TAESD approximate VAE
    from encoder/decoder pairs found in "vae_approx"."""

    @staticmethod
    def vae_list():
        """Return selectable VAE names: regular VAE files plus the virtual
        entries "taesd"/"taesdxl"/"taesd3" when both halves of the pair exist."""
        vaes = folder_paths.get_filename_list("vae")
        approx_vaes = folder_paths.get_filename_list("vae_approx")
        sdxl_taesd_enc = False
        sdxl_taesd_dec = False
        sd1_taesd_enc = False
        sd1_taesd_dec = False
        sd3_taesd_enc = False
        sd3_taesd_dec = False

        for v in approx_vaes:
            if v.startswith("taesd_decoder."):
                sd1_taesd_dec = True
            elif v.startswith("taesd_encoder."):
                sd1_taesd_enc = True
            elif v.startswith("taesdxl_decoder."):
                sdxl_taesd_dec = True
            elif v.startswith("taesdxl_encoder."):
                sdxl_taesd_enc = True
            elif v.startswith("taesd3_decoder."):
                sd3_taesd_dec = True
            elif v.startswith("taesd3_encoder."):
                sd3_taesd_enc = True
        # A TAESD entry is only offered when both encoder and decoder exist.
        if sd1_taesd_dec and sd1_taesd_enc:
            vaes.append("taesd")
        if sdxl_taesd_dec and sdxl_taesd_enc:
            vaes.append("taesdxl")
        if sd3_taesd_dec and sd3_taesd_enc:
            vaes.append("taesd3")
        return vaes

    @staticmethod
    def load_taesd(name):
        """Build a single VAE state dict from the separate TAESD encoder and
        decoder files, namespacing their keys and adding scale/shift tensors."""
        sd = {}
        approx_vaes = folder_paths.get_filename_list("vae_approx")

        encoder = next(filter(lambda a: a.startswith("{}_encoder.".format(name)), approx_vaes))
        decoder = next(filter(lambda a: a.startswith("{}_decoder.".format(name)), approx_vaes))

        enc = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", encoder))
        for k in enc:
            sd["taesd_encoder.{}".format(k)] = enc[k]

        dec = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", decoder))
        for k in dec:
            sd["taesd_decoder.{}".format(k)] = dec[k]

        # Latent scale/shift factors per model family (SD1, SDXL, SD3).
        if name == "taesd":
            sd["vae_scale"] = torch.tensor(0.18215)
            sd["vae_shift"] = torch.tensor(0.0)
        elif name == "taesdxl":
            sd["vae_scale"] = torch.tensor(0.13025)
            sd["vae_shift"] = torch.tensor(0.0)
        elif name == "taesd3":
            sd["vae_scale"] = torch.tensor(1.5305)
            sd["vae_shift"] = torch.tensor(0.0609)
        return sd

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"vae_name": (s.vae_list(), )}}
    RETURN_TYPES = ("VAE",)
    FUNCTION = "load_vae"

    CATEGORY = "loaders"

    #TODO: scale factor?

    def load_vae(self, vae_name):
        """Load the named VAE (regular file or assembled TAESD) and wrap it."""
        if vae_name in ["taesd", "taesdxl", "taesd3"]:
            sd = self.load_taesd(vae_name)
        else:
            vae_path = folder_paths.get_full_path("vae", vae_name)
            sd = comfy.utils.load_torch_file(vae_path)
        vae = comfy.sd.VAE(sd=sd)
        return (vae,)
2023-02-16 15:38:08 +00:00
class ControlNetLoader:
    """Load a standalone ControlNet model from the controlnet folder."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"control_net_name": (folder_paths.get_filename_list("controlnet"),)}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, control_net_name):
        path = folder_paths.get_full_path("controlnet", control_net_name)
        return (comfy.controlnet.load_controlnet(path),)
2023-02-23 04:22:03 +00:00
class DiffControlNetLoader:
    """Load a diff ControlNet; the base model is passed so the diff can be applied."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model": ("MODEL",),
                             "control_net_name": (folder_paths.get_filename_list("controlnet"),)}}

    RETURN_TYPES = ("CONTROL_NET",)
    FUNCTION = "load_controlnet"

    CATEGORY = "loaders"

    def load_controlnet(self, model, control_net_name):
        path = folder_paths.get_full_path("controlnet", control_net_name)
        return (comfy.controlnet.load_controlnet(path, model),)
2023-02-16 15:38:08 +00:00
class ControlNetApply:
    """Attach a ControlNet hint image to every entry of a conditioning."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING",),
                             "control_net": ("CONTROL_NET",),
                             "image": ("IMAGE",),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01})
                             }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning/controlnet"

    def apply_controlnet(self, conditioning, control_net, image, strength):
        # Zero strength is a no-op: hand back the conditioning untouched.
        if strength == 0:
            return (conditioning,)

        hint = image.movedim(-1, 1)  # NHWC image -> NCHW hint
        out = []
        for t in conditioning:
            opts = t[1].copy()
            c_net = control_net.copy().set_cond_hint(hint, strength)
            if 'control' in opts:
                # Chain onto any controlnet already applied to this entry.
                c_net.set_previous_controlnet(opts['control'])
            opts['control'] = c_net
            opts['control_apply_to_uncond'] = True
            out.append([t[0], opts])
        return (out,)
2023-07-24 17:26:07 +00:00
class ControlNetApplyAdvanced:
    """Apply a ControlNet to positive and negative conditioning with a timestep window."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"positive": ("CONDITIONING",),
                             "negative": ("CONDITIONING",),
                             "control_net": ("CONTROL_NET",),
                             "image": ("IMAGE",),
                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
                             "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
                             "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
                             }}

    RETURN_TYPES = ("CONDITIONING", "CONDITIONING")
    RETURN_NAMES = ("positive", "negative")
    FUNCTION = "apply_controlnet"

    CATEGORY = "conditioning/controlnet"

    def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent, vae=None):
        # Zero strength is a no-op for both conditionings.
        if strength == 0:
            return (positive, negative)

        hint = image.movedim(-1, 1)
        cnets = {}  # one shared controlnet copy per distinct previous controlnet
        results = []
        for cond_list in (positive, negative):
            converted = []
            for t in cond_list:
                opts = t[1].copy()
                prev = opts.get('control', None)
                if prev in cnets:
                    c_net = cnets[prev]
                else:
                    c_net = control_net.copy().set_cond_hint(hint, strength, (start_percent, end_percent), vae)
                    c_net.set_previous_controlnet(prev)
                    cnets[prev] = c_net
                opts['control'] = c_net
                opts['control_apply_to_uncond'] = False
                converted.append([t[0], opts])
            results.append(converted)
        return (results[0], results[1])
2023-07-05 21:34:45 +00:00
class UNETLoader:
    """Load a raw diffusion model (UNet) checkpoint, optionally casting weights to fp8."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"unet_name": (folder_paths.get_filename_list("unet"),),
                             "weight_dtype": (["default", "fp8_e4m3fn", "fp8_e5m2"],)
                             }}

    RETURN_TYPES = ("MODEL",)
    FUNCTION = "load_unet"

    CATEGORY = "advanced/loaders"

    def load_unet(self, unet_name, weight_dtype):
        # "default" leaves dtype selection to the loader. The fp8 torch
        # attributes are looked up lazily so merely loading in default mode
        # never touches them (mirrors the original elif chain).
        fp8_dtypes = {"fp8_e4m3fn": "float8_e4m3fn", "fp8_e5m2": "float8_e5m2"}
        model_options = {}
        if weight_dtype in fp8_dtypes:
            model_options["dtype"] = getattr(torch, fp8_dtypes[weight_dtype])

        unet_path = folder_paths.get_full_path("unet", unet_name)
        model = comfy.sd.load_diffusion_model(unet_path, model_options=model_options)
        return (model,)
2023-02-05 20:20:18 +00:00
class CLIPLoader:
    """Load a single text-encoder checkpoint as a CLIP object."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"clip_name": (folder_paths.get_filename_list("clip"),),
                             "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio"],),
                             }}

    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name, type="stable_diffusion"):
        # Anything not listed below falls back to plain stable diffusion.
        type_map = {
            "stable_cascade": comfy.sd.CLIPType.STABLE_CASCADE,
            "sd3": comfy.sd.CLIPType.SD3,
            "stable_audio": comfy.sd.CLIPType.STABLE_AUDIO,
        }
        clip_type = type_map.get(type, comfy.sd.CLIPType.STABLE_DIFFUSION)

        clip_path = folder_paths.get_full_path("clip", clip_name)
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type)
        return (clip,)
class DualCLIPLoader:
    """Load two text-encoder checkpoints into one CLIP object (sdxl/sd3/flux style)."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"clip_name1": (folder_paths.get_filename_list("clip"),),
                             "clip_name2": (folder_paths.get_filename_list("clip"),),
                             "type": (["sdxl", "sd3", "flux"],),
                             }}

    RETURN_TYPES = ("CLIP",)
    FUNCTION = "load_clip"

    CATEGORY = "advanced/loaders"

    def load_clip(self, clip_name1, clip_name2, type):
        clip_path1 = folder_paths.get_full_path("clip", clip_name1)
        clip_path2 = folder_paths.get_full_path("clip", clip_name2)
        if type == "sdxl":
            clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION
        elif type == "sd3":
            clip_type = comfy.sd.CLIPType.SD3
        elif type == "flux":
            clip_type = comfy.sd.CLIPType.FLUX
        else:
            # Bug fix: an unrecognized type previously left clip_type unbound and
            # raised UnboundLocalError below; fall back to stable diffusion instead.
            clip_type = comfy.sd.CLIPType.STABLE_DIFFUSION
        clip = comfy.sd.load_clip(ckpt_paths=[clip_path1, clip_path2], embedding_directory=folder_paths.get_folder_paths("embeddings"), clip_type=clip_type)
        return (clip,)
2023-03-05 23:39:25 +00:00
class CLIPVisionLoader:
    """Load a CLIP vision model from the clip_vision folder."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"clip_name": (folder_paths.get_filename_list("clip_vision"),),
                             }}

    RETURN_TYPES = ("CLIP_VISION",)
    FUNCTION = "load_clip"

    CATEGORY = "loaders"

    def load_clip(self, clip_name):
        path = folder_paths.get_full_path("clip_vision", clip_name)
        return (comfy.clip_vision.load(path),)
class CLIPVisionEncode:
    """Encode an image with a CLIP vision model into a CLIP_VISION_OUTPUT."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"clip_vision": ("CLIP_VISION",),
                             "image": ("IMAGE",)
                             }}

    RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
    FUNCTION = "encode"

    CATEGORY = "conditioning"

    def encode(self, clip_vision, image):
        # Delegate entirely to the vision model's own encoder.
        return (clip_vision.encode_image(image),)
class StyleModelLoader:
    """Load a style model from the style_models folder."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"style_model_name": (folder_paths.get_filename_list("style_models"),)}}

    RETURN_TYPES = ("STYLE_MODEL",)
    FUNCTION = "load_style_model"

    CATEGORY = "loaders"

    def load_style_model(self, style_model_name):
        path = folder_paths.get_full_path("style_models", style_model_name)
        return (comfy.sd.load_style_model(path),)
class StyleModelApply:
    """Append style-model tokens (derived from a CLIP vision output) to a conditioning."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING",),
                             "style_model": ("STYLE_MODEL",),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT",),
                             }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "conditioning/style_model"

    def apply_stylemodel(self, clip_vision_output, style_model, conditioning):
        # Collapse the style tokens into one batch row, then concatenate them
        # onto every conditioning entry along the token dimension.
        style_cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0)
        out = []
        for t in conditioning:
            out.append([torch.cat((t[0], style_cond), dim=1), t[1].copy()])
        return (out,)
2023-04-02 03:19:15 +00:00
class unCLIPConditioning:
    """Attach a CLIP vision output (with strength/noise augmentation) to a conditioning."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning": ("CONDITIONING",),
                             "clip_vision_output": ("CLIP_VISION_OUTPUT",),
                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                             }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_adm"

    CATEGORY = "conditioning"

    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
        # Zero strength is a no-op.
        if strength == 0:
            return (conditioning,)

        out = []
        for t in conditioning:
            opts = t[1].copy()
            entry = {"clip_vision_output": clip_vision_output, "strength": strength, "noise_augmentation": noise_augmentation}
            # Append to any existing unclip conditioning without mutating the
            # original list (copy via slicing).
            opts["unclip_conditioning"] = opts.get("unclip_conditioning", [])[:] + [entry]
            out.append([t[0], opts])
        return (out,)
2023-04-19 13:36:19 +00:00
class GLIGENLoader:
    """Load a GLIGEN model from the gligen folder."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"gligen_name": (folder_paths.get_filename_list("gligen"),)}}

    RETURN_TYPES = ("GLIGEN",)
    FUNCTION = "load_gligen"

    CATEGORY = "loaders"

    def load_gligen(self, gligen_name):
        path = folder_paths.get_full_path("gligen", gligen_name)
        return (comfy.sd.load_gligen(path),)
class GLIGENTextBoxApply:
    """Add a GLIGEN position box (text + x/y/width/height) to a conditioning."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"conditioning_to": ("CONDITIONING",),
                             "clip": ("CLIP",),
                             "gligen_textbox_model": ("GLIGEN",),
                             "text": ("STRING", {"multiline": True, "dynamicPrompts": True}),
                             "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                             "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
                             "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "conditioning/gligen"

    def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
        # Encode the box text; the unprojected pooled output drives GLIGEN.
        cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled="unprojected")
        # Box geometry is stored in latent units (1/8 pixel resolution).
        box = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
        out = []
        for t in conditioning_to:
            opts = t[1].copy()
            prev = opts['gligen'][2] if 'gligen' in opts else []
            opts['gligen'] = ("position", gligen_textbox_model, prev + box)
            out.append([t[0], opts])
        return (out,)
2023-04-02 03:19:15 +00:00
2023-01-03 06:53:32 +00:00
class EmptyLatentImage:
    """Produce a batch of all-zero latents to start sampling from."""

    def __init__(self):
        self.device = comfy.model_management.intermediate_device()

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "width": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8, "tooltip": "The width of the latent images in pixels."}),
                "height": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8, "tooltip": "The height of the latent images in pixels."}),
                "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."})
            }
        }

    RETURN_TYPES = ("LATENT",)
    OUTPUT_TOOLTIPS = ("The empty latent image batch.",)
    FUNCTION = "generate"

    CATEGORY = "latent"
    DESCRIPTION = "Create a new batch of empty latent images to be denoised via sampling."

    def generate(self, width, height, batch_size=1):
        # 4 latent channels at 1/8 of the pixel resolution.
        shape = [batch_size, 4, height // 8, width // 8]
        return ({"samples": torch.zeros(shape, device=self.device)},)
2023-01-03 06:53:32 +00:00
2023-02-16 15:38:08 +00:00
2023-04-17 21:24:58 +00:00
class LatentFromBatch:
    """Extract a contiguous window of latents (and masks) from a batch."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT",),
                             "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
                             "length": ("INT", {"default": 1, "min": 1, "max": 64}),
                             }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "frombatch"

    CATEGORY = "latent/batch"

    def frombatch(self, samples, batch_index, length):
        out = samples.copy()
        latents = samples["samples"]
        # Clamp the window so it always lies inside the batch.
        start = min(latents.shape[0] - 1, batch_index)
        count = min(latents.shape[0] - start, length)
        stop = start + count
        out["samples"] = latents[start:stop].clone()

        if "noise_mask" in samples:
            masks = samples["noise_mask"]
            if masks.shape[0] == 1:
                # One mask covers the whole batch; keep it whole.
                out["noise_mask"] = masks.clone()
            else:
                if masks.shape[0] < latents.shape[0]:
                    # Tile the masks until they cover the batch, then slice.
                    reps = math.ceil(latents.shape[0] / masks.shape[0])
                    masks = masks.repeat(reps, 1, 1, 1)[:latents.shape[0]]
                out["noise_mask"] = masks[start:stop].clone()

        # Track which source indices were taken, for noise reproducibility.
        if "batch_index" in samples:
            out["batch_index"] = samples["batch_index"][start:stop]
        else:
            out["batch_index"] = list(range(start, stop))
        return (out,)
class RepeatLatentBatch:
    """Repeat a latent batch `amount` times along the batch dimension."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT",),
                             "amount": ("INT", {"default": 1, "min": 1, "max": 64}),
                             }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "repeat"

    CATEGORY = "latent/batch"

    def repeat(self, samples, amount):
        out = samples.copy()
        latents = samples["samples"]
        out["samples"] = latents.repeat((amount, 1, 1, 1))

        if "noise_mask" in samples and samples["noise_mask"].shape[0] > 1:
            masks = samples["noise_mask"]
            if masks.shape[0] < latents.shape[0]:
                # NOTE(review): this tiled `masks` value is never used below —
                # the original code repeats the untiled mask; preserved as-is.
                reps = math.ceil(latents.shape[0] / masks.shape[0])
                masks = masks.repeat(reps, 1, 1, 1)[:latents.shape[0]]
            out["noise_mask"] = samples["noise_mask"].repeat((amount, 1, 1, 1))

        if "batch_index" in out:
            # Shift each repetition past the span of the existing indices.
            span = max(out["batch_index"]) - min(out["batch_index"]) + 1
            out["batch_index"] = out["batch_index"] + [idx + (rep * span) for rep in range(1, amount) for idx in out["batch_index"]]
        return (out,)
2023-02-04 20:53:29 +00:00
2023-01-03 06:53:32 +00:00
class LatentUpscale:
    """Resize a latent to an explicit width/height; 0 on one side keeps aspect ratio."""

    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                             "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             "crop": (s.crop_methods,)}}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, width, height, crop):
        # Both zero: nothing to do, pass the latent through unchanged.
        if width == 0 and height == 0:
            return (samples,)

        out = samples.copy()
        src = samples["samples"]
        if width == 0:
            # Derive width from height, preserving the aspect ratio.
            height = max(64, height)
            width = max(64, round(src.shape[3] * height / src.shape[2]))
        elif height == 0:
            width = max(64, width)
            height = max(64, round(src.shape[2] * width / src.shape[3]))
        else:
            width = max(64, width)
            height = max(64, height)
        out["samples"] = comfy.utils.common_upscale(src, width // 8, height // 8, upscale_method, crop)
        return (out,)
2023-05-23 16:53:38 +00:00
class LatentUpscaleBy:
    """Scale a latent's spatial size by a multiplicative factor."""

    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT",), "upscale_method": (s.upscale_methods,),
                             "scale_by": ("FLOAT", {"default": 1.5, "min": 0.01, "max": 8.0, "step": 0.01}),}}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "upscale"

    CATEGORY = "latent"

    def upscale(self, samples, upscale_method, scale_by):
        out = samples.copy()
        src = samples["samples"]
        new_width = round(src.shape[3] * scale_by)
        new_height = round(src.shape[2] * scale_by)
        out["samples"] = comfy.utils.common_upscale(src, new_width, new_height, upscale_method, "disabled")
        return (out,)
2023-01-31 07:28:07 +00:00
class LatentRotate:
    """Rotate a latent by a multiple of 90 degrees."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT",),
                             "rotation": (["none", "90 degrees", "180 degrees", "270 degrees"],),
                             }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "rotate"

    CATEGORY = "latent/transform"

    def rotate(self, samples, rotation):
        out = samples.copy()
        # Map the label prefix to a number of quarter turns ("none" -> 0).
        turns = 0
        for prefix, k in (("90", 1), ("180", 2), ("270", 3)):
            if rotation.startswith(prefix):
                turns = k
                break
        out["samples"] = torch.rot90(samples["samples"], k=turns, dims=[3, 2])
        return (out,)
2023-01-31 08:28:38 +00:00
class LatentFlip:
    """Mirror a latent vertically (x-axis) or horizontally (y-axis)."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT",),
                             "flip_method": (["x-axis: vertically", "y-axis: horizontally"],),
                             }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "flip"

    CATEGORY = "latent/transform"

    def flip(self, samples, flip_method):
        out = samples.copy()
        axis = None
        if flip_method.startswith("x"):
            axis = 2  # height dimension
        elif flip_method.startswith("y"):
            axis = 3  # width dimension
        if axis is not None:
            out["samples"] = torch.flip(samples["samples"], dims=[axis])
        return (out,)
2023-01-31 08:35:03 +00:00
class LatentComposite:
    """Paste samples_from into samples_to at pixel offset (x, y), optionally
    feathering the seam over `feather` pixels."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples_to": ("LATENT",),
                             "samples_from": ("LATENT",),
                             "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "composite"

    CATEGORY = "latent"

    def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0):
        # Convert pixel coordinates to latent coordinates (1/8 resolution).
        x = x // 8
        y = y // 8
        feather = feather // 8
        samples_out = samples_to.copy()
        # Work on a clone so the caller's tensor is never mutated.
        s = samples_to["samples"].clone()
        samples_to = samples_to["samples"]
        samples_from = samples_from["samples"]
        if feather == 0:
            # Hard paste, clipped to the destination bounds.
            s[:, :, y:y + samples_from.shape[2], x:x + samples_from.shape[3]] = samples_from[:, :, :samples_to.shape[2] - y, :samples_to.shape[3] - x]
        else:
            # Clip the source, then build a blend mask that ramps 0 -> 1 over
            # `feather` latent rows/cols on each edge that does not touch the
            # destination border (edges flush with the border stay at 1).
            samples_from = samples_from[:, :, :samples_to.shape[2] - y, :samples_to.shape[3] - x]
            mask = torch.ones_like(samples_from)
            for t in range(feather):
                if y != 0:
                    mask[:, :, t:1 + t, :] *= ((1.0 / feather) * (t + 1))
                if y + samples_from.shape[2] < samples_to.shape[2]:
                    mask[:, :, mask.shape[2] - 1 - t:mask.shape[2] - t, :] *= ((1.0 / feather) * (t + 1))
                if x != 0:
                    mask[:, :, :, t:1 + t] *= ((1.0 / feather) * (t + 1))
                if x + samples_from.shape[3] < samples_to.shape[3]:
                    mask[:, :, :, mask.shape[3] - 1 - t:mask.shape[3] - t] *= ((1.0 / feather) * (t + 1))
            # Weighted blend between pasted source and existing destination.
            rev_mask = torch.ones_like(mask) - mask
            s[:, :, y:y + samples_from.shape[2], x:x + samples_from.shape[3]] = samples_from[:, :, :samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:, :, y:y + samples_from.shape[2], x:x + samples_from.shape[3]] * rev_mask
        samples_out["samples"] = s
        return (samples_out,)
2023-01-31 08:35:03 +00:00
2023-08-01 06:23:14 +00:00
class LatentBlend:
    """Blend two latents; blend_factor is the weight given to samples1."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "samples1": ("LATENT",),
            "samples2": ("LATENT",),
            "blend_factor": ("FLOAT", {
                "default": 0.5,
                "min": 0,
                "max": 1,
                "step": 0.01
            }),
        }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "blend"

    CATEGORY = "_for_testing"

    def blend(self, samples1, samples2, blend_factor: float, blend_mode: str = "normal"):
        """Return samples1 * blend_factor + blend_mode(samples2) * (1 - blend_factor).

        If the two latents differ in shape, samples2 is resized (bicubic,
        center crop) to match samples1 before blending.
        """
        samples_out = samples1.copy()
        samples1 = samples1["samples"]
        samples2 = samples2["samples"]

        if samples1.shape != samples2.shape:
            # Cleanup: the original called samples2.permute(0, 3, 1, 2) before and
            # permute(0, 2, 3, 1) after the upscale but discarded both results
            # (Tensor.permute is not in-place), so they were no-ops. Removing
            # them changes nothing; the latent is already NCHW here.
            samples2 = comfy.utils.common_upscale(samples2, samples1.shape[3], samples1.shape[2], 'bicubic', crop='center')

        samples_blended = self.blend_mode(samples1, samples2, blend_mode)
        samples_blended = samples1 * blend_factor + samples_blended * (1 - blend_factor)

        samples_out["samples"] = samples_blended
        return (samples_out,)

    def blend_mode(self, img1, img2, mode):
        # Dispatch point for future modes; only "normal" (pass-through) exists.
        if mode == "normal":
            return img2
        else:
            raise ValueError(f"Unsupported blend mode: {mode}")
2023-02-04 20:21:46 +00:00
class LatentCrop:
    """Crop a rectangular region out of a latent (pixel coordinates, /8 internally)."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT",),
                             "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                             "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
                             "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                             }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "crop"

    CATEGORY = "latent/transform"

    def crop(self, samples, width, height, x, y):
        out = samples.copy()
        latents = samples['samples']
        x = x // 8
        y = y // 8

        # Keep the origin at least 8 latent units (64 px) from the far edge
        # so the crop is never degenerate.
        x = min(x, latents.shape[3] - 8)
        y = min(y, latents.shape[2] - 8)
        to_x = x + width // 8
        to_y = y + height // 8
        out['samples'] = latents[:, :, y:to_y, x:to_x]
        return (out,)
2023-02-15 21:58:55 +00:00
class SetLatentNoiseMask:
    """Attach an inpainting noise mask to a latent."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"samples": ("LATENT",),
                             "mask": ("MASK",),
                             }}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "set_mask"

    CATEGORY = "latent/inpaint"

    def set_mask(self, samples, mask):
        out = samples.copy()
        # Store as (batch, 1, H, W) so the mask broadcasts over latent channels.
        out["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))
        return (out,)
2023-06-05 18:19:02 +00:00
def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
    """Shared sampling helper for the KSampler nodes.

    Prepares the starting noise (zeros when disable_noise), honours an
    optional noise mask and per-sample batch indices from the latent dict,
    wires up the live preview callback, then runs comfy.sample.sample and
    returns the latent dict with the denoised samples.
    """
    latent_image = comfy.sample.fix_empty_latent_channels(model, latent["samples"])

    if disable_noise:
        # Zero noise on CPU; the sampler adds nothing in this mode.
        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
    else:
        # batch_index keeps noise reproducible for latents split from a batch.
        batch_inds = latent.get("batch_index", None)
        noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)

    noise_mask = latent.get("noise_mask", None)

    callback = latent_preview.prepare_callback(model, steps)
    disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
                                  force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)

    out = latent.copy()
    out["samples"] = samples
    return (out,)
2023-01-31 08:09:38 +00:00
2023-01-03 06:53:32 +00:00
class KSampler:
    """Basic sampler node: denoises a latent image with the chosen sampler and scheduler."""

    @classmethod
    def INPUT_TYPES(s):
        inputs = {}
        inputs["model"] = ("MODEL", {"tooltip": "The model used for denoising the input latent."})
        inputs["seed"] = ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "The random seed used for creating the noise."})
        inputs["steps"] = ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "The number of steps used in the denoising process."})
        inputs["cfg"] = ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01, "tooltip": "The Classifier-Free Guidance scale balances creativity and adherence to the prompt. Higher values result in images more closely matching the prompt however too high values will negatively impact quality."})
        inputs["sampler_name"] = (comfy.samplers.KSampler.SAMPLERS, {"tooltip": "The algorithm used when sampling, this can affect the quality, speed, and style of the generated output."})
        inputs["scheduler"] = (comfy.samplers.KSampler.SCHEDULERS, {"tooltip": "The scheduler controls how noise is gradually removed to form the image."})
        inputs["positive"] = ("CONDITIONING", {"tooltip": "The conditioning describing the attributes you want to include in the image."})
        inputs["negative"] = ("CONDITIONING", {"tooltip": "The conditioning describing the attributes you want to exclude from the image."})
        inputs["latent_image"] = ("LATENT", {"tooltip": "The latent image to denoise."})
        inputs["denoise"] = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of denoising applied, lower values will maintain the structure of the initial image allowing for image to image sampling."})
        return {"required": inputs}

    RETURN_TYPES = ("LATENT",)
    OUTPUT_TOOLTIPS = ("The denoised latent.",)
    FUNCTION = "sample"

    CATEGORY = "sampling"
    DESCRIPTION = "Uses the provided model, positive and negative conditioning to denoise the latent image."

    def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
        """Delegate to common_ksampler with default full-range settings."""
        return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)
2023-01-03 06:53:32 +00:00
2023-01-31 08:09:38 +00:00
class KSamplerAdvanced:
    """KSampler variant exposing noise toggling, a step range, and leftover-noise control."""

    @classmethod
    def INPUT_TYPES(s):
        required = {
            "model": ("MODEL",),
            "add_noise": (["enable", "disable"],),
            "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
            "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
            "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
            "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
            "scheduler": (comfy.samplers.KSampler.SCHEDULERS,),
            "positive": ("CONDITIONING",),
            "negative": ("CONDITIONING",),
            "latent_image": ("LATENT",),
            "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
            "end_at_step": ("INT", {"default": 10000, "min": 0, "max": 10000}),
            "return_with_leftover_noise": (["disable", "enable"],),
        }
        return {"required": required}

    RETURN_TYPES = ("LATENT",)
    FUNCTION = "sample"
    CATEGORY = "sampling"

    def sample(self, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, start_at_step, end_at_step, return_with_leftover_noise, denoise=1.0):
        # "enable" for leftover noise means: do NOT force a full denoise at the end.
        force_full_denoise = return_with_leftover_noise != "enable"
        disable_noise = add_noise == "disable"
        return common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)
2023-01-03 06:53:32 +00:00
class SaveImage:
    """Writes a batch of images to the output directory as PNG files, optionally
    embedding the prompt/workflow as PNG text chunks."""

    def __init__(self):
        self.output_dir = folder_paths.get_output_directory()
        self.type = "output"
        self.prefix_append = ""
        self.compress_level = 4

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "images": ("IMAGE", {"tooltip": "The images to save."}),
                "filename_prefix": ("STRING", {"default": "ComfyUI", "tooltip": "The prefix for the file to save. This may include formatting information such as %date:yyyy-MM-dd% or %Empty Latent Image.width% to include values from nodes."})
            },
            "hidden": {
                "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"
            },
        }

    RETURN_TYPES = ()
    FUNCTION = "save_images"
    OUTPUT_NODE = True

    CATEGORY = "image"
    DESCRIPTION = "Saves the input images to your ComfyUI output directory."

    def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
        """Save each image in *images*; returns UI metadata describing the written files."""
        filename_prefix += self.prefix_append
        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])

        results = []
        for batch_number, image in enumerate(images):
            # Convert the 0..1 float tensor into an 8-bit PIL image.
            array = np.clip(255. * image.cpu().numpy(), 0, 255).astype(np.uint8)
            img = Image.fromarray(array)

            # Embed the prompt/workflow as PNG text chunks unless disabled via CLI args.
            metadata = None
            if not args.disable_metadata:
                metadata = PngInfo()
                if prompt is not None:
                    metadata.add_text("prompt", json.dumps(prompt))
                if extra_pnginfo is not None:
                    for key in extra_pnginfo:
                        metadata.add_text(key, json.dumps(extra_pnginfo[key]))

            filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
            file = f"{filename_with_batch_num}_{counter:05}_.png"
            img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level)
            results.append({
                "filename": file,
                "subfolder": subfolder,
                "type": self.type
            })
            counter += 1

        return {"ui": {"images": results}}
2023-01-03 06:53:32 +00:00
2023-03-14 19:28:07 +00:00
class PreviewImage(SaveImage):
    """SaveImage variant that writes low-compression PNGs to the temp directory
    so the UI can display previews without cluttering the output folder."""

    def __init__(self):
        self.output_dir = folder_paths.get_temp_directory()
        self.type = "temp"
        # Random suffix keeps concurrent preview runs from overwriting each other.
        # Fix: the charset previously contained a duplicate 'p' and was missing 'w'.
        self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstuvwxyz") for x in range(5))
        self.compress_level = 1

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"images": ("IMAGE", ), },
                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                }
2023-03-14 23:08:23 +00:00
2023-01-22 19:59:34 +00:00
class LoadImage:
    """Loads an image file (and its alpha channel as a mask) from the input directory."""

    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), {"image_upload": True})},
                }

    CATEGORY = "image"

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "load_image"
    def load_image(self, image):
        """Return (image_batch, mask_batch) tensors for the selected file.

        Multi-frame files become a batch; frames whose size differs from the
        first frame are skipped. Frames without an alpha band get a 64x64 zero
        mask placeholder.
        """
        image_path = folder_paths.get_annotated_filepath(image)

        img = node_helpers.pillow(Image.open, image_path)

        output_images = []
        output_masks = []
        w, h = None, None

        excluded_formats = ['MPO']

        for frame in ImageSequence.Iterator(img):
            frame = node_helpers.pillow(ImageOps.exif_transpose, frame)

            if frame.mode == 'I':
                # 32-bit integer images: rescale to the 0..255 float range.
                frame = frame.point(lambda p: p * (1 / 255))
            rgb = frame.convert("RGB")

            if not output_images:
                w, h = rgb.size
            if rgb.size != (w, h):
                continue

            arr = np.array(rgb).astype(np.float32) / 255.0
            tensor = torch.from_numpy(arr)[None,]
            if 'A' in frame.getbands():
                alpha = np.array(frame.getchannel('A')).astype(np.float32) / 255.0
                mask = 1. - torch.from_numpy(alpha)
            else:
                mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
            output_images.append(tensor)
            output_masks.append(mask.unsqueeze(0))

        if len(output_images) > 1 and img.format not in excluded_formats:
            output_image = torch.cat(output_images, dim=0)
            output_mask = torch.cat(output_masks, dim=0)
        else:
            output_image = output_images[0]
            output_mask = output_masks[0]

        return (output_image, output_mask)

    @classmethod
    def IS_CHANGED(s, image):
        image_path = folder_paths.get_annotated_filepath(image)
        hasher = hashlib.sha256()
        with open(image_path, 'rb') as f:
            hasher.update(f.read())
        return hasher.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)
        return True
2023-02-15 22:39:42 +00:00
class LoadImageMask:
    """Loads one color/alpha channel of an image file as a mask tensor."""

    _color_channels = ["alpha", "red", "green", "blue"]

    @classmethod
    def INPUT_TYPES(s):
        input_dir = folder_paths.get_input_directory()
        files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))]
        return {"required":
                    {"image": (sorted(files), {"image_upload": True}),
                     "channel": (s._color_channels, ), }
                }

    CATEGORY = "mask"

    RETURN_TYPES = ("MASK",)
    FUNCTION = "load_image"
    def load_image(self, image, channel):
        """Return a (1, H, W) mask built from the requested channel; alpha is inverted."""
        image_path = folder_paths.get_annotated_filepath(image)
        img = node_helpers.pillow(Image.open, image_path)
        img = node_helpers.pillow(ImageOps.exif_transpose, img)
        if img.getbands() != ("R", "G", "B", "A"):
            if img.mode == 'I':
                # 32-bit integer images: rescale to the 0..255 range first.
                img = img.point(lambda p: p * (1 / 255))
            img = img.convert("RGBA")
        band = channel[0].upper()
        if band in img.getbands():
            mask = torch.from_numpy(np.array(img.getchannel(band)).astype(np.float32) / 255.0)
            if band == 'A':
                mask = 1. - mask
        else:
            # Missing channel: fall back to a 64x64 zero mask placeholder.
            mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
        return (mask.unsqueeze(0),)

    @classmethod
    def IS_CHANGED(s, image, channel):
        image_path = folder_paths.get_annotated_filepath(image)
        hasher = hashlib.sha256()
        with open(image_path, 'rb') as f:
            hasher.update(f.read())
        return hasher.digest().hex()

    @classmethod
    def VALIDATE_INPUTS(s, image):
        if not folder_paths.exists_annotated_filepath(image):
            return "Invalid image file: {}".format(image)
        return True
2023-02-04 20:53:29 +00:00
class ImageScale:
    """Resizes an image to an explicit width/height; 0 for either dimension
    keeps the aspect ratio (0 for both is a no-op)."""

    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
    crop_methods = ["disabled", "center"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                             "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                             "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
                             "crop": (s.crop_methods,)}}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, width, height, crop):
        if width == 0 and height == 0:
            # Nothing to do: both dimensions unspecified.
            return (image,)
        samples = image.movedim(-1, 1)
        if width == 0:
            width = max(1, round(samples.shape[3] * height / samples.shape[2]))
        elif height == 0:
            height = max(1, round(samples.shape[2] * width / samples.shape[3]))
        scaled = comfy.utils.common_upscale(samples, width, height, upscale_method, crop)
        return (scaled.movedim(1, -1),)
2023-01-03 06:53:32 +00:00
2023-06-12 05:14:04 +00:00
class ImageScaleBy:
    """Resizes an image by a multiplicative scale factor."""

    upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
                             "scale_by": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 8.0, "step": 0.01}), }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "upscale"

    CATEGORY = "image/upscaling"

    def upscale(self, image, upscale_method, scale_by):
        channels_first = image.movedim(-1, 1)
        new_width = round(channels_first.shape[3] * scale_by)
        new_height = round(channels_first.shape[2] * scale_by)
        scaled = comfy.utils.common_upscale(channels_first, new_width, new_height, upscale_method, "disabled")
        return (scaled.movedim(1, -1),)
2023-02-23 02:57:56 +00:00
class ImageInvert:
    """Inverts an image's colors (pixel -> 1.0 - pixel)."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "invert"

    CATEGORY = "image"

    def invert(self, image):
        inverted = 1.0 - image
        return (inverted,)
2023-08-15 00:23:38 +00:00
class ImageBatch:
    """Concatenates two images into one batch, rescaling the second to match."""

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image1": ("IMAGE",), "image2": ("IMAGE",)}}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "batch"

    CATEGORY = "image"

    def batch(self, image1, image2):
        # When spatial/channel dims differ, resize image2 to image1's size first.
        if image1.shape[1:] != image2.shape[1:]:
            image2 = comfy.utils.common_upscale(image2.movedim(-1, 1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1, -1)
        return (torch.cat((image1, image2), dim=0),)
2023-02-23 02:57:56 +00:00
2023-08-15 21:53:10 +00:00
class EmptyImage:
    """Generates a solid-color image batch from a 24-bit RGB integer."""

    def __init__(self, device="cpu"):
        self.device = device

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                             "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
                             "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
                             "color": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}),
                             }}
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "generate"

    CATEGORY = "image"

    def generate(self, width, height, batch_size=1, color=0):
        shape = [batch_size, height, width, 1]
        # Extract R, G, B bytes from the packed integer and normalize to 0..1.
        planes = [torch.full(shape, ((color >> shift) & 0xFF) / 0xFF) for shift in (16, 8, 0)]
        return (torch.cat(planes, dim=-1),)
2023-03-23 15:33:35 +00:00
class ImagePadForOutpaint:
    """Pads an image with 0.5-gray borders for outpainting and returns a mask
    marking the padded area, optionally feathered at the inner edges."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE",),
                "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
                "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK")
    FUNCTION = "expand_image"

    CATEGORY = "image"

    def expand_image(self, image, left, top, right, bottom, feathering):
        """Return (padded_image, mask).

        mask is 1.0 over the new border, 0.0 deep inside the original image,
        and ramps quadratically from 1 to 0 over *feathering* pixels on the
        sides that were actually padded.
        """
        d1, d2, d3, d4 = image.size()

        new_image = torch.ones(
            (d1, d2 + top + bottom, d3 + left + right, d4),
            dtype=torch.float32,
        ) * 0.5

        new_image[:, top:top + d2, left:left + d3, :] = image

        mask = torch.ones(
            (d2 + top + bottom, d3 + left + right),
            dtype=torch.float32,
        )

        t = torch.zeros((d2, d3), dtype=torch.float32)

        if feathering > 0 and feathering * 2 < d2 and feathering * 2 < d3:
            # Vectorized replacement for the original per-pixel Python loop:
            # for each pixel, take the distance to the nearest padded edge
            # (non-padded sides count as "infinitely" far via d2/d3 sentinels)
            # and apply the quadratic ramp ((feathering - d) / feathering)^2
            # where d < feathering, else 0 (clamp handles that case).
            rows = torch.arange(d2, dtype=torch.float32)
            cols = torch.arange(d3, dtype=torch.float32)
            dist_top = rows if top != 0 else torch.full((d2,), float(d2))
            dist_bottom = (d2 - rows) if bottom != 0 else torch.full((d2,), float(d2))
            dist_left = cols if left != 0 else torch.full((d3,), float(d3))
            dist_right = (d3 - cols) if right != 0 else torch.full((d3,), float(d3))
            dist = torch.minimum(
                torch.minimum(dist_top, dist_bottom).unsqueeze(1),
                torch.minimum(dist_left, dist_right).unsqueeze(0),
            )
            ramp = ((feathering - dist) / feathering).clamp(min=0.0)
            t = ramp * ramp

        mask[top:top + d2, left:left + d3] = t

        return (new_image, mask)
2023-01-03 06:53:32 +00:00
# Maps node type names (the identifiers stored in workflow JSON) to the node
# classes defined in this file. Custom-node packages are merged into this dict
# by load_custom_node().
NODE_CLASS_MAPPINGS = {
    "KSampler": KSampler,
    "CheckpointLoaderSimple": CheckpointLoaderSimple,
    "CLIPTextEncode": CLIPTextEncode,
    "CLIPSetLastLayer": CLIPSetLastLayer,
    "VAEDecode": VAEDecode,
    "VAEEncode": VAEEncode,
    "VAEEncodeForInpaint": VAEEncodeForInpaint,
    "VAELoader": VAELoader,
    "EmptyLatentImage": EmptyLatentImage,
    "LatentUpscale": LatentUpscale,
    "LatentUpscaleBy": LatentUpscaleBy,
    "LatentFromBatch": LatentFromBatch,
    "RepeatLatentBatch": RepeatLatentBatch,
    "SaveImage": SaveImage,
    "PreviewImage": PreviewImage,
    "LoadImage": LoadImage,
    "LoadImageMask": LoadImageMask,
    "ImageScale": ImageScale,
    "ImageScaleBy": ImageScaleBy,
    "ImageInvert": ImageInvert,
    "ImageBatch": ImageBatch,
    "ImagePadForOutpaint": ImagePadForOutpaint,
    "EmptyImage": EmptyImage,
    "ConditioningAverage": ConditioningAverage,
    "ConditioningCombine": ConditioningCombine,
    "ConditioningConcat": ConditioningConcat,
    "ConditioningSetArea": ConditioningSetArea,
    "ConditioningSetAreaPercentage": ConditioningSetAreaPercentage,
    "ConditioningSetAreaStrength": ConditioningSetAreaStrength,
    "ConditioningSetMask": ConditioningSetMask,
    "KSamplerAdvanced": KSamplerAdvanced,
    "SetLatentNoiseMask": SetLatentNoiseMask,
    "LatentComposite": LatentComposite,
    "LatentBlend": LatentBlend,
    "LatentRotate": LatentRotate,
    "LatentFlip": LatentFlip,
    "LatentCrop": LatentCrop,
    "LoraLoader": LoraLoader,
    "CLIPLoader": CLIPLoader,
    "UNETLoader": UNETLoader,
    "DualCLIPLoader": DualCLIPLoader,
    "CLIPVisionEncode": CLIPVisionEncode,
    "StyleModelApply": StyleModelApply,
    "unCLIPConditioning": unCLIPConditioning,
    "ControlNetApply": ControlNetApply,
    "ControlNetApplyAdvanced": ControlNetApplyAdvanced,
    "ControlNetLoader": ControlNetLoader,
    "DiffControlNetLoader": DiffControlNetLoader,
    "StyleModelLoader": StyleModelLoader,
    "CLIPVisionLoader": CLIPVisionLoader,
    "VAEDecodeTiled": VAEDecodeTiled,
    "VAEEncodeTiled": VAEEncodeTiled,
    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
    "GLIGENLoader": GLIGENLoader,
    "GLIGENTextBoxApply": GLIGENTextBoxApply,
    "InpaintModelConditioning": InpaintModelConditioning,

    "CheckpointLoader": CheckpointLoader,
    "DiffusersLoader": DiffusersLoader,

    "LoadLatent": LoadLatent,
    "SaveLatent": SaveLatent,

    "ConditioningZeroOut": ConditioningZeroOut,
    "ConditioningSetTimestepRange": ConditioningSetTimestepRange,
    "LoraLoaderModelOnly": LoraLoaderModelOnly,
}
2023-03-30 21:13:58 +00:00
# Maps node type names to the human-readable titles shown in the UI.
# Custom nodes may extend this via their own NODE_DISPLAY_NAME_MAPPINGS,
# merged in by load_custom_node().
NODE_DISPLAY_NAME_MAPPINGS = {
    # Sampling
    "KSampler": "KSampler",
    "KSamplerAdvanced": "KSampler (Advanced)",
    # Loaders
    "CheckpointLoader": "Load Checkpoint With Config (DEPRECATED)",
    "CheckpointLoaderSimple": "Load Checkpoint",
    "VAELoader": "Load VAE",
    "LoraLoader": "Load LoRA",
    "CLIPLoader": "Load CLIP",
    "ControlNetLoader": "Load ControlNet Model",
    "DiffControlNetLoader": "Load ControlNet Model (diff)",
    "StyleModelLoader": "Load Style Model",
    "CLIPVisionLoader": "Load CLIP Vision",
    "UpscaleModelLoader": "Load Upscale Model",
    "UNETLoader": "Load Diffusion Model",
    # Conditioning
    "CLIPVisionEncode": "CLIP Vision Encode",
    "StyleModelApply": "Apply Style Model",
    "CLIPTextEncode": "CLIP Text Encode (Prompt)",
    "CLIPSetLastLayer": "CLIP Set Last Layer",
    "ConditioningCombine": "Conditioning (Combine)",
    "ConditioningAverage": "Conditioning (Average)",
    "ConditioningConcat": "Conditioning (Concat)",
    "ConditioningSetArea": "Conditioning (Set Area)",
    "ConditioningSetAreaPercentage": "Conditioning (Set Area with Percentage)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    "ControlNetApplyAdvanced": "Apply ControlNet (Advanced)",
    # Latent
    "VAEEncodeForInpaint": "VAE Encode (for Inpainting)",
    "SetLatentNoiseMask": "Set Latent Noise Mask",
    "VAEDecode": "VAE Decode",
    "VAEEncode": "VAE Encode",
    "LatentRotate": "Rotate Latent",
    "LatentFlip": "Flip Latent",
    "LatentCrop": "Crop Latent",
    "EmptyLatentImage": "Empty Latent Image",
    "LatentUpscale": "Upscale Latent",
    "LatentUpscaleBy": "Upscale Latent By",
    "LatentComposite": "Latent Composite",
    "LatentBlend": "Latent Blend",
    "LatentFromBatch": "Latent From Batch",
    "RepeatLatentBatch": "Repeat Latent Batch",
    # Image
    "SaveImage": "Save Image",
    "PreviewImage": "Preview Image",
    "LoadImage": "Load Image",
    "LoadImageMask": "Load Image (as Mask)",
    "ImageScale": "Upscale Image",
    "ImageScaleBy": "Upscale Image By",
    "ImageUpscaleWithModel": "Upscale Image (using Model)",
    "ImageInvert": "Invert Image",
    "ImagePadForOutpaint": "Pad Image for Outpainting",
    "ImageBatch": "Batch Images",
    # _for_testing
    "VAEDecodeTiled": "VAE Decode (Tiled)",
    "VAEEncodeTiled": "VAE Encode (Tiled)",
}
2023-08-20 18:55:48 +00:00
# Maps loaded custom-node module names to their web asset directories;
# populated by load_custom_node() when a module declares WEB_DIRECTORY.
EXTENSION_WEB_DIRS = {}
2024-07-05 00:49:07 +00:00
2024-07-16 00:36:03 +00:00
def get_module_name(module_path: str) -> str:
    """
    Returns the module name based on the given module path.

    The name is the final path component; the extension is stripped when the
    path points to an existing file. Trailing path separators are ignored
    (previously a trailing slash made this return an empty string).

    Examples:
        get_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node.py") -> "my_custom_node"
        get_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node") -> "my_custom_node"
        get_module_name("C:/Users/username/ComfyUI/custom_nodes/my_custom_node/") -> "my_custom_node"

    Args:
        module_path (str): The path of the module.

    Returns:
        str: The module name.
    """
    # normpath collapses any trailing separator so basename never returns "".
    base_path = os.path.basename(os.path.normpath(module_path))
    if os.path.isfile(module_path):
        # Plain-file modules: drop the extension (e.g. ".py", ".disabled").
        base_path = os.path.splitext(base_path)[0]
    return base_path
2024-07-09 21:07:15 +00:00
2024-07-16 00:36:03 +00:00
def load_custom_node(module_path: str, ignore=set(), module_parent="custom_nodes") -> bool:
    """Import one custom-node module (a .py file or a package directory) and
    register its node classes into the global mappings.

    Args:
        module_path: Path to a .py file or to a directory containing __init__.py.
        ignore: Node names that must not be overridden (e.g. built-in node names).
        module_parent: Logical parent package recorded on each registered class
            as RELATIVE_PYTHON_MODULE.

    Returns:
        bool: True when the module exposed NODE_CLASS_MAPPINGS and was
        registered; False on import failure or missing NODE_CLASS_MAPPINGS.
    """
    # For files the sys.modules key is the full path minus its extension,
    # not just the basename (contrast with get_module_name()).
    module_name = os.path.basename(module_path)
    if os.path.isfile(module_path):
        sp = os.path.splitext(module_path)
        module_name = sp[0]
    try:
        logging.debug("Trying to load custom node {}".format(module_path))
        if os.path.isfile(module_path):
            module_spec = importlib.util.spec_from_file_location(module_name, module_path)
            module_dir = os.path.split(module_path)[0]
        else:
            # Directory module: import it through its __init__.py.
            module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py"))
            module_dir = module_path
        module = importlib.util.module_from_spec(module_spec)
        sys.modules[module_name] = module
        module_spec.loader.exec_module(module)

        # Optional web assets: record the module's WEB_DIRECTORY if it exists.
        if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None:
            web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY")))
            if os.path.isdir(web_dir):
                EXTENSION_WEB_DIRS[module_name] = web_dir

        if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
            # Merge node classes into the global registry, skipping names in *ignore*.
            for name, node_cls in module.NODE_CLASS_MAPPINGS.items():
                if name not in ignore:
                    NODE_CLASS_MAPPINGS[name] = node_cls
                    node_cls.RELATIVE_PYTHON_MODULE = "{}.{}".format(module_parent, get_module_name(module_path))
            if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
            return True
        else:
            logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.")
            return False
    except Exception as e:
        logging.warning(traceback.format_exc())
        logging.warning(f"Cannot import {module_path} module for custom nodes: {e}")
        return False
2023-03-11 17:49:41 +00:00
2024-07-01 21:54:03 +00:00
def init_external_custom_nodes():
    """
    Initializes the external custom nodes.

    This function loads custom nodes from the specified folder paths and imports
    them into the application. It measures the import times for each custom node
    and logs the results.

    Returns:
        None
    """
    # Names registered before any custom node loads; used so custom modules
    # cannot silently override built-in nodes (passed as the ignore set).
    base_node_names = set(NODE_CLASS_MAPPINGS.keys())
    node_paths = folder_paths.get_folder_paths("custom_nodes")
    node_import_times = []
    for custom_node_path in node_paths:
        realpath = os.path.realpath(custom_node_path)
        # A configured custom_nodes directory may not exist (fresh install or a
        # stale extra-paths entry); skip it instead of crashing in os.listdir.
        if not os.path.isdir(realpath):
            continue
        possible_modules = os.listdir(realpath)
        if "__pycache__" in possible_modules:
            possible_modules.remove("__pycache__")

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            # Only single-file .py modules or package directories qualify.
            if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
            if module_path.endswith(".disabled"): continue
            time_before = time.perf_counter()
            success = load_custom_node(module_path, base_node_names, module_parent="custom_nodes")
            node_import_times.append((time.perf_counter() - time_before, module_path, success))

    if len(node_import_times) > 0:
        logging.info("\nImport times for custom nodes:")
        for n in sorted(node_import_times):
            if n[2]:
                import_message = ""
            else:
                import_message = " (IMPORT FAILED)"
            logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1]))
        logging.info("")
2023-02-17 16:19:49 +00:00
2024-07-01 21:54:03 +00:00
def init_builtin_extra_nodes():
    """
    Loads the built-in extra node modules shipped in the "comfy_extras"
    directory and registers them with ComfyUI.

    Returns:
        list[str]: filenames of the extra node modules that failed to import
        (empty when everything loaded cleanly).
    """
    extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras")
    extras_files = [
        "nodes_latent.py",
        "nodes_hypernetwork.py",
        "nodes_upscale_model.py",
        "nodes_post_processing.py",
        "nodes_mask.py",
        "nodes_compositing.py",
        "nodes_rebatch.py",
        "nodes_model_merging.py",
        "nodes_tomesd.py",
        "nodes_clip_sdxl.py",
        "nodes_canny.py",
        "nodes_freelunch.py",
        "nodes_custom_sampler.py",
        "nodes_hypertile.py",
        "nodes_model_advanced.py",
        "nodes_model_downscale.py",
        "nodes_images.py",
        "nodes_video_model.py",
        "nodes_sag.py",
        "nodes_perpneg.py",
        "nodes_stable3d.py",
        "nodes_sdupscale.py",
        "nodes_photomaker.py",
        "nodes_cond.py",
        "nodes_morphology.py",
        "nodes_stable_cascade.py",
        "nodes_differential_diffusion.py",
        "nodes_ip2p.py",
        "nodes_model_merging_model_specific.py",
        "nodes_pag.py",
        "nodes_align_your_steps.py",
        "nodes_attention_multiply.py",
        "nodes_advanced_samplers.py",
        "nodes_webcam.py",
        "nodes_audio.py",
        "nodes_sd3.py",
        "nodes_gits.py",
        "nodes_controlnet.py",
        "nodes_hunyuan.py",
        "nodes_flux.py",
    ]

    # Collect the modules whose import did not succeed; load_custom_node
    # returns False (and logs the traceback) on failure.
    import_failed = [
        node_file
        for node_file in extras_files
        if not load_custom_node(os.path.join(extras_dir, node_file), module_parent="comfy_extras")
    ]
    return import_failed
def init_extra_nodes(init_custom_nodes=True):
    """
    Loads all extra nodes: the built-in comfy_extras modules and, optionally,
    the external custom nodes, then reports any built-in import failures.

    Args:
        init_custom_nodes (bool): when True (default), also scan the
            custom_nodes folders for external node packs.

    Returns:
        None
    """
    import_failed = init_builtin_extra_nodes()

    if init_custom_nodes:
        init_external_custom_nodes()
    else:
        logging.info("Skipping loading of custom nodes")

    if import_failed:
        logging.warning("WARNING: some comfy_extras/ nodes did not import correctly. This may be because they are missing some dependencies.\n")
        for node in import_failed:
            logging.warning("IMPORT FAILED: {}".format(node))
        logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.")
        # Standalone Windows builds ship their own updater script.
        hint = ("Please run the update script: update/update_comfyui.bat"
                if args.windows_standalone_build
                else "Please do a: pip install -r requirements.txt")
        logging.warning(hint)
        logging.warning("")