Add latent2rgb preview

parent 70d72c4336
commit d5a28fadaa
comfy/cli_args.py
@@ -44,10 +44,11 @@ parser.add_argument("--dont-upcast-attention", action="store_true", help="Disabl
 parser.add_argument("--force-fp32", action="store_true", help="Force fp32 (If this makes your GPU work better please report it).")
 parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.")
 
-class PreviewType(enum.Enum):
+class LatentPreviewType(enum.Enum):
+    Latent2RGB = "latent2rgb"
     TAESD = "taesd"
 parser.add_argument("--disable-previews", action="store_true", help="Disable showing node previews.")
-parser.add_argument("--default-preview-method", type=str, default=PreviewType.TAESD, metavar="PREVIEW_TYPE", help="Default preview method for sampler nodes.")
+parser.add_argument("--default-preview-method", type=str, default=LatentPreviewType.Latent2RGB, metavar="PREVIEW_TYPE", help="Default preview method for sampler nodes.")
 
 attn_group = parser.add_mutually_exclusive_group()
 attn_group.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.")
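A minimal, self-contained sketch of the pattern this hunk sets up: an enum of preview methods whose member serves as the default for a string-typed flag. Because the flag is declared with `type=str`, a value given on the command line arrives as a plain string while the default stays an enum member; the `parse_preview_method` helper below is hypothetical (not part of this commit) and shows one way to normalize the two cases.

```python
import argparse
import enum

class LatentPreviewType(enum.Enum):
    Latent2RGB = "latent2rgb"
    TAESD = "taesd"

parser = argparse.ArgumentParser()
parser.add_argument("--default-preview-method", type=str,
                    default=LatentPreviewType.Latent2RGB, metavar="PREVIEW_TYPE")

def parse_preview_method(value):
    # Hypothetical helper: map a CLI string back onto the enum;
    # the enum default passes through unchanged.
    if isinstance(value, LatentPreviewType):
        return value
    return LatentPreviewType(value)  # raises ValueError for unknown names

args = parser.parse_args(["--default-preview-method", "taesd"])
assert parse_preview_method(args.default_preview_method) is LatentPreviewType.TAESD
```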
comfy/utils.py
@@ -1,6 +1,7 @@
 import torch
 import math
 import struct
+import comfy.model_management
 
 def load_torch_file(ckpt, safe_load=False):
     if ckpt.lower().endswith(".safetensors"):
@@ -166,6 +167,8 @@ def tiled_scale(samples, function, tile_x=64, tile_y=64, overlap = 8, upscale_am
     out_div = torch.zeros((s.shape[0], out_channels, round(s.shape[2] * upscale_amount), round(s.shape[3] * upscale_amount)), device="cpu")
     for y in range(0, s.shape[2], tile_y - overlap):
         for x in range(0, s.shape[3], tile_x - overlap):
+            comfy.model_management.throw_exception_if_processing_interrupted()
+
             s_in = s[:,:,y:y+tile_y,x:x+tile_x]
 
             ps = function(s_in).cpu()
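The two added lines make the tile loop interruptible. The real flag and exception live in `comfy.model_management`, whose internals this diff does not show; the sketch below is an assumed minimal analogue of the cooperative-cancellation pattern being used.

```python
import threading

# Assumed analogue of comfy.model_management's interrupt machinery,
# for illustration only.
interrupt_requested = threading.Event()

class ProcessingInterrupted(Exception):
    pass

def throw_exception_if_processing_interrupted():
    # Checked once per tile, so a long tiled_scale pass can be cancelled
    # between tiles instead of only after the whole image finishes.
    if interrupt_requested.is_set():
        interrupt_requested.clear()
        raise ProcessingInterrupted()
```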
nodes.py (61 lines changed)
@@ -24,7 +24,7 @@ import comfy.samplers
 import comfy.sample
 import comfy.sd
 import comfy.utils
-from comfy.cli_args import args
+from comfy.cli_args import args, LatentPreviewType
 from comfy.taesd.taesd import TAESD
 
 import comfy.clip_vision
@@ -40,6 +40,27 @@ class LatentPreviewer:
     pass
 
 
+class Latent2RGBPreviewer(LatentPreviewer):
+    def __init__(self):
+        self.latent_rgb_factors = torch.tensor([
+            #   R        G        B
+            [0.298, 0.207, 0.208],    # L1
+            [0.187, 0.286, 0.173],    # L2
+            [-0.158, 0.189, 0.264],   # L3
+            [-0.184, -0.271, -0.473], # L4
+        ], device="cpu")
+
+    def decode_latent_to_preview(self, device, x0):
+        latent_image = x0[0].permute(1, 2, 0).cpu() @ self.latent_rgb_factors
+
+        latents_ubyte = (((latent_image + 1) / 2)
+                         .clamp(0, 1)  # change scale from -1..1 to 0..1
+                         .mul(0xFF)    # to 0..255
+                         .byte()).cpu()
+
+        return Image.fromarray(latents_ubyte.numpy())
+
+
 def before_node_execution():
     comfy.model_management.throw_exception_if_processing_interrupted()
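To see what `Latent2RGBPreviewer` computes, here is a self-contained run of the same projection on a stand-in latent (shapes assume a Stable-Diffusion-style 4-channel latent; the random `x0` is of course not from a real sampler):

```python
import torch
from PIL import Image

latent_rgb_factors = torch.tensor([
    #   R       G       B
    [0.298, 0.207, 0.208],    # L1
    [0.187, 0.286, 0.173],    # L2
    [-0.158, 0.189, 0.264],   # L3
    [-0.184, -0.271, -0.473], # L4
])

x0 = torch.randn(1, 4, 64, 64)  # stand-in latent [batch, channels, H, W]

# [4, H, W] -> [H, W, 4], then a [4, 3] matmul maps latent channels to RGB
latent_image = x0[0].permute(1, 2, 0) @ latent_rgb_factors
preview = Image.fromarray(
    ((latent_image + 1) / 2).clamp(0, 1).mul(0xFF).byte().numpy()
)
preview.save("preview.png")  # latent resolution, 1/8 the size of the decoded image
```

The point of the design is cost: one small matmul per sampling step instead of a decoder forward pass, at the price of a blurry but roughly color-correct preview.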
@@ -266,7 +287,13 @@ class TAESDPreviewerImpl(LatentPreviewer):
         x_sample = self.taesd.decoder(x0.to(device))[0].detach()
         # x_sample = self.taesd.unscale_latents(x_sample).div(4).add(0.5)  # returns value in [-2, 2]
         x_sample = x_sample.sub(0.5).mul(2)
-        return x_sample
+
+        x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
+        x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
+        x_sample = x_sample.astype(np.uint8)
+
+        preview_image = Image.fromarray(x_sample)
+        return preview_image
 
 class SaveLatent:
     def __init__(self):
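The conversion that moves into the previewer here is the usual CHW-float-to-HWC-uint8 dance; a self-contained version with a stand-in tensor (the random input replaces a real TAESD decode):

```python
import numpy as np
import torch
from PIL import Image

x_sample = torch.rand(3, 64, 64) * 2 - 1  # stand-in for a decoded sample in [-1, 1]

x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)  # -> [0, 1]
arr = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)            # CHW -> HWC
img = Image.fromarray(arr.astype(np.uint8))                       # PIL wants HWC uint8
```

With this move, both previewers return a PIL Image from `decode_latent_to_preview`, which is what lets the caller in the next hunk shrink to a common interface.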
@@ -952,16 +979,8 @@ class SetLatentNoiseMask:
 
 
 def decode_latent_to_preview_image(previewer, device, preview_format, x0):
-    x_sample = previewer.decode_latent_to_preview(device, x0)
-
-    x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
-    x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
-    x_sample = x_sample.astype(np.uint8)
-
-    preview_image = Image.fromarray(x_sample)
-
-    if preview_image.size[0] > MAX_PREVIEW_RESOLUTION or preview_image.size[1] > MAX_PREVIEW_RESOLUTION:
-        preview_image.thumbnail((MAX_PREVIEW_RESOLUTION, MAX_PREVIEW_RESOLUTION), Image.ANTIALIAS)
+    preview_image = previewer.decode_latent_to_preview(device, x0)
+    preview_image = ImageOps.contain(preview_image, (MAX_PREVIEW_RESOLUTION, MAX_PREVIEW_RESOLUTION), Image.ANTIALIAS)
 
     preview_type = 1
     if preview_format == "JPEG":
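`ImageOps.contain` folds the size check and the `thumbnail()` call into one expression: it returns a copy scaled to fit the bounding box with aspect ratio preserved. One behavioral difference worth knowing: unlike `thumbnail()`, `contain()` will also upscale images smaller than the box. A quick check (`MAX_PREVIEW_RESOLUTION = 512` is an assumed value; the constant is defined elsewhere in nodes.py):

```python
from PIL import Image, ImageOps

MAX_PREVIEW_RESOLUTION = 512  # assumed value; defined elsewhere in nodes.py

img = Image.new("RGB", (2048, 1024))
small = ImageOps.contain(img, (MAX_PREVIEW_RESOLUTION, MAX_PREVIEW_RESOLUTION),
                         Image.LANCZOS)  # Image.ANTIALIAS is the same filter on older Pillow
print(small.size)  # (512, 256): aspect ratio preserved
```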
@@ -999,13 +1018,17 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
     previewer = None
     if not args.disable_previews:
-        # TODO previewer methods
-        encoder_path = folder_paths.get_full_path("taesd", "taesd_encoder.pth")
-        decoder_path = folder_paths.get_full_path("taesd", "taesd_decoder.pth")
-        if encoder_path and decoder_path:
-            taesd = TAESD(encoder_path, decoder_path).to(device)
-            previewer = TAESDPreviewerImpl(taesd)
-        else:
-            print("Warning: TAESD previews enabled, but could not find models/taesd/taesd_encoder.pth and models/taesd/taesd_decoder.pth")
+        if args.default_preview_method == LatentPreviewType.TAESD:
+            encoder_path = folder_paths.get_full_path("taesd", "taesd_encoder.pth")
+            decoder_path = folder_paths.get_full_path("taesd", "taesd_decoder.pth")
+            if encoder_path and decoder_path:
+                taesd = TAESD(encoder_path, decoder_path).to(device)
+                previewer = TAESDPreviewerImpl(taesd)
+            else:
+                print("Warning: TAESD previews enabled, but could not find models/taesd/taesd_encoder.pth and models/taesd/taesd_decoder.pth")
+
+        if previewer is None:
+            previewer = Latent2RGBPreviewer()
 
     pbar = comfy.utils.ProgressBar(steps)
     def callback(step, x0, x, total_steps):
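Condensing the new control flow into one self-contained function (stand-in classes; a paraphrase of the hunk above, not code from the commit): TAESD is used only when it is the selected method and both checkpoints are found, everything else falls through to the dependency-free latent2rgb previewer, and previews stay off entirely under `--disable-previews`.

```python
class TAESDPreviewerStub:       # stand-in for TAESDPreviewerImpl
    pass

class Latent2RGBPreviewerStub:  # stand-in for Latent2RGBPreviewer
    pass

def choose_previewer(previews_disabled, taesd_selected, taesd_weights_found):
    previewer = None
    if not previews_disabled:
        if taesd_selected and taesd_weights_found:
            previewer = TAESDPreviewerStub()
        if previewer is None:  # covers "not selected" and "weights missing"
            previewer = Latent2RGBPreviewerStub()
    return previewer

assert choose_previewer(True, True, True) is None
assert isinstance(choose_previewer(False, True, False), Latent2RGBPreviewerStub)
```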