Tiled upscaling with the upscale models.

comfyanonymous 2023-03-11 14:04:13 -05:00
parent 905857edd8
commit 9db2e97b47
3 changed files with 28 additions and 25 deletions

@@ -386,30 +386,8 @@ class VAE:
     def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 8):
         model_management.unload_model()
-        output = torch.empty((samples.shape[0], 3, samples.shape[2] * 8, samples.shape[3] * 8), device="cpu")
         self.first_stage_model = self.first_stage_model.to(self.device)
-        for b in range(samples.shape[0]):
-            s = samples[b:b+1]
-            out = torch.zeros((s.shape[0], 3, s.shape[2] * 8, s.shape[3] * 8), device="cpu")
-            out_div = torch.zeros((s.shape[0], 3, s.shape[2] * 8, s.shape[3] * 8), device="cpu")
-            for y in range(0, s.shape[2], tile_y - overlap):
-                for x in range(0, s.shape[3], tile_x - overlap):
-                    s_in = s[:,:,y:y+tile_y,x:x+tile_x]
-                    pixel_samples = self.first_stage_model.decode(1. / self.scale_factor * s_in.to(self.device))
-                    pixel_samples = torch.clamp((pixel_samples + 1.0) / 2.0, min=0.0, max=1.0)
-                    ps = pixel_samples.cpu()
-                    mask = torch.ones_like(ps)
-                    feather = overlap * 8
-                    for t in range(feather):
-                        mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))
-                        mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
-                        mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
-                        mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
-                    out[:,:,y*8:(y+tile_y)*8,x*8:(x+tile_x)*8] += ps * mask
-                    out_div[:,:,y*8:(y+tile_y)*8,x*8:(x+tile_x)*8] += mask
-            output[b:b+1] = out/out_div
+        output = utils.tiled_scale(samples, lambda a: torch.clamp((self.first_stage_model.decode(1. / self.scale_factor * a.to(self.device)) + 1.0) / 2.0, min=0.0, max=1.0), tile_x, tile_y, overlap, upscale_amount = 8)
         self.first_stage_model = self.first_stage_model.cpu()
         return output.movedim(1,-1)
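The hunk above replaces the inline tiling loop with a single call to the new utils.tiled_scale helper, passing the per-tile VAE decode as a callback and upscale_amount=8 for the VAE's 8x spatial factor. A minimal sketch of the contract that callback has to satisfy; fake_decode and the scale_factor value are assumptions for illustration, not part of the commit:

import torch

# Hypothetical stand-in for self.first_stage_model.decode: takes a
# (B, 4, h, w) latent tile, returns a (B, 3, h*8, w*8) image tile in [-1, 1].
def fake_decode(latent_tile):
    b, _, h, w = latent_tile.shape
    return torch.rand(b, 3, h * 8, w * 8) * 2.0 - 1.0

scale_factor = 0.18215  # assumed Stable Diffusion latent scale factor
decode_fn = lambda a: torch.clamp((fake_decode(1. / scale_factor * a) + 1.0) / 2.0, min=0.0, max=1.0)

print(decode_fn(torch.rand(1, 4, 64, 64)).shape)  # torch.Size([1, 3, 512, 512]), hence upscale_amount=8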

@@ -16,3 +16,28 @@ def common_upscale(samples, width, height, upscale_method, crop):
     else:
         s = samples
     return torch.nn.functional.interpolate(s, size=(height, width), mode=upscale_method)
+
+@torch.inference_mode()
+def tiled_scale(samples, function, tile_x=64, tile_y=64, overlap = 8, upscale_amount = 4):
+    output = torch.empty((samples.shape[0], 3, samples.shape[2] * upscale_amount, samples.shape[3] * upscale_amount), device="cpu")
+    for b in range(samples.shape[0]):
+        s = samples[b:b+1]
+        out = torch.zeros((s.shape[0], 3, s.shape[2] * upscale_amount, s.shape[3] * upscale_amount), device="cpu")
+        out_div = torch.zeros((s.shape[0], 3, s.shape[2] * upscale_amount, s.shape[3] * upscale_amount), device="cpu")
+        for y in range(0, s.shape[2], tile_y - overlap):
+            for x in range(0, s.shape[3], tile_x - overlap):
+                s_in = s[:,:,y:y+tile_y,x:x+tile_x]
+                ps = function(s_in).cpu()
+                mask = torch.ones_like(ps)
+                feather = overlap * upscale_amount
+                for t in range(feather):
+                    mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1))
+                    mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1))
+                    mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1))
+                    mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1))
+                out[:,:,y*upscale_amount:(y+tile_y)*upscale_amount,x*upscale_amount:(x+tile_x)*upscale_amount] += ps * mask
+                out_div[:,:,y*upscale_amount:(y+tile_y)*upscale_amount,x*upscale_amount:(x+tile_x)*upscale_amount] += mask
+        output[b:b+1] = out/out_div
+    return output
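For reference, a hedged usage sketch of the new helper, assuming tiled_scale as defined above is in scope; toy_upscale is an illustrative stand-in, not from the commit. Any callable that maps a (B, C, h, w) tile to a (B, 3, h*upscale_amount, w*upscale_amount) tile works as function; overlapping tiles are blended with a linear feather mask, and the accumulated weights in out_div normalize the result:

import torch

# Toy stand-in for an upscale model: 4x nearest-neighbor interpolation.
def toy_upscale(tile):
    return torch.nn.functional.interpolate(tile, scale_factor=4, mode="nearest")

img = torch.rand(1, 3, 100, 100)  # larger than one 64x64 tile, so tiling kicks in
out = tiled_scale(img, toy_upscale, tile_x=64, tile_y=64, overlap=8, upscale_amount=4)
print(out.shape)  # torch.Size([1, 3, 400, 400])

Because tiled_scale is decorated with @torch.inference_mode(), callers no longer need their own inference-mode context, which is why the node diff below drops its with block.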

@@ -4,6 +4,7 @@ from comfy.sd import load_torch_file
 import comfy.model_management
 from nodes import filter_files_extensions, recursive_search, supported_ckpt_extensions
 import torch
+import comfy.utils

 class UpscaleModelLoader:
     models_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "models")
@@ -40,8 +41,7 @@ class ImageUpscaleWithModel:
         device = comfy.model_management.get_torch_device()
         upscale_model.to(device)
         in_img = image.movedim(-1,-3).to(device)
-        with torch.inference_mode():
-            s = upscale_model(in_img).cpu()
+        s = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=128 + 64, tile_y=128 + 64, overlap = 8, upscale_amount=upscale_model.scale)
         upscale_model.cpu()
         s = torch.clamp(s.movedim(-3,-1), min=0, max=1.0)
         return (s,)
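A minimal sketch of the updated node path exercised outside the UI. FakeUpscaler is a hypothetical stand-in for a loaded upscale model; the diff only assumes the model is callable on (B, C, H, W) tensors and exposes a .scale attribute, and the comfy.utils import assumes a ComfyUI checkout on the path:

import torch
import comfy.utils

class FakeUpscaler(torch.nn.Module):
    scale = 4  # tiled_scale reads the per-model upscale factor from here
    def forward(self, x):
        return torch.nn.functional.interpolate(x, scale_factor=self.scale, mode="nearest")

upscale_model = FakeUpscaler()
image = torch.rand(1, 256, 256, 3)  # ComfyUI images are (B, H, W, C)
in_img = image.movedim(-1, -3)      # to (B, C, H, W) for the model
s = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a),
                            tile_x=128 + 64, tile_y=128 + 64, overlap=8,
                            upscale_amount=upscale_model.scale)
s = torch.clamp(s.movedim(-3, -1), min=0, max=1.0)  # back to (B, H, W, C)
print(s.shape)  # torch.Size([1, 1024, 1024, 3])

Running the model on 192x192 tiles rather than the whole image keeps peak memory roughly constant regardless of input size, which appears to be the motivation for routing the node through tiled_scale.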