import torch
from comfy.ldm.modules.diffusionmodules.util import make_beta_schedule
import math


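# Rescales a sigma schedule so that the final step has (near) zero SNR, working directly on
# sigmas: they are converted to sqrt(alphas_cumprod), shifted and scaled so the last entry
# reaches zero, then converted back. This appears to follow the "zero terminal SNR" rescaling
# idea (Lin et al., "Common Diffusion Noise Schedules and Sample Steps Are Flawed"); the tiny
# constant substituted for the final alphas_bar keeps the corresponding sigma finite.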
def rescale_zero_terminal_snr_sigmas(sigmas):
    alphas_cumprod = 1 / ((sigmas * sigmas) + 1)
    alphas_bar_sqrt = alphas_cumprod.sqrt()

    # Store old values.
    alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
    alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()

    # Shift so the last timestep is zero.
    alphas_bar_sqrt -= (alphas_bar_sqrt_T)

    # Scale so the first timestep is back to the old value.
    alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)

    # Convert alphas_bar_sqrt to betas
    alphas_bar = alphas_bar_sqrt**2  # Revert sqrt
    alphas_bar[-1] = 4.8973451890853435e-08
    return ((1 - alphas_bar) / alphas_bar) ** 0.5


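# EPS: epsilon-prediction parameterization. calculate_input scales the noisy input by
# 1 / sqrt(sigma^2 + sigma_data^2) before it reaches the model, calculate_denoised recovers
# x0 as input - eps * sigma, and noise_scaling adds sigma-scaled noise onto the clean latent
# (using the sqrt(1 + sigma^2) variant when max_denoise is set). sigma_data is set by the
# ModelSampling* classes these parameterizations are combined with (e.g. ModelSamplingDiscrete
# sets it to 1.0).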
class EPS:
    def calculate_input(self, sigma, noise):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
        return noise / (sigma ** 2 + self.sigma_data ** 2) ** 0.5

    def calculate_denoised(self, sigma, model_output, model_input):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        return model_input - model_output * sigma

    def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
        if max_denoise:
            noise = noise * torch.sqrt(1.0 + sigma ** 2.0)
        else:
            noise = noise * sigma

        noise += latent_image
        return noise

    def inverse_noise_scaling(self, sigma, latent):
        return latent


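# V_PREDICTION: v-prediction parameterization. calculate_denoised blends the noisy input and
# the model output with c_skip = sigma_data^2 / (sigma^2 + sigma_data^2) and
# c_out = -sigma * sigma_data / sqrt(sigma^2 + sigma_data^2); input scaling is inherited from EPS.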
class V_PREDICTION(EPS):
    def calculate_denoised(self, sigma, model_output, model_input):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5


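# EDM: same preconditioning as V_PREDICTION but with the sign of the output term flipped
# (c_out = +sigma * sigma_data / sqrt(sigma^2 + sigma_data^2)), which corresponds to the
# Karras et al. (2022) EDM denoiser parameterization.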
class EDM(V_PREDICTION):
    def calculate_denoised(self, sigma, model_output, model_input):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) + model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5


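# CONST: flow-matching style parameterization. The noised sample is the straight-line
# interpolation sigma * noise + (1 - sigma) * x0, the model output is treated as the velocity
# (noise - x0), so input - output * sigma recovers x0, and inverse_noise_scaling divides out
# the remaining (1 - sigma) factor at the end of sampling.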
class CONST:
    def calculate_input(self, sigma, noise):
        return noise

    def calculate_denoised(self, sigma, model_output, model_input):
        sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
        return model_input - model_output * sigma

    def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
        return sigma * noise + (1.0 - sigma) * latent_image

    def inverse_noise_scaling(self, sigma, latent):
        return latent / (1.0 - sigma)


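# ModelSamplingDiscrete: classic DDPM-style discrete schedule. Betas come from
# make_beta_schedule, are accumulated into alphas_cumprod, and stored as the equivalent
# sigmas = sqrt((1 - alphas_cumprod) / alphas_cumprod). timestep()/sigma() translate between
# the continuous sigma scale and (interpolated) discrete timesteps, and percent_to_sigma maps
# a 0-1 "sampling percent" onto that schedule.
# Rough usage sketch (values approximate, assuming the default linear schedule):
#   ms = ModelSamplingDiscrete()
#   ms.sigma_max              # ~14.6
#   ms.percent_to_sigma(0.5)  # sigma reached halfway through sampling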
class ModelSamplingDiscrete(torch.nn.Module):
    def __init__(self, model_config=None, zsnr=None):
        super().__init__()

        if model_config is not None:
            sampling_settings = model_config.sampling_settings
        else:
            sampling_settings = {}

        beta_schedule = sampling_settings.get("beta_schedule", "linear")
        linear_start = sampling_settings.get("linear_start", 0.00085)
        linear_end = sampling_settings.get("linear_end", 0.012)
        timesteps = sampling_settings.get("timesteps", 1000)

        if zsnr is None:
            zsnr = sampling_settings.get("zsnr", False)

        self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=8e-3, zsnr=zsnr)
        self.sigma_data = 1.0

    def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
                           linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, zsnr=False):
        if given_betas is not None:
            betas = given_betas
        else:
            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
        alphas = 1. - betas
        alphas_cumprod = torch.cumprod(alphas, dim=0)

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end

        # self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32))
        # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32))
        # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32))

        sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
        if zsnr:
            sigmas = rescale_zero_terminal_snr_sigmas(sigmas)

        self.set_sigmas(sigmas)

    def set_sigmas(self, sigmas):
        self.register_buffer('sigmas', sigmas.float())
        self.register_buffer('log_sigmas', sigmas.log().float())

    @property
    def sigma_min(self):
        return self.sigmas[0]

    @property
    def sigma_max(self):
        return self.sigmas[-1]

    def timestep(self, sigma):
        log_sigma = sigma.log()
        dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None]
        return dists.abs().argmin(dim=0).view(sigma.shape).to(sigma.device)

    def sigma(self, timestep):
        t = torch.clamp(timestep.float().to(self.log_sigmas.device), min=0, max=(len(self.sigmas) - 1))
        low_idx = t.floor().long()
        high_idx = t.ceil().long()
        w = t.frac()
        log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx]
        return log_sigma.exp().to(timestep.device)

    def percent_to_sigma(self, percent):
        if percent <= 0.0:
            return 999999999.9
        if percent >= 1.0:
            return 0.0
        percent = 1.0 - percent
        return self.sigma(torch.tensor(percent * 999.0)).item()


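# ModelSamplingDiscreteEDM: keeps the discrete sigma table from ModelSamplingDiscrete but
# uses the EDM log-sigma timestep mapping t = 0.25 * log(sigma).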
class ModelSamplingDiscreteEDM(ModelSamplingDiscrete):
    def timestep(self, sigma):
        return 0.25 * sigma.log()

    def sigma(self, timestep):
        return (timestep / 0.25).exp()


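# ModelSamplingContinuousEDM: continuous EDM-style sampling. The sigmas buffer is a
# log-spaced table between sigma_min and sigma_max (defaults 0.002 / 120.0), kept mainly for
# scheduler compatibility; timestep()/sigma() use the same t = 0.25 * log(sigma) mapping, and
# percent_to_sigma interpolates in log-sigma space.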
class ModelSamplingContinuousEDM(torch.nn.Module):
    def __init__(self, model_config=None):
        super().__init__()
        if model_config is not None:
            sampling_settings = model_config.sampling_settings
        else:
            sampling_settings = {}

        sigma_min = sampling_settings.get("sigma_min", 0.002)
        sigma_max = sampling_settings.get("sigma_max", 120.0)
        sigma_data = sampling_settings.get("sigma_data", 1.0)
        self.set_parameters(sigma_min, sigma_max, sigma_data)

    def set_parameters(self, sigma_min, sigma_max, sigma_data):
        self.sigma_data = sigma_data
        sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), 1000).exp()

        self.register_buffer('sigmas', sigmas)  # for compatibility with some schedulers
        self.register_buffer('log_sigmas', sigmas.log())

    @property
    def sigma_min(self):
        return self.sigmas[0]

    @property
    def sigma_max(self):
        return self.sigmas[-1]

    def timestep(self, sigma):
        return 0.25 * sigma.log()

    def sigma(self, timestep):
        return (timestep / 0.25).exp()

    def percent_to_sigma(self, percent):
        if percent <= 0.0:
            return 999999999.9
        if percent >= 1.0:
            return 0.0
        percent = 1.0 - percent

        log_sigma_min = math.log(self.sigma_min)
        return math.exp((math.log(self.sigma_max) - log_sigma_min) * percent + log_sigma_min)


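# ModelSamplingContinuousV: like the continuous EDM class but with an arctangent mapping,
# t = atan(sigma) / (pi / 2), so t in [0, 1) covers sigma in [0, inf).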
class ModelSamplingContinuousV(ModelSamplingContinuousEDM):
    def timestep(self, sigma):
        return sigma.atan() / math.pi * 2

    def sigma(self, timestep):
        return (timestep * math.pi / 2).tan()


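# time_snr_shift: timestep shift used by ModelSamplingDiscreteFlow below. For alpha != 1 it
# remaps t to alpha * t / (1 + (alpha - 1) * t), e.g. time_snr_shift(3.0, 0.5) == 0.75, so for
# alpha > 1 more of the schedule is spent at high noise levels.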
def time_snr_shift(alpha, t):
    if alpha == 1.0:
        return t
    return alpha * t / (1 + (alpha - 1) * t)


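# ModelSamplingDiscreteFlow: discrete rectified-flow style schedule where sigma is the
# (shifted) interpolation factor itself, timesteps are sigma * multiplier (1000 by default),
# and percent_to_sigma is simply 1 - percent.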
class ModelSamplingDiscreteFlow(torch.nn.Module):
    def __init__(self, model_config=None):
        super().__init__()
        if model_config is not None:
            sampling_settings = model_config.sampling_settings
        else:
            sampling_settings = {}

        self.set_parameters(shift=sampling_settings.get("shift", 1.0), multiplier=sampling_settings.get("multiplier", 1000))

    def set_parameters(self, shift=1.0, timesteps=1000, multiplier=1000):
        self.shift = shift
        self.multiplier = multiplier
        ts = self.sigma((torch.arange(1, timesteps + 1, 1) / timesteps) * multiplier)
        self.register_buffer('sigmas', ts)

    @property
    def sigma_min(self):
        return self.sigmas[0]

    @property
    def sigma_max(self):
        return self.sigmas[-1]

    def timestep(self, sigma):
        return sigma * self.multiplier

    def sigma(self, timestep):
        return time_snr_shift(self.shift, timestep / self.multiplier)

    def percent_to_sigma(self, percent):
        if percent <= 0.0:
            return 1.0
        if percent >= 1.0:
            return 0.0
        return 1.0 - percent


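# StableCascadeSampling: continuous cosine alpha_cumprod schedule for Stable Cascade.
# sigma(t) converts the cosine alpha_cumprod (optionally logSNR-shifted by `shift`) into a
# sigma, timestep() inverts that mapping, and a 10000-entry sigma table is precomputed only
# for compatibility with schedulers that expect one.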
class StableCascadeSampling(ModelSamplingDiscrete):
    def __init__(self, model_config=None):
        super().__init__()

        if model_config is not None:
            sampling_settings = model_config.sampling_settings
        else:
            sampling_settings = {}

        self.set_parameters(sampling_settings.get("shift", 1.0))

    def set_parameters(self, shift=1.0, cosine_s=8e-3):
        self.shift = shift
        self.cosine_s = torch.tensor(cosine_s)
        self._init_alpha_cumprod = torch.cos(self.cosine_s / (1 + self.cosine_s) * torch.pi * 0.5) ** 2

        # This part is just for compatibility with some schedulers in the codebase
        self.num_timesteps = 10000
        sigmas = torch.empty((self.num_timesteps), dtype=torch.float32)
        for x in range(self.num_timesteps):
            t = (x + 1) / self.num_timesteps
            sigmas[x] = self.sigma(t)

        self.set_sigmas(sigmas)

    def sigma(self, timestep):
        alpha_cumprod = (torch.cos((timestep + self.cosine_s) / (1 + self.cosine_s) * torch.pi * 0.5) ** 2 / self._init_alpha_cumprod)

        if self.shift != 1.0:
            var = alpha_cumprod
            logSNR = (var / (1 - var)).log()
            logSNR += 2 * torch.log(1.0 / torch.tensor(self.shift))
            alpha_cumprod = logSNR.sigmoid()

        alpha_cumprod = alpha_cumprod.clamp(0.0001, 0.9999)
        return ((1 - alpha_cumprod) / alpha_cumprod) ** 0.5

    def timestep(self, sigma):
        var = 1 / ((sigma * sigma) + 1)
        var = var.clamp(0, 1.0)
        s, min_var = self.cosine_s.to(var.device), self._init_alpha_cumprod.to(var.device)
        t = (((var * min_var) ** 0.5).acos() / (torch.pi * 0.5)) * (1 + s) - s
        return t

    def percent_to_sigma(self, percent):
        if percent <= 0.0:
            return 999999999.9
        if percent >= 1.0:
            return 0.0

        percent = 1.0 - percent
        return self.sigma(torch.tensor(percent))


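# flux_time_shift: sigmoid-style time shift exp(mu) / (exp(mu) + (1/t - 1)^sigma).
# With sigma = 1.0 and mu = 1.15 (the default shift below), t = 0.5 maps to roughly 0.76.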
def flux_time_shift(mu: float, sigma: float, t):
    return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)


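# ModelSamplingFlux: flow sampling where timestep and sigma share the same 0-1 scale;
# sigma(t) applies flux_time_shift with mu = shift, and percent_to_sigma is again 1 - percent.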
class ModelSamplingFlux(torch.nn.Module):
    def __init__(self, model_config=None):
        super().__init__()
        if model_config is not None:
            sampling_settings = model_config.sampling_settings
        else:
            sampling_settings = {}

        self.set_parameters(shift=sampling_settings.get("shift", 1.15))

    def set_parameters(self, shift=1.15, timesteps=10000):
        self.shift = shift
        ts = self.sigma((torch.arange(1, timesteps + 1, 1) / timesteps))
        self.register_buffer('sigmas', ts)

    @property
    def sigma_min(self):
        return self.sigmas[0]

    @property
    def sigma_max(self):
        return self.sigmas[-1]

    def timestep(self, sigma):
        return sigma

    def sigma(self, timestep):
        return flux_time_shift(self.shift, 1.0, timestep)

    def percent_to_sigma(self, percent):
        if percent <= 0.0:
            return 1.0
        if percent >= 1.0:
            return 0.0
        return 1.0 - percent