import contextlib

import torch
from omegaconf import OmegaConf

import ldm_patched.modules.clip_vision
import ldm_patched.modules.model_patcher
import ldm_patched.modules.utils
from ldm_patched.ldm.util import instantiate_from_config
from ldm_patched.modules import model_detection, model_management
from ldm_patched.modules.model_base import ModelType, model_sampling
from ldm_patched.modules.sd import CLIP, VAE, load_model_weights
from modules import sd_hijack, shared
from modules.sd_models_config import find_checkpoint_config
from modules.sd_models_types import WebuiSdModel
from modules_forge import forge_clip
from modules_forge.unet_patcher import UnetPatcher


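# FakeObject stands in for the UNet/network/VAE targets in the A1111 config so
# that instantiate_from_config builds only a lightweight shell; the real
# modules are loaded separately and grafted onto that shell further down.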
class FakeObject:
    def __init__(self, *args, **kwargs):
        return

    def eval(self, *args, **kwargs):
        return self

    def parameters(self, *args, **kwargs):
        return []


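# Bundles the four patched components Forge manages per checkpoint. shallow_copy
# shares the underlying patchers, which is what lets load_model_for_a1111 keep
# its "original" and "after applying LoRA" snapshots cheaply.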
class ForgeObjects:
    def __init__(self, unet, clip, vae, clipvision):
        self.unet = unet
        self.clip = clip
        self.vae = vae
        self.clipvision = clipvision

    def shallow_copy(self):
        return ForgeObjects(self.unet, self.clip, self.vae, self.clipvision)


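# Build UNet/CLIP/VAE (and optionally CLIP Vision) from a single checkpoint
# state dict, detecting the architecture from the diffusion-model keys.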
@torch.no_grad()
def load_checkpoint_guess_config(sd, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True) -> ForgeObjects:
    clip = None
    clipvision = None
    vae = None
    model = None
    model_patcher = None
    clip_target = None

    parameters = ldm_patched.modules.utils.calculate_parameters(sd, "model.diffusion_model.")
    unet_dtype = model_management.unet_dtype(model_params=parameters)
    load_device = model_management.get_torch_device()
    manual_cast_dtype = model_management.unet_manual_cast(unet_dtype, load_device)

    class WeightsLoader(torch.nn.Module):
        pass

    model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", unet_dtype)
    if model_config is None:
        raise RuntimeError("Could not detect model type")

    # Configure casting only after detection succeeded; calling set_manual_cast
    # on None would raise an AttributeError before the intended RuntimeError.
    model_config.set_manual_cast(manual_cast_dtype)

    if model_config.clip_vision_prefix is not None:
        if output_clipvision:
            clipvision = ldm_patched.modules.clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)

    if output_model:
        initial_load_device = model_management.unet_initial_load_device(parameters, unet_dtype)
        print("UNet dtype:", unet_dtype)
        model = model_config.get_model(sd, "model.diffusion_model.", device=initial_load_device)
        model.load_model_weights(sd, "model.diffusion_model.")

    if output_vae:
        vae_sd = ldm_patched.modules.utils.state_dict_prefix_replace(sd, {"first_stage_model.": ""}, filter_keys=True)
        vae_sd = model_config.process_vae_state_dict(vae_sd)
        vae = VAE(sd=vae_sd)

    if output_clip:
        w = WeightsLoader()
        clip_target = model_config.clip_target()
        if clip_target is not None:
            clip = CLIP(clip_target, embedding_directory=embedding_directory)
            w.cond_stage_model = clip.cond_stage_model
            sd = model_config.process_clip_state_dict(sd)
            load_model_weights(w, sd)

    left_over = sd.keys()
    if len(left_over) > 0:
        print("left over keys:", left_over)

    if output_model:
        model_patcher = UnetPatcher(
            model,
            load_device=load_device,
            offload_device=model_management.unet_offload_device(),
            current_device=initial_load_device,
        )
        if initial_load_device != torch.device("cpu"):
            print("loaded straight to GPU")
            model_management.load_model_gpu(model_patcher)

    return ForgeObjects(model_patcher, clip, vae, clipvision)
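

# A minimal usage sketch (hypothetical caller code; assumes the checkpoint has
# already been read into a state dict, e.g. with
# ldm_patched.modules.utils.load_torch_file):
#
#   sd = ldm_patched.modules.utils.load_torch_file("model.safetensors")
#   objs = load_checkpoint_guess_config(sd, embedding_directory="embeddings")
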
@torch.no_grad()
def load_model_for_a1111(timer, checkpoint_info=None, state_dict=None) -> WebuiSdModel:
    ztsnr = False
    if state_dict is not None:
        ztsnr = state_dict.pop("ztsnr", None) is not None

    a1111_config_filename = find_checkpoint_config(state_dict, checkpoint_info)
    a1111_config = OmegaConf.load(a1111_config_filename)
    timer.record("forge solving config")
    for obj in ("unet_config", "network_config", "first_stage_config"):
        if hasattr(a1111_config.model.params, obj):
            getattr(a1111_config.model.params, obj).target = "modules_forge.forge_loader.FakeObject"

    sd_model: WebuiSdModel = instantiate_from_config(a1111_config.model)
    del a1111_config
    timer.record("forge instantiate config")

    forge_objects = load_checkpoint_guess_config(
        state_dict,
        output_vae=True,
        output_clip=True,
        output_clipvision=True,
        embedding_directory=shared.cmd_opts.embeddings_dir,
        output_model=True,
    )
    sd_model.forge_objects = forge_objects
    sd_model.forge_objects_original = forge_objects.shallow_copy()
    sd_model.forge_objects_after_applying_lora = forge_objects.shallow_copy()
    del state_dict
    timer.record("forge load real models")
    sd_model.first_stage_model = forge_objects.vae.first_stage_model
    sd_model.model.diffusion_model = forge_objects.unet.model.diffusion_model

    conditioner = getattr(sd_model, "conditioner", None)
    sd_model.is_sdxl = conditioner is not None

    if sd_model.is_sdxl:
        for i in range(len(conditioner.embedders)):
            embedder = conditioner.embedders[i]
            typename = type(embedder).__name__

            if typename == "FrozenCLIPEmbedder":  # CLIP-L
                embedder.tokenizer = forge_objects.clip.tokenizer.clip_l.tokenizer
                embedder.transformer = forge_objects.clip.cond_stage_model.clip_l.transformer
                model_embeddings = embedder.transformer.text_model.embeddings
                model_embeddings.token_embedding = sd_hijack.EmbeddingsWithFixes(model_embeddings.token_embedding, sd_hijack.model_hijack)
                conditioner.embedders[i] = forge_clip.CLIP_SD_XL_L(embedder, sd_hijack.model_hijack)
            elif typename == "FrozenOpenCLIPEmbedder2":  # CLIP-G
                embedder.tokenizer = forge_objects.clip.tokenizer.clip_g.tokenizer
                embedder.transformer = forge_objects.clip.cond_stage_model.clip_g.transformer
                embedder.text_projection = forge_objects.clip.cond_stage_model.clip_g.text_projection
                model_embeddings = embedder.transformer.text_model.embeddings
                model_embeddings.token_embedding = sd_hijack.EmbeddingsWithFixes(model_embeddings.token_embedding, sd_hijack.model_hijack, textual_inversion_key="clip_g")
                conditioner.embedders[i] = forge_clip.CLIP_SD_XL_G(embedder, sd_hijack.model_hijack)
            elif typename == "ConcatTimestepEmbedderND":
                embedder.device = model_management.text_encoder_device()

        sd_model.cond_stage_model = conditioner
    else:
        assert type(sd_model.cond_stage_model).__name__ == "FrozenCLIPEmbedder"
        sd_model.cond_stage_model.tokenizer = forge_objects.clip.tokenizer.clip_l.tokenizer
        sd_model.cond_stage_model.transformer = forge_objects.clip.cond_stage_model.clip_l.transformer
        model_embeddings = sd_model.cond_stage_model.transformer.text_model.embeddings
        model_embeddings.token_embedding = sd_hijack.EmbeddingsWithFixes(model_embeddings.token_embedding, sd_hijack.model_hijack)
        sd_model.cond_stage_model = forge_clip.CLIP_SD_15_L(sd_model.cond_stage_model, sd_hijack.model_hijack)

    timer.record("forge set components")

    sd_model_hash = checkpoint_info.calculate_shorthash()
    timer.record("calculate hash")
if getattr(sd_model, "parameterization", None) == "v":
sd_model.forge_objects.unet.model.model_sampling = model_sampling(sd_model.forge_objects.unet.model.model_config, ModelType.V_PREDICTION)
sd_model.alphas_cumprod_original = sd_model.alphas_cumprod
sd_model.ztsnr = ztsnr
sd_model.is_sd2 = False
sd_model.is_sd1 = not sd_model.is_sdxl
sd_model.sd_model_hash = sd_model_hash
sd_model.sd_model_checkpoint = checkpoint_info.filename
sd_model.sd_checkpoint_info = checkpoint_info
apply_alpha_schedule_override(sd_model)
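
    # Forge's VAE operates on NHWC images in [0, 1], while A1111 expects NCHW
    # in [-1, 1]; the two wrappers below convert layout and range accordingly.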
    @torch.inference_mode()
    def patched_decode_first_stage(x):
        sample = sd_model.forge_objects.unet.model.model_config.latent_format.process_out(x)
        sample = sd_model.forge_objects.vae.decode(sample).movedim(-1, 1) * 2.0 - 1.0
        return sample.to(x)

    @torch.inference_mode()
    def patched_encode_first_stage(x):
        sample = sd_model.forge_objects.vae.encode(x.movedim(1, -1) * 0.5 + 0.5)
        sample = sd_model.forge_objects.unet.model.model_config.latent_format.process_in(sample)
        return sample.to(x)

    sd_model.ema_scope = lambda *args, **kwargs: contextlib.nullcontext()
    sd_model.get_first_stage_encoding = lambda x: x
    sd_model.decode_first_stage = patched_decode_first_stage
    sd_model.encode_first_stage = patched_encode_first_stage
    sd_model.clip = sd_model.cond_stage_model
    sd_model.tiling_enabled = False
    timer.record("forge finalize")

    sd_model.current_lora_hash = str([])
    return sd_model
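

# Rescale the noise schedule so the final timestep has (near) zero SNR, following
# "Common Diffusion Noise Schedules and Sample Steps are Flawed" (Lin et al.,
# 2023): shift sqrt(alphas_cumprod) so its last value becomes zero, then rescale
# so the first value is unchanged.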
def rescale_zero_terminal_snr_abar(alphas_cumprod):
    alphas_bar_sqrt = alphas_cumprod.sqrt()

    # Store old values.
    alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
    alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()

    # Shift so the last timestep is zero.
    alphas_bar_sqrt -= alphas_bar_sqrt_T

    # Scale so the first timestep is back to the old value.
    alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)

    # Convert alphas_bar_sqrt back to alphas_bar.
    alphas_bar = alphas_bar_sqrt**2
    # Clamp the terminal value to a tiny positive epsilon instead of exactly zero.
    alphas_bar[-1] = 4.8973451890853435e-08
    return alphas_bar


def apply_alpha_schedule_override(sd_model, p=None):
    """
    Applies an override to the alpha schedule of the model according to settings.
    - downcasts the alpha schedule to half precision
    - rescales the alpha schedule to have zero terminal SNR
    """
    if not (hasattr(sd_model, "alphas_cumprod") and hasattr(sd_model, "alphas_cumprod_original")):
        return

    sd_model.alphas_cumprod = sd_model.alphas_cumprod_original.to(shared.device)

    if shared.opts.use_downcasted_alpha_bar:
        if p is not None:
            p.extra_generation_params["Downcast alphas_cumprod"] = shared.opts.use_downcasted_alpha_bar
        sd_model.alphas_cumprod = sd_model.alphas_cumprod.half().to(shared.device)

    if getattr(sd_model, "ztsnr", False) or shared.opts.sd_noise_schedule == "Zero Terminal SNR":
        if p is not None:
            p.extra_generation_params["Noise Schedule"] = "Zero Terminal SNR"
        sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(sd_model.alphas_cumprod).to(shared.device)


ForgeSD = ForgeObjects  # Alias, presumably kept for backwards compatibility.