Upload 868 files
update forge classic v1.6
This view is limited to 50 files because it contains too many changes; see the raw diff for the full changeset.
- README.md +78 -43
- extensions-builtin/Lora/extra_networks_lora.py +4 -2
- extensions-builtin/Lora/network.py +9 -12
- extensions-builtin/Lora/networks.py +11 -2
- extensions-builtin/Lora/scripts/lora_script.py +4 -5
- extensions-builtin/Lora/ui_edit_user_metadata.py +2 -1
- extensions-builtin/Lora/ui_extra_networks_lora.py +1 -7
- extensions-builtin/forge_legacy_preprocessors/annotator/depth_anything.py +7 -11
- extensions-builtin/forge_legacy_preprocessors/annotator/depth_anything_v2.py +78 -0
- extensions-builtin/forge_legacy_preprocessors/annotator/pidinet/model.py +1 -15
- extensions-builtin/forge_legacy_preprocessors/install.py +25 -54
- extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor.py +40 -71
- extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor_compiled.py +14 -29
- extensions-builtin/forge_legacy_preprocessors/requirements.txt +4 -1
- extensions-builtin/sd_forge_controlnet/lib_controlnet/controlnet_ui/controlnet_ui_group.py +1 -3
- extensions-builtin/sd_forge_controlnet/lib_controlnet/enums.py +0 -57
- extensions-builtin/sd_forge_controlnet/lib_controlnet/external_code.py +1 -3
- extensions-builtin/sd_forge_controlnet/lib_controlnet/global_state.py +7 -17
- extensions-builtin/sd_forge_controlnet/preload.py +1 -7
- extensions-builtin/sd_forge_controlnet/scripts/controlnet.py +0 -2
- extensions-builtin/xyz/lib_xyz/builtins.py +11 -75
- extensions-builtin/xyz/scripts/xyz_grid.py +2 -6
- html/footer.html +1 -5
- javascript/ui.js +1 -1
- ldm_patched/k_diffusion/sampling.py +28 -723
- ldm_patched/ldm/modules/attention.py +29 -21
- ldm_patched/modules/args_parser.py +31 -0
- modules/api/api.py +14 -0
- modules/api/models.py +8 -0
- modules/cmd_args.py +1 -0
- modules/esrgan_model.py +24 -20
- modules/images.py +32 -5
- modules/img2img.py +0 -8
- modules/infotext_utils.py +3 -0
- modules/launch_utils.py +4 -6
- modules/modelloader.py +36 -68
- modules/options.py +20 -12
- modules/postprocessing.py +0 -24
- modules/processing.py +10 -0
- modules/processing_scripts/comments.py +2 -2
- modules/processing_scripts/mahiro.py +1 -0
- modules/processing_scripts/refiner.py +1 -0
- modules/processing_scripts/rescale_cfg.py +1 -0
- modules/processing_scripts/sampler.py +66 -0
- modules/processing_scripts/seed.py +1 -0
- modules/scripts_postprocessing.py +0 -1
- modules/sd_emphasis.py +8 -4
- modules/sd_samplers.py +105 -16
- modules/sd_samplers_cfg_denoiser.py +38 -27
- modules/sd_samplers_cfgpp.py +264 -0
README.md
CHANGED

````diff
@@ -18,24 +18,23 @@ The name "Forge" is inspired by "Minecraft Forge". This project aims to become t
 
 <br>
 
-## Features […]
+## Features [May. 21]
 > Most base features of the original [Automatic1111 Webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) should still function
 
 #### New Features
 
-- [X] Support `v-pred` **SDXL** checkpoints *(**eg.** [NoobAI](https://civitai.com/models/833294?modelVersionId=1190596))*
 - [X] Support [uv](https://github.com/astral-sh/uv) package manager
-    - requires **uv…
+    - requires **manually** installing [uv](https://github.com/astral-sh/uv/releases)
     - drastically speed up installation
     - see [Commandline](#by-classic)
 - [X] Support [SageAttention](https://github.com/thu-ml/SageAttention)
-    - requires **manually** installing …
+    - requires **manually** installing [triton](https://github.com/triton-lang/triton)
    - [how to install](#install-triton)
    - requires RTX **30** +
-    - ~10% speed up
+    - ~10% speed up for SDXL
    - see [Commandline](#by-classic)
 - [X] Support [FlashAttention](https://arxiv.org/abs/2205.14135)
-    - requires **manually** installing …
+    - requires **manually** installing [flash-attn](https://github.com/Dao-AILab/flash-attention)
    - [how to install](#install-flash-attn)
    - ~10% speed up
 - [X] Support fast `fp16_accumulation`
@@ -43,37 +42,46 @@
     - ~25% speed up
     - see [Commandline](#by-classic)
 - [X] Support fast `cublas` operation *(`CublasLinear`)*
-    - requires **manually** installing …
+    - requires **manually** installing [cublas_ops](https://github.com/aredden/torch-cublas-hgemm)
    - [how to install](#install-cublas)
    - ~25% speed up
-    - enable in **Settings**
+    - enable in **Settings/Optimizations**
 - [X] Support fast `fp8` operation *(`torch._scaled_mm`)*
    - requires RTX **40** +
    - ~10% speed up; reduce quality
-    - enable in **Settings**
+    - enable in **Settings/Optimizations**
 
 > [!Note]
-> - …
-> - The `fp16_accumulation` and `…
+> - Both `fp16_accumulation` and `cublas_ops` achieve the same speed up; if you already install/update to PyTorch **2.7.0**, you do not need to go for `cublas_ops`
+> - The `fp16_accumulation` and `cublas_ops` require `fp16` precision, thus is not compatible with the `fp8` operation
 
+- [X] Implement new Samplers
+    - *(ported from reForge Webui)*
+- [X] Implement Scheduler Dropdown
+    - *(backported from Automatic1111 Webui upstream)*
+    - enable in **Settings/UI alternatives**
 - [X] Implement RescaleCFG
    - reduce burnt colors; mainly for `v-pred` checkpoints
+    - enable in **Settings/UI alternatives**
 - [X] Implement MaHiRo
-    - alternative CFG calculation
+    - alternative CFG calculation; improve prompt adherence
+    - enable in **Settings/UI alternatives**
-- [X] Implement `diskcache`
+- [X] Implement `diskcache` for hashes
    - *(backported from Automatic1111 Webui upstream)*
 - [X] Implement `skip_early_cond`
    - *(backported from Automatic1111 Webui upstream)*
+    - enable in **Settings/Optimizations**
+- [X] Support `v-pred` **SDXL** checkpoints *(**eg.** [NoobAI](https://civitai.com/models/833294?modelVersionId=1190596))*
+- [X] Support new LoRA architectures
 - [X] Update `spandrel`
-    - support …
+    - support new Upscaler architectures
 - [X] Add `pillow-heif` package
-    - support `.avif` and `.heif`
-- [X] …
-- [X] …
-- [X] …
+    - support `.avif` and `.heif` images
+- [X] Automatically determine the optimal row count for `X/Y/Z Plot`
+- [X] `DepthAnything v2` Preprocessor
+- [X] Support [NoobAI Inpaint](https://civitai.com/models/1376234/noobai-inpainting-controlnet) ControlNet
 - [X] Support [Union](https://huggingface.co/xinsir/controlnet-union-sdxl-1.0) / [ProMax](https://huggingface.co/brad-twinkl/controlnet-union-sdxl-1.0-promax) ControlNet
+    - they simply always show up in the dropdown
 
 #### Removed Features
 
@@ -88,9 +96,12 @@
 - [X] Textual Inversion Training
 - [X] Checkpoint Merging
 - [X] LDSR
-- [X] Most …
-- [X] Some …
-- [X] …
+- [X] Most built-in Extensions
+- [X] Some built-in Scripts
+- [X] Some Samplers
+- [X] Sampler in RadioGroup
+- [X] `test` scripts
+- [X] Some Preprocessors *(ControlNet)*
 - [X] `Photopea` and `openpose_editor` *(ControlNet)*
 - [X] Unix `.sh` launch scripts
    - You can still use this WebUI by copying a launch script from another working WebUI; I just don't want to maintain them...
@@ -103,17 +114,19 @@
 - [X] Fix memory leak when switching checkpoints
 - [X] Clean up the `ldm_patched` *(**ie.** `comfy`)* folder
 - [X] Remove unused `cmd_args`
-- [X] Remove unused `shared_options`
 - [X] Remove unused `args_parser`
+- [X] Remove unused `shared_options`
 - [X] Remove legacy codes
 - [X] Remove duplicated upscaler codes
    - put every upscaler inside the `ESRGAN` folder
+    - optimize upscaler logics
 - [X] Improve color correction
-- [X] Improve code logics
 - [X] Improve hash caching
 - [X] Improve error logs
-    - no longer …
-- [X] …
+    - no longer just print `TypeError: 'NoneType' object is not iterable`
+- [X] Revamp settings
+    - improve formatting
+    - update descriptions
 - [X] Check for Extension updates in parallel
 - [X] Moved `embeddings` folder into `models` folder
 - [X] ControlNet Rewrite
@@ -121,19 +134,27 @@
     - remove multi-inputs, as they are "[misleading](https://github.com/lllyasviel/stable-diffusion-webui-forge/discussions/932)"
     - change `visible` toggle to `interactive` toggle; now the UI will no longer jump around
     - improved `Presets` application
+- [X] Disable Refiner by default
+    - enable again in **Settings/UI alternatives**
+- [X] Disable Tree View by default
+    - enable again in **Settings/Extra Networks**
 - [X] Run `text encoder` on CPU by default
 - [X] Fix `pydantic` Errors
 - [X] Fix `Soft Inpainting`
-- [X] Lint & Format
-- [X] Update …
+- [X] Lint & Format
+- [X] Update `Pillow`
    - faster image processing
 - [X] Update `protobuf`
    - faster `insightface` loading
 - [X] Update to latest PyTorch
    - `torch==2.7.0+cu128`
    - `xformers==0.0.30`
+
+> [!Tip]
+> If your GPU does not support the latest PyTorch, manually [install](#install-older-pytorch) older version of PyTorch
+
 - [X] No longer install `open-clip` twice
-- [X] Update …
+- [X] Update some packages to newer versions
 - [X] Update recommended Python to `3.11.9`
 - [X] many more... :tm:
@@ -181,6 +202,12 @@
 > [!Important]
 > Using `symlink` means it will directly access the packages from the cache folders; refrain from clearing the cache when setting this option
 
+- `--model-ref`: Points to a central `models` folder that contains all your models
+    - said folder should contain subfolders like `Stable-diffusion`, `Lora`, `VAE`, `ESRGAN`, etc.
+
+> [!Important]
+> This simply **replaces** the `models` folder, rather than adding on top of it
+
 - `--fast-fp16`: Enable the `allow_fp16_accumulation` option
    - requires PyTorch **2.7.0** +
 - `--sage`: Install the `sageattention` package to speed up generation
@@ -188,14 +215,21 @@
     - requires RTX **30** +
     - only affects **SDXL**
 
-> [!…
-> …
+> [!Note]
+> For RTX **50** users, you may need to manually [install](#install-sageattention-2) `sageattention 2` instead
 
-…
-…
+<details>
+<summary>with SageAttention 2</summary>
 
-…
-…
+- `--sageattn2-api`: Select the function used by **SageAttention 2**
+    - **options:**
+        - `auto` (default)
+        - `triton-fp16`
+        - `cuda-fp16`
+        - `cuda-fp8`
+    - try the `fp16` options if you get `NaN` *(black images)* on `auto`
+
+</details>
 
 <br>
 
@@ -212,7 +246,7 @@
 <details>
 <summary>Recommended Method</summary>
 
-- Install **[uv](https://github.com/astral-sh/uv)**
+- Install **[uv](https://github.com/astral-sh/uv#installation)**
 - Set up **venv**
 ```bash
 cd sd-webui-forge-classic
@@ -329,7 +363,6 @@
 </details>
 
 ### Install sageattention 2
-> If you only use **SDXL**, then `1.x` is already enough; `2.x` simply has partial support for **SD1** checkpoints
 
 <details>
 <summary>Expand</summary>
@@ -363,10 +396,7 @@
 
 </details>
 
-<br>
-
 ### Install older PyTorch
-> Read this if your GPU does not support the latest PyTorch
 
 <details>
 <summary>Expand</summary>
@@ -385,7 +415,7 @@ set TORCH_COMMAND=pip install torch==2.1.2 torchvision==0.16.2 --extra-index-url
 ## Attention
 
 > [!Important]
-> The `--xformers` and `--sage` args are only responsible for installing the packages, **not** whether its respective attention is used
+> The `--xformers` and `--sage` args are only responsible for installing the packages, **not** whether its respective attention is used *(this also means you can remove them once the packages are successfully installed)*
 
 **Forge Classic** tries to import the packages and automatically choose the first available attention function in the following order:
 
@@ -395,8 +425,11 @@
 4. `PyTorch`
 5. `Basic`
 
+> [!Tip]
+> To skip a specific attention, add the respective disable arg such as `--disable-sage`
+
 > [!Note]
-> The VAE only checks for `xformers`
+> The **VAE** only checks for `xformers`, so `--xformers` is still recommended even if you already have `--sage`
 
 In my experience, the speed of each attention function for SDXL is ranked in the following order:
 
@@ -405,6 +438,8 @@
 > [!Note]
 > `SageAttention` is based on quantization, so its quality might be slightly worse than others
 
+<br>
+
 ## Issues & Requests
 
 - **Issues** about removed features will simply be ignored
````
extensions-builtin/Lora/extra_networks_lora.py
CHANGED

```diff
@@ -26,17 +26,19 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
         for params in params_list:
             assert params.items
 
-            names.append(params.positional[0])
-
             te_multiplier = float(params.positional[1]) if len(params.positional) > 1 else 1.0
             te_multiplier = float(params.named.get("te", te_multiplier))
 
             unet_multiplier = float(params.positional[2]) if len(params.positional) > 2 else te_multiplier
             unet_multiplier = float(params.named.get("unet", unet_multiplier))
 
+            if te_multiplier == 0.0 and unet_multiplier == 0.0:
+                continue
+
             dyn_dim = int(params.positional[3]) if len(params.positional) > 3 else None
             dyn_dim = int(params.named["dyn"]) if "dyn" in params.named else dyn_dim
 
+            names.append(params.positional[0])
             te_multipliers.append(te_multiplier)
             unet_multipliers.append(unet_multiplier)
             dyn_dims.append(dyn_dim)
```
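
The practical effect of this change: a LoRA whose text-encoder and UNet multipliers are both zero is now skipped outright instead of being loaded at zero strength. A minimal sketch of the new semantics (helper name hypothetical):

```python
def should_load(te: float, unet: float) -> bool:
    # Mirrors the new guard above: a network that contributes nothing is
    # skipped, so its name is never appended to `names`.
    return not (te == 0.0 and unet == 0.0)

assert should_load(0.8, 0.8)      # e.g. <lora:example:0.8>
assert not should_load(0.0, 0.0)  # e.g. <lora:example:0:0> is now a no-op
```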
extensions-builtin/Lora/network.py
CHANGED

```diff
@@ -21,16 +21,15 @@ metadata_tags_order = {
 class SDVersion(enum.Enum):
     Unknown = -1
     SD1 = 1
-    SD2 = 2
     SDXL = 3
 
 
 class NetworkOnDisk:
-    def __init__(self, name, filename):
-        self.name = name
-        self.filename = filename
-        self.metadata = {}
-        self.is_safetensors = filename.lower().endswith(".safetensors")
+    def __init__(self, name: str, filename: str):
+        self.name: str = name
+        self.filename: str = filename
+        self.metadata: dict[str, str] = {}
+        self.is_safetensors: bool = filename.lower().endswith(".safetensors")
 
     def read_metadata():
         metadata = sd_models.read_metadata_from_safetensors(filename)
@@ -50,19 +49,17 @@ class NetworkOnDisk:
 
         self.metadata = m
 
-        self.alias = self.metadata.get("ss_output_name", self.name)
+        self.alias: str = self.metadata.get("ss_output_name", self.name)
 
-        self.hash = None
-        self.shorthash = None
+        self.hash: str = None
+        self.shorthash: str = None
         self.set_hash(self.metadata.get("sshs_model_hash") or hashes.sha256_from_cache(self.filename, "/".join(["lora", self.name]), use_addnet_hash=self.is_safetensors) or "")
 
-        self.sd_version = self.detect_version()
+        self.sd_version: "SDVersion" = self.detect_version()
 
     def detect_version(self):
         if str(self.metadata.get("ss_base_model_version", "")).startswith("sdxl_"):
             return SDVersion.SDXL
-        elif str(self.metadata.get("ss_v2", "")) == "True":
-            return SDVersion.SD2
         elif len(self.metadata):
             return SDVersion.SD1
```
extensions-builtin/Lora/networks.py
CHANGED

```diff
@@ -20,6 +20,16 @@ def load_network(name, network_on_disk):
     return net
 
 
+def get_networks_on_desk(names: list[str], *, tried: bool = True) -> list["network.NetworkOnDisk"]:
+    networks_on_disk = [(available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None)) for name in names]
+
+    if tried or all(x is not None for x in networks_on_disk):
+        return networks_on_disk
+
+    list_available_networks()
+    return get_networks_on_desk(names, tried=True)
+
+
 def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
     global lora_state_dict_cache
 
@@ -29,8 +39,7 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
 
     loaded_networks.clear()
 
-    networks_on_disk = …
-    assert not any(x is None for x in networks_on_disk)
+    networks_on_disk = get_networks_on_desk(names, tried=False)
 
     for network_on_disk, name in zip(networks_on_disk, names):
         try:
```
extensions-builtin/Lora/scripts/lora_script.py
CHANGED

```diff
@@ -14,11 +14,10 @@ shared.options_templates.update(
     shared.options_section(
         ("extra_networks", "Extra Networks"),
         {
-            "sd_lora": shared.OptionInfo("None", "…
-            "lora_preferred_name": shared.OptionInfo("Alias…
-            "lora_add_hashes_to_infotext": shared.OptionInfo(True, "…
-            "lora_show_all": shared.OptionInfo(False, "Always show all…
-            "lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
+            "sd_lora": shared.OptionInfo("None", "Append LoRA to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks),
+            "lora_preferred_name": shared.OptionInfo("Alias", "When adding syntax to prompt, refer to LoRA by", gr.Radio, {"choices": ("Alias", "Filename")}),
+            "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Append LoRA hashes to infotext"),
+            "lora_show_all": shared.OptionInfo(False, "Always show all LoRA cards").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"),
         },
     )
 )
```
extensions-builtin/Lora/ui_edit_user_metadata.py
CHANGED

```diff
@@ -68,6 +68,7 @@ class LoraUserMetadataEditor(UserMetadataEditor):
         user_metadata["notes"] = notes
 
         self.write_user_metadata(name, user_metadata)
+        self.page.refresh()
 
     def get_metadata_table(self, name):
         table = super().get_metadata_table(name)
@@ -146,7 +147,7 @@ class LoraUserMetadataEditor(UserMetadataEditor):
 
     def create_extra_default_items_in_left_column(self):
         self.select_sd_version = gr.Dropdown(
-            choices=("SD1", "SD2", "SDXL", "Unknown"),
+            choices=("SD1", "SDXL", "Unknown"),
             value="Unknown",
             label="Stable Diffusion Version",
             interactive=True,
```
extensions-builtin/Lora/ui_extra_networks_lora.py
CHANGED

```diff
@@ -53,13 +53,7 @@ class ExtraNetworksPageLora(ExtraNetworksPage):
         sd_version = lora_on_disk.sd_version
 
         if enable_filter and not shared.opts.lora_show_all:
-            if sd_version == network.SDVersion.Unknown:
-                model_version = network.SDVersion.SDXL if shared.sd_model.is_sdxl else network.SDVersion.SD1
-                if model_version.name in shared.opts.lora_hide_unknown_for_versions:
-                    return None
-            elif shared.sd_model.is_sdxl and sd_version != network.SDVersion.SDXL:
-                return None
-            elif shared.sd_model.is_sd2 and sd_version != network.SDVersion.SD2:
+            if shared.sd_model.is_sdxl and sd_version != network.SDVersion.SDXL:
                 return None
             elif shared.sd_model.is_sd1 and sd_version != network.SDVersion.SD1:
                 return None
```
extensions-builtin/forge_legacy_preprocessors/annotator/depth_anything.py
CHANGED

```diff
@@ -1,15 +1,15 @@
 import os
 
 import cv2
 import numpy as np
+import torch
 import torch.nn.functional as F
+from depth_anything.dpt import DPT_DINOv2
+from depth_anything.util.transform import NormalizeImage, PrepareForNet, Resize
 from torchvision.transforms import Compose
 
-from depth_anything.dpt import DPT_DINOv2
-from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
-from .util import load_model
 from .annotator_path import models_path
+from .util import load_model
 
 transform = Compose(
     [
@@ -49,9 +49,7 @@ class DepthAnythingDetector:
             "CONTROLNET_DEPTH_ANYTHING_MODEL_URL",
             "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitl14.pth",
         )
-        model_path = load_model(
-            "depth_anything_vitl14.pth", remote_url=remote_url, model_dir=self.model_dir
-        )
+        model_path = load_model("depth_anything_vitl14.pth", remote_url=remote_url, model_dir=self.model_dir)
         self.model.load_state_dict(torch.load(model_path))
 
     def __call__(self, image: np.ndarray, colored: bool = True) -> np.ndarray:
@@ -67,9 +65,7 @@ class DepthAnythingDetector:
             return model(image)
 
         depth = predict_depth(self.model, image)
-        depth = F.interpolate(
-            depth[None], (h, w), mode="bilinear", align_corners=False
-        )[0, 0]
+        depth = F.interpolate(depth[None], (h, w), mode="bilinear", align_corners=False)[0, 0]
         depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
         depth = depth.cpu().numpy().astype(np.uint8)
         if colored:
```
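
Both Depth-Anything detectors share the same post-processing: bilinearly resize the raw prediction back to the input resolution, then min-max normalize it into an 8-bit map. A self-contained sketch of that step:

```python
import torch
import torch.nn.functional as F

depth = torch.rand(1, 518, 518)  # stand-in for a model prediction
h, w = 480, 640                  # original image size

# Resize back to the source resolution, then min-max normalize to 0..255.
depth = F.interpolate(depth[None], (h, w), mode="bilinear", align_corners=False)[0, 0]
depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
depth_u8 = depth.cpu().numpy().astype("uint8")  # ready for cv2.applyColorMap
print(depth_u8.shape)  # (480, 640)
```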
extensions-builtin/forge_legacy_preprocessors/annotator/depth_anything_v2.py
ADDED

```python
import os

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from depth_anything_v2.dpt import DepthAnythingV2
from depth_anything_v2.util.transform import NormalizeImage, PrepareForNet, Resize
from safetensors.torch import load_file
from torchvision.transforms import Compose

from .annotator_path import models_path
from .util import load_model

transform = Compose(
    [
        Resize(
            width=518,
            height=518,
            resize_target=False,
            keep_aspect_ratio=True,
            ensure_multiple_of=14,
            resize_method="lower_bound",
            image_interpolation_method=cv2.INTER_CUBIC,
        ),
        NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        PrepareForNet(),
    ]
)


class DepthAnythingV2Detector:
    """https://github.com/MackinationsAi/Upgraded-Depth-Anything-V2"""

    model_dir = os.path.join(models_path, "depth_anything_v2")

    def __init__(self, device: torch.device):
        self.device = device
        self.model = (
            DepthAnythingV2(
                encoder="vitl",
                features=256,
                out_channels=[256, 512, 1024, 1024],
            )
            .to(device)
            .eval()
        )
        remote_url = os.environ.get(
            "CONTROLNET_DEPTH_ANYTHING_V2_MODEL_URL",
            "https://huggingface.co/MackinationsAi/Depth-Anything-V2_Safetensors/resolve/main/depth_anything_v2_vitl.safetensors",
        )
        model_path = load_model("depth_anything_v2_vitl.safetensors", remote_url=remote_url, model_dir=self.model_dir)
        self.model.load_state_dict(load_file(model_path))

    def __call__(self, image: np.ndarray, colored: bool = True) -> np.ndarray:
        self.model.to(self.device)
        h, w = image.shape[:2]

        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) / 255.0
        image = transform({"image": image})["image"]
        image = torch.from_numpy(image).unsqueeze(0).to(self.device)

        @torch.no_grad()
        def predict_depth(model, image):
            return model(image)

        depth = predict_depth(self.model, image)
        depth = F.interpolate(depth[None], (h, w), mode="bilinear", align_corners=False)[0, 0]
        depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
        depth = depth.cpu().numpy().astype(np.uint8)
        if colored:
            depth_color = cv2.applyColorMap(depth, cv2.COLORMAP_INFERNO)[:, :, ::-1]
            return depth_color
        else:
            return depth

    def unload_model(self):
        self.model.to("cpu")
```
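
For reference, a minimal usage sketch of the new detector, assuming the `depth_anything_v2` wheel is installed and a CUDA device is available (file names hypothetical):

```python
import cv2
import torch
from annotator.depth_anything_v2 import DepthAnythingV2Detector

detector = DepthAnythingV2Detector(torch.device("cuda"))
image = cv2.imread("input.png")             # BGR image of any size
colored = detector(image, colored=True)     # INFERNO-colored RGB uint8 map
raw = detector(image, colored=False)        # single-channel uint8 depth map
detector.unload_model()                     # move the weights back to the CPU
cv2.imwrite("depth.png", colored[:, :, ::-1])  # convert RGB back to BGR for cv2
```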
extensions-builtin/forge_legacy_preprocessors/annotator/pidinet/model.py
CHANGED

```diff
@@ -11,7 +11,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from modules import devices
-
+
 
 nets = {
     "baseline": {
@@ -739,17 +739,3 @@ def pidinet():
     pdcs = config_model("carv4")
     dil = 24  # if args.dil else None
     return PiDiNet(60, pdcs, dil=dil, sa=True)
-
-
-if __name__ == "__main__":
-    model = pidinet()
-    ckp = torch.load("table5_pidinet.pth")["state_dict"]
-    model.load_state_dict({k.replace("module.", ""): v for k, v in ckp.items()})
-    im = cv2.imread("examples/test_my/cat_v4.png")
-    im = img2tensor(im).unsqueeze(0) / 255.0
-    res = model(im)[-1]
-    res = res > 0.5
-    res = res.float()
-    res = (res[0, 0].cpu().data.numpy() * 255.0).astype(np.uint8)
-    print(res.shape)
-    cv2.imwrite("edge.png", res)
```
extensions-builtin/forge_legacy_preprocessors/install.py
CHANGED

```diff
@@ -1,8 +1,6 @@
 import os
 from pathlib import Path
-from typing import Optional, Tuple
-
-import pkg_resources
+from modules.errors import display
 
 import launch
 
@@ -10,72 +8,37 @@ repo_root = Path(__file__).parent
 main_req_file = repo_root / "requirements.txt"
 
 
-def comparable_version(version: str) -> Tuple:
-    return tuple(version.split("."))
-
-
-def get_installed_version(package: str) -> Optional[str]:
-    try:
-        return pkg_resources.get_distribution(package).version
-    except Exception:
-        return None
-
-
-def extract_base_package(package_string: str) -> str:
-    base_package = package_string.split("@git")[0]
-    return base_package
-
-
 def install_requirements(req_file):
-    with open(req_file) as file:
-        for package in file:
+    with open(req_file, "r") as file:
+        for package in file.readlines():
             try:
                 package = package.strip()
-                if "==" in package:
-                    package_name, package_version = package.split("==")
-                    installed_version = get_installed_version(package_name)
-                    if installed_version != package_version:
-                        launch.run_pip(
-                            f"install -U {package}",
-                            f"forge_legacy_preprocessor requirement: {package_name}=={package_version}",
-                        )
-                elif ">=" in package:
-                    package_name, package_version = package.split(">=")
-                    installed_version = get_installed_version(package_name)
-                    if not installed_version or comparable_version(installed_version) < comparable_version(package_version):
-                        launch.run_pip(
-                            f"install -U {package}",
-                            f"forge_legacy_preprocessor requirement: {package_name}=={package_version}",
-                        )
-                elif not launch.is_installed(extract_base_package(package)):
+                if not launch.is_installed(package):
                     launch.run_pip(
                         f"install {package}",
-                        f"…",
+                        f"Legacy Preprocessor Requirement: {package}",
                     )
             except Exception as e:
-                …
-                print(f"…")
+                display(e, "cnet req")
+                print(f"Failed to install {package}, some Preprocessors may not work...")
 
 
-…
-        return
+install_requirements(main_req_file)
+
+
+def try_install_from_wheel(pkg_name: str, wheel_url: str):
+    if launch.is_installed(pkg_name):
+        return
 
     try:
         launch.run_pip(
             f"install -U {wheel_url}",
-            f"…",
+            f"Legacy Preprocessor Requirement: {pkg_name}",
         )
     except Exception as e:
-        …
-        print(f"…")
-        …
+        display(e, "cnet req")
+        print(f"Failed to install {pkg_name}, some Preprocessors may not work...")
 
-
-install_requirements(main_req_file)
 
 try_install_from_wheel(
     "handrefinerportable",
@@ -83,8 +46,8 @@ try_install_from_wheel(
         "HANDREFINER_WHEEL",
         "https://github.com/huchenlei/HandRefinerPortable/releases/download/v1.0.1/handrefinerportable-2024.2.12.0-py2.py3-none-any.whl",
     ),
-    version="2024.2.12.0",
 )
+
 try_install_from_wheel(
     "depth_anything",
     wheel_url=os.environ.get(
@@ -92,3 +55,11 @@ try_install_from_wheel(
         "https://github.com/huchenlei/Depth-Anything/releases/download/v1.0.0/depth_anything-2024.1.22.0-py2.py3-none-any.whl",
     ),
 )
+
+try_install_from_wheel(
+    "depth_anything_v2",
+    wheel_url=os.environ.get(
+        "DEPTH_ANYTHING_V2_WHEEL",
+        "https://github.com/MackinationsAi/UDAV2-ControlNet/releases/download/v1.0.0/depth_anything_v2-2024.7.1.0-py2.py3-none-any.whl",
+    ),
+)
```
extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor.py
CHANGED

```diff
@@ -168,9 +168,7 @@ def mediapipe_face(img, res=512, thr_a: int = 10, thr_b: float = 0.5, **kwargs):
     from annotator.mediapipe_face import apply_mediapipe_face
 
     model_mediapipe_face = apply_mediapipe_face
-    result = model_mediapipe_face(
-        img, max_faces=max_faces, min_confidence=min_confidence
-    )
+    result = model_mediapipe_face(img, max_faces=max_faces, min_confidence=min_confidence)
     return remove_pad(result), True
 
 
@@ -217,6 +215,26 @@ def unload_depth_anything():
         model_depth_anything.unload_model()
 
 
+model_depth_anything_v2 = None
+
+
+def depth_anything_v2(img, res: int = 512, colored: bool = True, **kwargs):
+    img, remove_pad = resize_image_with_pad(img, res)
+    global model_depth_anything_v2
+    if model_depth_anything_v2 is None:
+        with Extra(torch_handler):
+            from annotator.depth_anything_v2 import DepthAnythingV2Detector
+
+        device = devices.get_device_for("controlnet")
+        model_depth_anything_v2 = DepthAnythingV2Detector(device)
+    return remove_pad(model_depth_anything_v2(img, colored=colored)), True
+
+
+def unload_depth_anything_v2():
+    if model_depth_anything_v2 is not None:
+        model_depth_anything_v2.unload_model()
+
+
 model_midas = None
 
 
@@ -305,17 +323,20 @@ class OpenposeModel(object):
 
             self.model_openpose = OpenposeDetector()
 
-        return …
-…
+        return (
+            remove_pad(
+                self.model_openpose(
+                    img,
+                    include_body=include_body,
+                    include_hand=include_hand,
+                    include_face=include_face,
+                    use_dw_pose=use_dw_pose,
+                    use_animal_pose=use_animal_pose,
+                    json_pose_callback=json_pose_callback,
+                )
+            ),
+            True,
+        )
 
     def unload(self):
         if self.model_openpose is not None:
@@ -566,9 +587,7 @@ def lama_inpaint(img, res=512, **kwargs):
     prd_color = cv2.resize(prd_color, (W, H))
 
     alpha = raw_mask.astype(np.float32) / 255.0
-    fin_color = prd_color.astype(np.float32) * alpha + raw_color.astype(np.float32) * (
-        1 - alpha
-    )
+    fin_color = prd_color.astype(np.float32) * alpha + raw_color.astype(np.float32) * (1 - alpha)
     fin_color = fin_color.clip(0, 255).astype(np.uint8)
 
     result = np.concatenate([fin_color, raw_mask], axis=2)
@@ -613,46 +632,6 @@ def unload_normal_bae():
     pass
 
 
-model_oneformer_coco = None
-
-
-def oneformer_coco(img, res=512, **kwargs):
-    img, remove_pad = resize_image_with_pad(img, res)
-    global model_oneformer_coco
-    if model_oneformer_coco is None:
-        from annotator.oneformer import OneformerDetector
-
-        model_oneformer_coco = OneformerDetector(OneformerDetector.configs["coco"])
-    result = model_oneformer_coco(img)
-    return remove_pad(result), True
-
-
-def unload_oneformer_coco():
-    global model_oneformer_coco
-    if model_oneformer_coco is not None:
-        model_oneformer_coco.unload_model()
-
-
-model_oneformer_ade20k = None
-
-
-def oneformer_ade20k(img, res=512, **kwargs):
-    img, remove_pad = resize_image_with_pad(img, res)
-    global model_oneformer_ade20k
-    if model_oneformer_ade20k is None:
-        from annotator.oneformer import OneformerDetector
-
-        model_oneformer_ade20k = OneformerDetector(OneformerDetector.configs["ade20k"])
-    result = model_oneformer_ade20k(img)
-    return remove_pad(result), True
-
-
-def unload_oneformer_ade20k():
-    global model_oneformer_ade20k
-    if model_oneformer_ade20k is not None:
-        model_oneformer_ade20k.unload_model()
-
-
 model_shuffle = None
 
 
@@ -808,15 +787,10 @@ class InsightFaceModel:
         if not faces:
             raise Exception(f"Insightface: No face found in image {i}.")
         if len(faces) > 1:
-            print(
-                "Insightface: More than one face is detected in the image. "
-                f"Only the first one will be used {i}."
-            )
+            print("Insightface: More than one face is detected in the image. " f"Only the first one will be used {i}.")
         return torch.from_numpy(faces[0].normed_embedding).unsqueeze(0), False
 
-    def run_model_instant_id(
-        self, img: np.ndarray, res: int = 512, return_keypoints: bool = False, **kwargs
-    ) -> Tuple[Union[np.ndarray, torch.Tensor], bool]:
+    def run_model_instant_id(self, img: np.ndarray, res: int = 512, return_keypoints: bool = False, **kwargs) -> Tuple[Union[np.ndarray, torch.Tensor], bool]:
         """Run the insightface model for instant_id.
         Arguments:
             - img: Input image in any size.
@@ -877,10 +851,7 @@ class InsightFaceModel:
         if not face_info:
             raise Exception(f"Insightface: No face found in image.")
         if len(face_info) > 1:
-            print(
-                "Insightface: More than one face is detected in the image. "
-                f"Only the biggest one will be used."
-            )
+            print("Insightface: More than one face is detected in the image. " f"Only the biggest one will be used.")
         # only use the maximum face
         face_info = sorted(
             face_info,
@@ -893,9 +864,7 @@ class InsightFaceModel:
 
 
 g_insight_face_model = InsightFaceModel()
-g_insight_face_instant_id_model = InsightFaceModel(
-    face_analysis_model_name="antelopev2"
-)
+g_insight_face_instant_id_model = InsightFaceModel(face_analysis_model_name="antelopev2")
 
 
 @dataclass
```
extensions-builtin/forge_legacy_preprocessors/legacy_preprocessors/preprocessor_compiled.py
CHANGED

```diff
@@ -115,6 +115,19 @@ legacy_preprocessors = {
         "priority": 0,
         "tags": ["Depth"],
     },
+    "depth_anything_v2": {
+        "label": "depth_anything_v2",
+        "call_function": functools.partial(depth_anything_v2, colored=False),
+        "unload_function": unload_depth_anything_v2,
+        "managed_model": "model_depth_anything_v2",
+        "model_free": False,
+        "no_control_mode": False,
+        "resolution": None,
+        "slider_1": None,
+        "slider_2": None,
+        "priority": 0,
+        "tags": ["Depth"],
+    },
     "depth_hand_refiner": {
         "label": "depth_hand_refiner",
         "call_function": g_hand_refiner_model.run_model,
@@ -250,9 +263,7 @@ legacy_preprocessors = {
     },
     "instant_id_face_keypoints": {
         "label": "instant_id_face_keypoints",
-        "call_function": functools.partial(
-            g_insight_face_instant_id_model.run_model_instant_id, return_keypoints=True
-        ),
+        "call_function": functools.partial(g_insight_face_instant_id_model.run_model_instant_id, return_keypoints=True),
         "unload_function": None,
         "managed_model": "unknown",
         "model_free": False,
@@ -594,32 +605,6 @@ legacy_preprocessors = {
         "priority": 0,
         "tags": ["Segmentation"],
     },
-    "seg_ofade20k": {
-        "label": "seg_ofade20k",
-        "call_function": oneformer_ade20k,
-        "unload_function": unload_oneformer_ade20k,
-        "managed_model": "model_oneformer_ade20k",
-        "model_free": False,
-        "no_control_mode": False,
-        "resolution": None,
-        "slider_1": None,
-        "slider_2": None,
-        "priority": 100,
-        "tags": ["Segmentation"],
-    },
-    "seg_ofcoco": {
-        "label": "seg_ofcoco",
-        "call_function": oneformer_coco,
-        "unload_function": unload_oneformer_coco,
-        "managed_model": "model_oneformer_coco",
-        "model_free": False,
-        "no_control_mode": False,
-        "resolution": None,
-        "slider_1": None,
-        "slider_2": None,
-        "priority": 0,
-        "tags": ["Segmentation"],
-    },
     "seg_ufade20k": {
         "label": "seg_ufade20k",
         "call_function": uniformer,
```
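
Each registry entry pairs a callable with its UI metadata, and `functools.partial` pre-binds keyword arguments such as `colored=False` so the dispatcher can invoke every preprocessor the same way. A minimal sketch of that dispatch pattern (stub names hypothetical):

```python
import functools

def depth_stub(img, res=512, colored=True, **kwargs):
    # Stand-in for depth_anything_v2(); returns (map, is_image) like the real one.
    return f"depth(res={res}, colored={colored})", True

registry = {
    "depth_anything_v2": {"call_function": functools.partial(depth_stub, colored=False)},
}

result, is_image = registry["depth_anything_v2"]["call_function"]("img", res=512)
print(result)  # depth(res=512, colored=False); colored=False was pre-bound
```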
extensions-builtin/forge_legacy_preprocessors/requirements.txt
CHANGED

```diff
@@ -1,5 +1,8 @@
+addict
 fvcore
 mediapipe
+onnx
 onnxruntime
 opencv-python
 svglib
+yapf
```
extensions-builtin/sd_forge_controlnet/lib_controlnet/controlnet_ui/controlnet_ui_group.py
CHANGED

```diff
@@ -14,7 +14,7 @@ from lib_controlnet.controlnet_ui.tool_button import ToolButton
 from lib_controlnet.controlnet_ui.openpose_editor import OpenposeEditor
 from lib_controlnet.controlnet_ui.preset import ControlNetPresetUI, NEW_PRESET
 from lib_controlnet.utils import svg_preprocess, judge_image_type
-from lib_controlnet.enums import InputMode, HiResFixOption
+from lib_controlnet.enums import HiResFixOption
 from lib_controlnet.external_code import UiControlNetUnit
 from lib_controlnet import global_state, external_code
 
@@ -187,7 +187,6 @@ class ControlNetUiGroup:
         self.upload_independent_img_in_img2img = None
         self.image_upload_panel = None
         self.save_detected_map = None
-        self.input_mode = gr.State(InputMode.SIMPLE)
         self.hr_option = None
 
         self.dummy_update_trigger = None
@@ -504,7 +503,6 @@ class ControlNetUiGroup:
         self.preset_panel = ControlNetPresetUI(f"{elem_id_tabname}_{tabname}_")
 
         unit_args = [
-            self.input_mode,
             self.use_preview_as_input,
             self.generated_image,
             self.mask_image,
```
extensions-builtin/sd_forge_controlnet/lib_controlnet/enums.py
CHANGED
@@ -26,60 +26,3 @@ class HiResFixOption(Enum):
     @property
     def high_res_enabled(self) -> bool:
         return self in (HiResFixOption.BOTH, HiResFixOption.HIGH_RES_ONLY)
-
-
-class StableDiffusionVersion(Enum):
-    """The version family of stable diffusion model."""
-
-    UNKNOWN = 0
-    SD1x = 1
-    SD2x = 2
-    SDXL = 3
-
-    @staticmethod
-    def detect_from_model_name(model_name: str) -> "StableDiffusionVersion":
-        """
-        Based on the model name provided, guess what stable diffusion version it is.
-        This might not be accurate without actually inspect the file content.
-        """
-        if any(f"sd{v}" in model_name.lower() for v in ("14", "15", "16")):
-            return StableDiffusionVersion.SD1x
-
-        if "sd21" in model_name or "2.1" in model_name:
-            return StableDiffusionVersion.SD2x
-
-        if "xl" in model_name.lower():
-            return StableDiffusionVersion.SDXL
-
-        return StableDiffusionVersion.UNKNOWN
-
-    def encoder_block_num(self) -> int:
-        if self in (
-            StableDiffusionVersion.SD1x,
-            StableDiffusionVersion.SD2x,
-            StableDiffusionVersion.UNKNOWN,
-        ):
-            return 12
-        else:
-            return 9  # SDXL
-
-    def controlnet_layer_num(self) -> int:
-        return self.encoder_block_num() + 1
-
-    def is_compatible_with(self, other: "StableDiffusionVersion") -> bool:
-        """Incompatible only when one of version is SDXL and other is not"""
-        return (
-            any(v == StableDiffusionVersion.UNKNOWN for v in [self, other])
-            or sum(v == StableDiffusionVersion.SDXL for v in [self, other]) != 1
-        )
-
-
-class InputMode(Enum):
-    # Single image to a single ControlNet unit.
-    SIMPLE = "simple"
-    # Input is a directory. N generations. Each generation takes 1 input image
-    # from the directory.
-    BATCH = "batch"
-    # Input is a directory. 1 generation. Each generation takes N input image
-    # from the directory.
-    MERGE = "merge"
extensions-builtin/sd_forge_controlnet/lib_controlnet/external_code.py
CHANGED
@@ -6,7 +6,7 @@ from enum import Enum
 import numpy as np

 from lib_controlnet.logging import logger
-from lib_controlnet.enums import
+from lib_controlnet.enums import HiResFixOption


 class ControlMode(Enum):

@@ -159,8 +159,6 @@ class ControlNetUnit:
     """

     # ====== UI-only Fields ======
-    # Specifies the input mode for the unit, defaulting to a simple mode.
-    input_mode: InputMode = InputMode.SIMPLE
     # Determines whether to use the preview image as input; defaults to False.
     use_preview_as_input: bool = False
     # Holds the preview image as a NumPy array; defaults to None.
extensions-builtin/sd_forge_controlnet/lib_controlnet/global_state.py
CHANGED
@@ -2,6 +2,7 @@ from modules_forge.shared import controlnet_dir, supported_preprocessors
 from modules import shared

 from collections import OrderedDict
+from functools import lru_cache
 import glob
 import os

@@ -44,17 +45,13 @@ def update_controlnet_filenames():
         shared.opts.data.get("control_net_models_path", None),
         getattr(shared.cmd_opts, "controlnet_dir", None),
     )
-    extra_paths = (
-        extra_path
-        for extra_path in ext_dirs
-        if extra_path is not None and os.path.exists(extra_path)
-    )
+    extra_paths = (extra_path for extra_path in ext_dirs if extra_path is not None and os.path.exists(extra_path))

    for path in [controlnet_dir, *extra_paths]:
        found = get_all_models(path, "name")
        controlnet_filename_dict.update(found)

-    controlnet_names =
+    controlnet_names = sorted(controlnet_filename_dict.keys(), key=lambda mdl: mdl)


 def get_all_controlnet_names() -> list[str]:

@@ -72,11 +69,7 @@ def get_filtered_controlnet_names(tag: str) -> list[str]:
     for p in filtered_preprocessors.values():
         filename_filters.extend(p.model_filename_filters)

-    return [
-        cnet
-        for cnet in controlnet_names
-        if cnet == "None" or any(f.lower() in cnet.lower() for f in filename_filters)
-    ]
+    return [cnet for cnet in controlnet_names if cnet == "None" or any(f.lower() in cnet.lower() for f in filename_filters)]


 def get_all_preprocessor_tags() -> list[str]:

@@ -91,10 +84,11 @@ def get_preprocessor(name: str):
     return supported_preprocessors[name]


+@lru_cache(maxsize=1, typed=False)
 def get_sorted_preprocessors() -> dict:
     results = OrderedDict({"None": supported_preprocessors["None"]})
     preprocessors = [p for (k, p) in supported_preprocessors.items() if k != "None"]
-    preprocessors = sorted(preprocessors, key=lambda
+    preprocessors = sorted(preprocessors, key=lambda mdl: mdl.name)
     for p in preprocessors:
         results[p.name] = p
     return results

@@ -111,8 +105,4 @@ def get_filtered_preprocessor_names(tag: str) -> list[str]:
 def get_filtered_preprocessors(tag: str) -> dict:
     if tag == "All":
         return supported_preprocessors
-    return {
-        k: v
-        for (k, v) in get_sorted_preprocessors().items()
-        if tag in v.tags or k == "None"
-    }
+    return {k: v for (k, v) in get_sorted_preprocessors().items() if tag in v.tags or k == "None"}
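The new `@lru_cache(maxsize=1, typed=False)` on `get_sorted_preprocessors` means the name-sorted `OrderedDict` is built once and then served from cache on every later call, which is safe because the function takes no arguments and the preprocessor registry is fixed after startup. A minimal sketch of the pattern (the stand-in `registry` dict is illustrative, not the extension's real `supported_preprocessors`); if the registry could change at runtime, the cache would need an explicit `sorted_entries.cache_clear()`:

    from collections import OrderedDict
    from functools import lru_cache

    registry = {"None": None, "canny": "Canny", "depth": "Depth"}  # stand-in registry

    @lru_cache(maxsize=1)
    def sorted_entries() -> OrderedDict:
        # Built on the first call only; later calls return the cached dict.
        print("building...")
        results = OrderedDict({"None": registry["None"]})
        for name in sorted(k for k in registry if k != "None"):
            results[name] = registry[name]
        return results

    sorted_entries()  # prints "building..."
    sorted_entries()  # served from the cache, no print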
extensions-builtin/sd_forge_controlnet/preload.py
CHANGED
@@ -2,12 +2,6 @@ def preload(parser):
     parser.add_argument(
         "--controlnet-loglevel",
         default="INFO",
-        choices=
+        choices=("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"),
         help="Set the log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)",
     )
-    parser.add_argument(
-        "--controlnet-tracemalloc",
-        default=None,
-        action="store_true",
-        help="Enable memory tracing.",
-    )
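Moving the valid log levels into `choices` lets argparse reject bad values itself instead of trusting the help text. A standalone sketch of that behavior (the bare parser here is illustrative):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--controlnet-loglevel",
        default="INFO",
        choices=("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"),
        help="Set the log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)",
    )

    print(parser.parse_args(["--controlnet-loglevel", "DEBUG"]))  # accepted
    parser.parse_args(["--controlnet-loglevel", "verbose"])       # argparse exits with an error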
extensions-builtin/sd_forge_controlnet/scripts/controlnet.py
CHANGED
@@ -151,11 +151,9 @@ class ControlNetForForgeOfficial(scripts.Script):
         return input_image

     def get_input_data(self, p, unit, preprocessor, h, w):
-        logger.info(f"ControlNet Input Mode: {unit.input_mode}")
         resize_mode = external_code.resize_mode_from_value(unit.resize_mode)
         image_list = []

-        assert unit.input_mode == external_code.InputMode.SIMPLE
         assert unit.use_preview_as_input is False

         a1111_i2i_image = getattr(p, "init_images", [None])[0]
extensions-builtin/xyz/lib_xyz/builtins.py
CHANGED
@@ -1,4 +1,4 @@
-from modules import sd_models, sd_samplers,
+from modules import sd_models, sd_samplers, sd_schedulers, sd_vae, shared

 from .axis_application import (
     apply_checkpoint,

@@ -38,49 +38,16 @@ builtin_options = [
     AxisOptionImg2Img("Image CFG Scale", float, apply_field("image_cfg_scale")),
     AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value),
     AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list),
-    AxisOptionTxt2Img(
-        "Sampler",
-        str,
-        apply_field("sampler_name"),
-        format_value=format_value,
-        confirm=confirm_samplers,
-        choices=sd_samplers.visible_sampler_names,
-    ),
-    AxisOptionTxt2Img(
-        "Hires sampler",
-        str,
-        apply_field("hr_sampler_name"),
-        confirm=confirm_samplers,
-        choices=sd_samplers.visible_sampler_names,
-    ),
-    AxisOptionImg2Img(
-        "Sampler",
-        str,
-        apply_field("sampler_name"),
-        format_value=format_value,
-        confirm=confirm_samplers,
-        choices=sd_samplers.visible_sampler_names,
-    ),
-    AxisOption(
-        "Checkpoint name",
-        str,
-        apply_checkpoint,
-        format_value=format_remove_path,
-        confirm=confirm_checkpoints,
-        cost=1.0,
-        choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold),
-    ),
+    AxisOptionTxt2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=sd_samplers.visible_sampler_names),
+    AxisOptionTxt2Img("Hires sampler", str, apply_field("hr_sampler_name"), confirm=confirm_samplers, choices=sd_samplers.visible_sampler_names),
+    AxisOptionImg2Img("Sampler", str, apply_field("sampler_name"), format_value=format_value, confirm=confirm_samplers, choices=sd_samplers.visible_sampler_names),
+    AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_remove_path, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)),
     AxisOption("Negative Guidance minimum sigma", float, apply_field("s_min_uncond")),
     AxisOption("Sigma Churn", float, apply_field("s_churn")),
     AxisOption("Sigma min", float, apply_field("s_tmin")),
     AxisOption("Sigma max", float, apply_field("s_tmax")),
     AxisOption("Sigma noise", float, apply_field("s_noise")),
-    AxisOption(
-        "Schedule type",
-        str,
-        apply_override("k_sched_type"),
-        choices=lambda: list(sd_samplers_kdiffusion.k_diffusion_scheduler),
-    ),
+    AxisOption("Schedule type", str, apply_field("scheduler"), choices=lambda: [x.label for x in sd_schedulers.schedulers]),
     AxisOption("Schedule min sigma", float, apply_override("sigma_min")),
     AxisOption("Schedule max sigma", float, apply_override("sigma_max")),
     AxisOption("Schedule rho", float, apply_override("rho")),

@@ -89,15 +56,7 @@ builtin_options = [
     AxisOption("Denoising", float, apply_field("denoising_strength")),
     AxisOption("Initial noise multiplier", float, apply_field("initial_noise_multiplier")),
     AxisOption("Extra noise", float, apply_override("img2img_extra_noise")),
-    AxisOptionTxt2Img(
-        "Hires upscaler",
-        str,
-        apply_field("hr_upscaler"),
-        choices=lambda: [
-            *shared.latent_upscale_modes,
-            *[x.name for x in shared.sd_upscalers],
-        ],
-    ),
+    AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
     AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")),
     AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: ["None"] + list(sd_vae.vae_dict)),
     AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)),

@@ -105,32 +64,9 @@ builtin_options = [
     AxisOption("Face restore", str, apply_face_restore, format_value=format_value),
     AxisOption("Token merging ratio", float, apply_override("token_merging_ratio")),
     AxisOption("Token merging ratio high-res", float, apply_override("token_merging_ratio_hr")),
-    AxisOption(
-        "Always discard next-to-last sigma",
-        str,
-        apply_override("always_discard_next_to_last_sigma", boolean=True),
-        choices=boolean_choice(reverse=True),
-    ),
-    AxisOption(
-        "SGM noise multiplier",
-        str,
-        apply_override("sgm_noise_multiplier", boolean=True),
-        choices=boolean_choice(reverse=True),
-    ),
-    AxisOption(
-        "Refiner checkpoint",
-        str,
-        apply_field("refiner_checkpoint"),
-        format_value=format_remove_path,
-        confirm=confirm_checkpoints_or_none,
-        cost=1.0,
-        choices=lambda: ["None"] + sorted(sd_models.checkpoints_list, key=str.casefold),
-    ),
+    AxisOption("Always discard next-to-last sigma", str, apply_override("always_discard_next_to_last_sigma", boolean=True), choices=boolean_choice(reverse=True)),
+    AxisOption("SGM noise multiplier", str, apply_override("sgm_noise_multiplier", boolean=True), choices=boolean_choice(reverse=True)),
+    AxisOption("Refiner checkpoint", str, apply_field("refiner_checkpoint"), format_value=format_remove_path, confirm=confirm_checkpoints_or_none, cost=1.0, choices=lambda: ["None"] + sorted(sd_models.checkpoints_list, key=str.casefold)),
     AxisOption("Refiner switch at", float, apply_field("refiner_switch_at")),
-    AxisOption(
-        "RNG source",
-        str,
-        apply_override("randn_source"),
-        choices=lambda: ["GPU", "CPU", "NV"],
-    ),
+    AxisOption("RNG source", str, apply_override("randn_source"), choices=lambda: ["GPU", "CPU", "NV"]),
 ]
extensions-builtin/xyz/scripts/xyz_grid.py
CHANGED
@@ -540,15 +540,11 @@ class XYZ(scripts.Script):
         # this could be moved to common code, but unlikely to be ever triggered anywhere else
         Image.MAX_IMAGE_PIXELS = None  # disable check in Pillow and rely on check below to allow large custom image sizes
         grid_mp = round(len(xs) * len(ys) * len(zs) * p.width * p.height / 1000000)
-        assert (
-            grid_mp < opts.img_max_size_mp
-        ), f"Error: Resulting grid would be too large ({grid_mp} MPixels) (max configured size is {opts.img_max_size_mp} MPixels)"
+        assert grid_mp < opts.img_max_size_mp, f"Error: Resulting grid would be too large ({grid_mp} MPixels) (max configured size is {opts.img_max_size_mp} MPixels)"

         def fix_axis_seeds(axis_opt, axis_list):
             if axis_opt.label in ["Seed", "Var. seed"]:
-                return [
-                    (int(random.randrange(4294967294)) if val is None or val == "" or val == -1 else val) for val in axis_list
-                ]
+                return [(int(random.randrange(4294967294)) if val is None or val == "" or val == -1 else val) for val in axis_list]
             else:
                 return axis_list
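The grid-size guard divides the total pixel count by 1,000,000 to get megapixels before comparing against `opts.img_max_size_mp`. A quick worked example of that arithmetic (axis lengths and sizes are made up for illustration):

    xs, ys, zs = 5, 5, 1          # axis lengths of the X/Y/Z grid
    width, height = 1024, 1024    # per-image size
    grid_mp = round(xs * ys * zs * width * height / 1000000)
    print(grid_mp)  # 26 -> whether this passes depends on the configured limit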
html/footer.html
CHANGED
@@ -1,11 +1,7 @@
 <div>
 <a href="{api_docs}">API</a>
 •
-<a href="
-•
-<a href="https://gradio.app">Gradio</a>
-•
-<a href="#" onclick="showProfile('./internal/profile-startup'); return false;">Startup profile</a>
+<a href="#" onclick="showProfile('./internal/profile-startup'); return false;">Startup Profile</a>
 •
 <a href="/" onclick="javascript:gradioApp().getElementById('settings_restart_gradio').click(); return false">Reload UI</a>
 </div>
javascript/ui.js
CHANGED
@@ -329,7 +329,7 @@ onOptionsChanged(function () {
     if (elem && elem.textContent != shorthash) {
         elem.textContent = shorthash;
         elem.title = sd_checkpoint_hash;
-        elem.href = "https://
+        elem.href = "https://civitai.com/search/models?query=" + sd_checkpoint_hash;
     }
 });
ldm_patched/k_diffusion/sampling.py
CHANGED
@@ -6,7 +6,7 @@ import torch
 import torchsde
 from scipy import integrate
 from torch import nn
-from tqdm.auto import
+from tqdm.auto import trange

 from . import utils

@@ -26,12 +26,7 @@ def get_sigmas_karras(n, sigma_min, sigma_max, rho=7.0, device="cpu"):

 def get_sigmas_exponential(n, sigma_min, sigma_max, device="cpu"):
     """Constructs an exponential noise schedule"""
-    sigmas = torch.linspace(
-        math.log(sigma_max),
-        math.log(sigma_min),
-        n,
-        device=device,
-    ).exp()
+    sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), n, device=device).exp()
     return append_zero(sigmas)
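The condensed `get_sigmas_exponential` is unchanged in behavior: linearly spacing the log-sigmas and exponentiating yields a geometric progression, so the ratio between consecutive sigmas is constant. A quick standalone check, trimmed to the core computation (no `device` handling or `append_zero`):

    import math
    import torch

    def get_sigmas_exponential(n, sigma_min, sigma_max):
        return torch.linspace(math.log(sigma_max), math.log(sigma_min), n).exp()

    s = get_sigmas_exponential(5, 0.1, 10.0)
    print(s)            # 10.0 ... 0.1, geometrically spaced
    print(s[1:] / s[:-1])  # all equal (~0.3162 = 10 ** -0.5 here)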
@@ -42,32 +37,24 @@ def get_sigmas_polyexponential(n, sigma_min, sigma_max, rho=1.0, device="cpu"):
     return append_zero(sigmas)


-def get_sigmas_vp(n, beta_d=19.9, beta_min=0.1, eps_s=1e-3, device="cpu"):
-    """Constructs a continuous VP noise schedule"""
-    t = torch.linspace(1, eps_s, n, device=device)
-    sigmas = torch.sqrt((beta_d * t**2 / 2 + beta_min * t).expm1())
-    return append_zero(sigmas)
-
-
 def to_d(x, sigma, denoised):
     """Converts a denoiser output to a Karras ODE derivative"""
     return (x - denoised) / utils.append_dims(sigma, x.ndim)


 def get_ancestral_step(sigma_from, sigma_to, eta=1.0):
-    """
-
+    """
+    Calculates the noise level (sigma_down) to step down to and the
+    amount of noise to add (sigma_up) when doing an ancestral sampling step
+    """
     if not eta:
         return sigma_to, 0.0
-    sigma_up = min(
-        sigma_to,
-        eta * (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5,
-    )
+    sigma_up = min(eta * (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5, sigma_to)
     sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
     return sigma_down, sigma_up


-def default_noise_sampler(x):
+def default_noise_sampler(x, seed=None):
     return lambda sigma, sigma_next: torch.randn_like(x)
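The reflowed `get_ancestral_step` preserves the variance split that makes ancestral sampling correct: by construction sigma_down**2 + sigma_up**2 == sigma_to**2, and clamping sigma_up at sigma_to keeps sigma_down real. A standalone check of that identity:

    def get_ancestral_step(sigma_from, sigma_to, eta=1.0):
        if not eta:
            return sigma_to, 0.0
        sigma_up = min(eta * (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5, sigma_to)
        sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
        return sigma_down, sigma_up

    down, up = get_ancestral_step(sigma_from=2.0, sigma_to=1.0, eta=1.0)
    # Stepping down to sigma_down and re-adding sigma_up of fresh noise
    # lands exactly on the target noise level sigma_to:
    assert abs((down**2 + up**2) - 1.0**2) < 1e-12
    print(down, up)  # ~0.5, ~0.866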
@@ -140,18 +127,7 @@ class BrownianTreeNoiseSampler:


 @torch.no_grad()
-def sample_euler(
-    model,
-    x,
-    sigmas,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    s_churn=0.0,
-    s_tmin=0.0,
-    s_tmax=float("inf"),
-    s_noise=1.0,
-):
+def sample_euler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0.0, s_tmin=0.0, s_tmax=float("inf"), s_noise=1.0):
     """Implements Algorithm 2 (Euler steps) from Karras et al. (2022)"""
     extra_args = {} if extra_args is None else extra_args
     s_in = x.new_ones([x.shape[0]])

@@ -164,33 +140,14 @@ def sample_euler(
         denoised = model(x, sigma_hat * s_in, **extra_args)
         d = to_d(x, sigma_hat, denoised)
         if callback is not None:
-            callback(
-                {
-                    "x": x,
-                    "i": i,
-                    "sigma": sigmas[i],
-                    "sigma_hat": sigma_hat,
-                    "denoised": denoised,
-                }
-            )
+            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
         dt = sigmas[i + 1] - sigma_hat
-        # Euler method
         x = x + d * dt
     return x


 @torch.no_grad()
-def sample_euler_ancestral(
-    model,
-    x,
-    sigmas,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    eta=1.0,
-    s_noise=1.0,
-    noise_sampler=None,
-):
+def sample_euler_ancestral(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None):
     """Ancestral sampling with Euler method steps"""
     extra_args = {} if extra_args is None else extra_args
     noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler

@@ -199,17 +156,8 @@ def sample_euler_ancestral(
         denoised = model(x, sigmas[i] * s_in, **extra_args)
         sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
         if callback is not None:
-            callback(
-                {
-                    "x": x,
-                    "i": i,
-                    "sigma": sigmas[i],
-                    "sigma_hat": sigmas[i],
-                    "denoised": denoised,
-                }
-            )
+            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
         d = to_d(x, sigmas[i], denoised)
-        # Euler method
         dt = sigma_down - sigmas[i]
         x = x + d * dt
         if sigmas[i + 1] > 0:

@@ -218,18 +166,7 @@ def sample_euler_ancestral(


 @torch.no_grad()
-def sample_heun(
-    model,
-    x,
-    sigmas,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    s_churn=0.0,
-    s_tmin=0.0,
-    s_tmax=float("inf"),
-    s_noise=1.0,
-):
+def sample_heun(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0.0, s_tmin=0.0, s_tmax=float("inf"), s_noise=1.0):
     """Implements Algorithm 2 (Heun steps) from Karras et al. (2022)"""
     extra_args = {} if extra_args is None else extra_args
     s_in = x.new_ones([x.shape[0]])

@@ -242,21 +179,11 @@ def sample_heun(
         denoised = model(x, sigma_hat * s_in, **extra_args)
         d = to_d(x, sigma_hat, denoised)
         if callback is not None:
-            callback(
-                {
-                    "x": x,
-                    "i": i,
-                    "sigma": sigmas[i],
-                    "sigma_hat": sigma_hat,
-                    "denoised": denoised,
-                }
-            )
+            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
         dt = sigmas[i + 1] - sigma_hat
         if sigmas[i + 1] == 0:
-            # Euler method
             x = x + d * dt
         else:
-            # Heun's method
             x_2 = x + d * dt
             denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
             d_2 = to_d(x_2, sigmas[i + 1], denoised_2)

@@ -266,18 +193,7 @@ def sample_heun(


 @torch.no_grad()
-def sample_dpm_2(
-    model,
-    x,
-    sigmas,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    s_churn=0.0,
-    s_tmin=0.0,
-    s_tmax=float("inf"),
-    s_noise=1.0,
-):
+def sample_dpm_2(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0.0, s_tmin=0.0, s_tmax=float("inf"), s_noise=1.0):
     """A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)"""
     extra_args = {} if extra_args is None else extra_args
     s_in = x.new_ones([x.shape[0]])

@@ -290,21 +206,11 @@ def sample_dpm_2(
         denoised = model(x, sigma_hat * s_in, **extra_args)
         d = to_d(x, sigma_hat, denoised)
         if callback is not None:
-            callback(
-                {
-                    "x": x,
-                    "i": i,
-                    "sigma": sigmas[i],
-                    "sigma_hat": sigma_hat,
-                    "denoised": denoised,
-                }
-            )
+            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
         if sigmas[i + 1] == 0:
-            # Euler method
             dt = sigmas[i + 1] - sigma_hat
             x = x + d * dt
         else:
-            # DPM-Solver-2
             sigma_mid = sigma_hat.log().lerp(sigmas[i + 1].log(), 0.5).exp()
             dt_1 = sigma_mid - sigma_hat
             dt_2 = sigmas[i + 1] - sigma_hat
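In `sample_dpm_2`, the midpoint `sigma_hat.log().lerp(sigmas[i + 1].log(), 0.5).exp()` is the geometric mean of the two noise levels, i.e. sqrt(sigma_hat * sigma_next): halfway in log-sigma, which is the natural time variable for these solvers. A quick check:

    import torch

    sigma_hat, sigma_next = torch.tensor(4.0), torch.tensor(1.0)
    sigma_mid = sigma_hat.log().lerp(sigma_next.log(), 0.5).exp()
    assert torch.isclose(sigma_mid, (sigma_hat * sigma_next).sqrt())
    print(sigma_mid)  # 2.0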
@@ -315,54 +221,7 @@ def sample_dpm_2(
     return x


-@torch.no_grad()
-def sample_dpm_2_ancestral(
-    model,
-    x,
-    sigmas,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    eta=1.0,
-    s_noise=1.0,
-    noise_sampler=None,
-):
-    """Ancestral sampling with DPM-Solver second-order steps"""
-    extra_args = {} if extra_args is None else extra_args
-    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
-    s_in = x.new_ones([x.shape[0]])
-    for i in trange(len(sigmas) - 1, disable=disable):
-        denoised = model(x, sigmas[i] * s_in, **extra_args)
-        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
-        if callback is not None:
-            callback(
-                {
-                    "x": x,
-                    "i": i,
-                    "sigma": sigmas[i],
-                    "sigma_hat": sigmas[i],
-                    "denoised": denoised,
-                }
-            )
-        d = to_d(x, sigmas[i], denoised)
-        if sigma_down == 0:
-            # Euler method
-            dt = sigma_down - sigmas[i]
-            x = x + d * dt
-        else:
-            # DPM-Solver-2
-            sigma_mid = sigmas[i].log().lerp(sigma_down.log(), 0.5).exp()
-            dt_1 = sigma_mid - sigmas[i]
-            dt_2 = sigma_down - sigmas[i]
-            x_2 = x + d * dt_1
-            denoised_2 = model(x_2, sigma_mid * s_in, **extra_args)
-            d_2 = to_d(x_2, sigma_mid, denoised_2)
-            x = x + d_2 * dt_2
-        x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
-    return x
-
-
-def linear_multistep_coeff(order, t, i, j):
+def _linear_multistep_coeff(order, t, i, j):
     if order - 1 > i:
         raise ValueError(f"Order {order} too high for step {i}")

@@ -390,17 +249,9 @@ def sample_lms(model, x, sigmas, extra_args=None, callback=None, disable=None, order=4):
     if len(ds) > order:
         ds.pop(0)
     if callback is not None:
-        callback(
-            {
-                "x": x,
-                "i": i,
-                "sigma": sigmas[i],
-                "sigma_hat": sigmas[i],
-                "denoised": denoised,
-            }
-        )
+        callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
     cur_order = min(i + 1, order)
-    coeffs = [
+    coeffs = [_linear_multistep_coeff(cur_order, sigmas_cpu, i, j) for j in range(cur_order)]
     x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
     return x
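For `sample_lms`, each coefficient returned by `_linear_multistep_coeff` is the integral over the current step of the Lagrange basis polynomial through the last `cur_order` sigma points, which is the Adams-Bashforth construction (presumably what the `scipy.integrate` import above is for). A standalone sketch of that construction, not the file's implementation; with a unit descending step at order 2 it recovers the familiar 3/2 and -1/2 weights scaled by the signed step:

    from scipy import integrate

    def ab_coeff(order, t, i, j):
        # Integral over [t[i], t[i+1]] of the Lagrange basis polynomial
        # that is 1 at t[i - j] and 0 at the other stored points.
        def basis(tau):
            prod = 1.0
            for k in range(order):
                if k != j:
                    prod *= (tau - t[i - k]) / (t[i - j] - t[i - k])
            return prod
        return integrate.quad(basis, t[i], t[i + 1])[0]

    t = [3.0, 2.0, 1.0, 0.0]  # uniform, descending like a sigma schedule
    print(ab_coeff(2, t, i=1, j=0))  # -1.5: AB2 weight 3/2 times the signed step dt = -1
    print(ab_coeff(2, t, i=1, j=1))  #  0.5: AB2 weight -1/2 times dt = -1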
@@ -490,285 +341,9 @@ class DPMSolver(nn.Module):
         x_3 = x - self.sigma(t_next) * h.expm1() * eps - self.sigma(t_next) / r2 * (h.expm1() / h - 1) * (eps_r2 - eps)
         return x_3, eps_cache

-    def dpm_solver_fast(self, x, t_start, t_end, nfe, eta=0.0, s_noise=1.0, noise_sampler=None):
-        noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
-        if not t_end > t_start and eta:
-            raise ValueError("eta must be 0 for reverse sampling")
-
-        m = math.floor(nfe / 3) + 1
-        ts = torch.linspace(t_start, t_end, m + 1, device=x.device)
-
-        if nfe % 3 == 0:
-            orders = [3] * (m - 2) + [2, 1]
-        else:
-            orders = [3] * (m - 1) + [nfe % 3]
-
-        for i in range(len(orders)):
-            eps_cache = {}
-            t, t_next = ts[i], ts[i + 1]
-            if eta:
-                sd, su = get_ancestral_step(self.sigma(t), self.sigma(t_next), eta)
-                t_next_ = torch.minimum(t_end, self.t(sd))
-                su = (self.sigma(t_next) ** 2 - self.sigma(t_next_) ** 2) ** 0.5
-            else:
-                t_next_, su = t_next, 0.0
-
-            eps, eps_cache = self.eps(eps_cache, "eps", x, t)
-            denoised = x - self.sigma(t) * eps
-            if self.info_callback is not None:
-                self.info_callback({"x": x, "i": i, "t": ts[i], "t_up": t, "denoised": denoised})
-
-            if orders[i] == 1:
-                x, eps_cache = self.dpm_solver_1_step(x, t, t_next_, eps_cache=eps_cache)
-            elif orders[i] == 2:
-                x, eps_cache = self.dpm_solver_2_step(x, t, t_next_, eps_cache=eps_cache)
-            else:
-                x, eps_cache = self.dpm_solver_3_step(x, t, t_next_, eps_cache=eps_cache)
-
-            x = x + su * s_noise * noise_sampler(self.sigma(t), self.sigma(t_next))
-
-        return x
-
-    def dpm_solver_adaptive(
-        self,
-        x,
-        t_start,
-        t_end,
-        order=3,
-        rtol=0.05,
-        atol=0.0078,
-        h_init=0.05,
-        pcoeff=0.0,
-        icoeff=1.0,
-        dcoeff=0.0,
-        accept_safety=0.81,
-        eta=0.0,
-        s_noise=1.0,
-        noise_sampler=None,
-    ):
-        noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
-        if order not in {2, 3}:
-            raise ValueError("order should be 2 or 3")
-        forward = t_end > t_start
-        if not forward and eta:
-            raise ValueError("eta must be 0 for reverse sampling")
-        h_init = abs(h_init) * (1 if forward else -1)
-        atol = torch.tensor(atol)
-        rtol = torch.tensor(rtol)
-        s = t_start
-        x_prev = x
-        accept = True
-        pid = PIDStepSizeController(h_init, pcoeff, icoeff, dcoeff, 1.5 if eta else order, accept_safety)
-        info = {"steps": 0, "nfe": 0, "n_accept": 0, "n_reject": 0}
-
-        while s < t_end - 1e-5 if forward else s > t_end + 1e-5:
-            eps_cache = {}
-            t = torch.minimum(t_end, s + pid.h) if forward else torch.maximum(t_end, s + pid.h)
-            if eta:
-                sd, su = get_ancestral_step(self.sigma(s), self.sigma(t), eta)
-                t_ = torch.minimum(t_end, self.t(sd))
-                su = (self.sigma(t) ** 2 - self.sigma(t_) ** 2) ** 0.5
-            else:
-                t_, su = t, 0.0
-
-            eps, eps_cache = self.eps(eps_cache, "eps", x, s)
-            denoised = x - self.sigma(s) * eps
-
-            if order == 2:
-                x_low, eps_cache = self.dpm_solver_1_step(x, s, t_, eps_cache=eps_cache)
-                x_high, eps_cache = self.dpm_solver_2_step(x, s, t_, eps_cache=eps_cache)
-            else:
-                x_low, eps_cache = self.dpm_solver_2_step(x, s, t_, r1=1 / 3, eps_cache=eps_cache)
-                x_high, eps_cache = self.dpm_solver_3_step(x, s, t_, eps_cache=eps_cache)
-            delta = torch.maximum(atol, rtol * torch.maximum(x_low.abs(), x_prev.abs()))
-            error = torch.linalg.norm((x_low - x_high) / delta) / x.numel() ** 0.5
-            accept = pid.propose_step(error)
-            if accept:
-                x_prev = x_low
-                x = x_high + su * s_noise * noise_sampler(self.sigma(s), self.sigma(t))
-                s = t
-                info["n_accept"] += 1
-            else:
-                info["n_reject"] += 1
-            info["nfe"] += order
-            info["steps"] += 1
-
-            if self.info_callback is not None:
-                self.info_callback(
-                    {
-                        "x": x,
-                        "i": info["steps"] - 1,
-                        "t": s,
-                        "t_up": s,
-                        "denoised": denoised,
-                        "error": error,
-                        "h": pid.h,
-                        **info,
-                    }
-                )
-
-        return x, info
-
-
-@torch.no_grad()
-def sample_dpm_fast(
-    model,
-    x,
-    sigma_min,
-    sigma_max,
-    n,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    eta=0.0,
-    s_noise=1.0,
-    noise_sampler=None,
-):
-    """DPM-Solver-Fast (fixed step size). See https://arxiv.org/abs/2206.00927"""
-    if sigma_min <= 0 or sigma_max <= 0:
-        raise ValueError("sigma_min and sigma_max must not be 0")
-    with tqdm(total=n, disable=disable) as pbar:
-        dpm_solver = DPMSolver(model, extra_args, eps_callback=pbar.update)
-        if callback is not None:
-            dpm_solver.info_callback = lambda info: callback(
-                {
-                    "sigma": dpm_solver.sigma(info["t"]),
-                    "sigma_hat": dpm_solver.sigma(info["t_up"]),
-                    **info,
-                }
-            )
-        return dpm_solver.dpm_solver_fast(
-            x,
-            dpm_solver.t(torch.tensor(sigma_max)),
-            dpm_solver.t(torch.tensor(sigma_min)),
-            n,
-            eta,
-            s_noise,
-            noise_sampler,
-        )
-
-
-@torch.no_grad()
-def sample_dpm_adaptive(
-    model,
-    x,
-    sigma_min,
-    sigma_max,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    order=3,
-    rtol=0.05,
-    atol=0.0078,
-    h_init=0.05,
-    pcoeff=0.0,
-    icoeff=1.0,
-    dcoeff=0.0,
-    accept_safety=0.81,
-    eta=0.0,
-    s_noise=1.0,
-    noise_sampler=None,
-    return_info=False,
-):
-    """DPM-Solver-12 and 23 (adaptive step size). See https://arxiv.org/abs/2206.00927"""
-    if sigma_min <= 0 or sigma_max <= 0:
-        raise ValueError("sigma_min and sigma_max must not be 0")
-    with tqdm(disable=disable) as pbar:
-        dpm_solver = DPMSolver(model, extra_args, eps_callback=pbar.update)
-        if callback is not None:
-            dpm_solver.info_callback = lambda info: callback(
-                {
-                    "sigma": dpm_solver.sigma(info["t"]),
-                    "sigma_hat": dpm_solver.sigma(info["t_up"]),
-                    **info,
-                }
-            )
-        x, info = dpm_solver.dpm_solver_adaptive(
-            x,
-            dpm_solver.t(torch.tensor(sigma_max)),
-            dpm_solver.t(torch.tensor(sigma_min)),
-            order,
-            rtol,
-            atol,
-            h_init,
-            pcoeff,
-            icoeff,
-            dcoeff,
-            accept_safety,
-            eta,
-            s_noise,
-            noise_sampler,
-        )
-    if return_info:
-        return x, info
-    return x
-
-
-@torch.no_grad()
-def sample_dpmpp_2s_ancestral(
-    model,
-    x,
-    sigmas,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    eta=1.0,
-    s_noise=1.0,
-    noise_sampler=None,
-):
-    """Ancestral sampling with DPM-Solver++(2S) second-order steps"""
-    extra_args = {} if extra_args is None else extra_args
-    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
-    s_in = x.new_ones([x.shape[0]])
-    sigma_fn = lambda t: t.neg().exp()
-    t_fn = lambda sigma: sigma.log().neg()
-
-    for i in trange(len(sigmas) - 1, disable=disable):
-        denoised = model(x, sigmas[i] * s_in, **extra_args)
-        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
-        if callback is not None:
-            callback(
-                {
-                    "x": x,
-                    "i": i,
-                    "sigma": sigmas[i],
-                    "sigma_hat": sigmas[i],
-                    "denoised": denoised,
-                }
-            )
-        if sigma_down == 0:
-            # Euler method
-            d = to_d(x, sigmas[i], denoised)
-            dt = sigma_down - sigmas[i]
-            x = x + d * dt
-        else:
-            # DPM-Solver++(2S)
-            t, t_next = t_fn(sigmas[i]), t_fn(sigma_down)
-            r = 1 / 2
-            h = t_next - t
-            s = t + r * h
-            x_2 = (sigma_fn(s) / sigma_fn(t)) * x - (-h * r).expm1() * denoised
-            denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)
-            x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_2
-        # Noise addition
-        if sigmas[i + 1] > 0:
-            x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
-    return x
-

 @torch.no_grad()
-def sample_dpmpp_sde(
-    model,
-    x,
-    sigmas,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    eta=1.0,
-    s_noise=1.0,
-    noise_sampler=None,
-    r=1 / 2,
-):
+def sample_dpmpp_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None, r=0.5):
     """DPM-Solver++ (stochastic)"""
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     seed = extra_args.get("seed", None)
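The removed `dpm_solver_adaptive` follows the classic embedded-pair pattern: take a low-order and a high-order step, estimate the local error from their difference, and let a controller grow or shrink the step size, rejecting steps whose error exceeds tolerance. A generic sketch of that accept/reject loop on a scalar ODE (simplified, with a plain proportional controller in place of the file's `PIDStepSizeController`):

    def adaptive_steps(f, y, t, t_end, rtol=0.05, atol=0.0078, h=0.05, safety=0.9):
        """Integrate y' = f(t, y), comparing Euler (low) vs. midpoint (high) steps."""
        info = {"n_accept": 0, "n_reject": 0}
        while t < t_end:
            h = min(h, t_end - t)
            y_low = y + h * f(t, y)                             # order 1
            y_high = y + h * f(t + h / 2, y + h / 2 * f(t, y))  # order 2
            error = abs(y_low - y_high) / (atol + rtol * max(abs(y_low), abs(y)))
            if error <= 1.0:          # accept: advance with the better estimate
                t, y = t + h, y_high
                info["n_accept"] += 1
            else:                     # reject: retry from the same point
                info["n_reject"] += 1
            h *= safety * min(2.0, max(0.2, error ** -0.5))  # resize the next step
        return y, info

    y, info = adaptive_steps(lambda t, y: -y, 1.0, 0.0, 5.0)
    print(y, info)  # ~ exp(-5), with far fewer steps than a fixed fine grid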
@@ -781,35 +356,23 @@ def sample_dpmpp_sde(
     for i in trange(len(sigmas) - 1, disable=disable):
         denoised = model(x, sigmas[i] * s_in, **extra_args)
         if callback is not None:
-            callback(
-                {
-                    "x": x,
-                    "i": i,
-                    "sigma": sigmas[i],
-                    "sigma_hat": sigmas[i],
-                    "denoised": denoised,
-                }
-            )
+            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
         if sigmas[i + 1] == 0:
-            # Euler method
             d = to_d(x, sigmas[i], denoised)
             dt = sigmas[i + 1] - sigmas[i]
             x = x + d * dt
         else:
-            # DPM-Solver++
             t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
             h = t_next - t
             s = t + h * r
             fac = 1 / (2 * r)

-            # Step 1
             sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(s), eta)
             s_ = t_fn(sd)
             x_2 = (sigma_fn(s_) / sigma_fn(t)) * x - (t - s_).expm1() * denoised
             x_2 = x_2 + noise_sampler(sigma_fn(t), sigma_fn(s)) * s_noise * su
             denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)

-            # Step 2
             sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(t_next), eta)
             t_next_ = t_fn(sd)
             denoised_d = (1 - fac) * denoised + fac * denoised_2

@@ -830,15 +393,7 @@ def sample_dpmpp_2m(model, x, sigmas, extra_args=None, callback=None, disable=None):
     for i in trange(len(sigmas) - 1, disable=disable):
         denoised = model(x, sigmas[i] * s_in, **extra_args)
         if callback is not None:
-            callback(
-                {
-                    "x": x,
-                    "i": i,
-                    "sigma": sigmas[i],
-                    "sigma_hat": sigmas[i],
-                    "denoised": denoised,
-                }
-            )
+            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
         t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
         h = t_next - t
         if old_denoised is None or sigmas[i + 1] == 0:

@@ -853,83 +408,7 @@ def sample_dpmpp_2m(


 @torch.no_grad()
-def sample_dpmpp_2m_sde(
-    model,
-    x,
-    sigmas,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    eta=1.0,
-    s_noise=1.0,
-    noise_sampler=None,
-    solver_type="midpoint",
-):
-    """DPM-Solver++(2M) SDE"""
-
-    if solver_type not in {"heun", "midpoint"}:
-        raise ValueError("solver_type must be 'heun' or 'midpoint'")
-
-    seed = extra_args.get("seed", None)
-    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
-    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
-    extra_args = {} if extra_args is None else extra_args
-    s_in = x.new_ones([x.shape[0]])
-
-    old_denoised = None
-    h_last = None
-    h = None
-
-    for i in trange(len(sigmas) - 1, disable=disable):
-        denoised = model(x, sigmas[i] * s_in, **extra_args)
-        if callback is not None:
-            callback(
-                {
-                    "x": x,
-                    "i": i,
-                    "sigma": sigmas[i],
-                    "sigma_hat": sigmas[i],
-                    "denoised": denoised,
-                }
-            )
-        if sigmas[i + 1] == 0:
-            # Denoising step
-            x = denoised
-        else:
-            # DPM-Solver++(2M) SDE
-            t, s = -sigmas[i].log(), -sigmas[i + 1].log()
-            h = s - t
-            eta_h = eta * h
-
-            x = sigmas[i + 1] / sigmas[i] * (-eta_h).exp() * x + (-h - eta_h).expm1().neg() * denoised
-
-            if old_denoised is not None:
-                r = h_last / h
-                if solver_type == "heun":
-                    x = x + ((-h - eta_h).expm1().neg() / (-h - eta_h) + 1) * (1 / r) * (denoised - old_denoised)
-                elif solver_type == "midpoint":
-                    x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised)
-
-            if eta:
-                x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise
-
-        old_denoised = denoised
-        h_last = h
-    return x
-
-
-@torch.no_grad()
-def sample_dpmpp_3m_sde(
-    model,
-    x,
-    sigmas,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    eta=1.0,
-    s_noise=1.0,
-    noise_sampler=None,
-):
+def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None):
     """DPM-Solver++(3M) SDE"""

     seed = extra_args.get("seed", None)

@@ -944,17 +423,8 @@ def sample_dpmpp_3m_sde(
     for i in trange(len(sigmas) - 1, disable=disable):
         denoised = model(x, sigmas[i] * s_in, **extra_args)
         if callback is not None:
-            callback(
-                {
-                    "x": x,
-                    "i": i,
-                    "sigma": sigmas[i],
-                    "sigma_hat": sigmas[i],
-                    "denoised": denoised,
-                }
-            )
+            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
         if sigmas[i + 1] == 0:
-            # Denoising step
             x = denoised
         else:
             t, s = -sigmas[i].log(), -sigmas[i + 1].log()

@@ -988,17 +458,7 @@ def sample_dpmpp_3m_sde(


 @torch.no_grad()
-def sample_dpmpp_3m_sde_gpu(
-    model,
-    x,
-    sigmas,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    eta=1.0,
-    s_noise=1.0,
-    noise_sampler=None,
-):
+def sample_dpmpp_3m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None):
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
     return sample_dpmpp_3m_sde(

@@ -1015,47 +475,7 @@ def sample_dpmpp_3m_sde_gpu(


 @torch.no_grad()
-def sample_dpmpp_2m_sde_gpu(
-    model,
-    x,
-    sigmas,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    eta=1.0,
-    s_noise=1.0,
-    noise_sampler=None,
-    solver_type="midpoint",
-):
-    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
-    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
-    return sample_dpmpp_2m_sde(
-        model,
-        x,
-        sigmas,
-        extra_args=extra_args,
-        callback=callback,
-        disable=disable,
-        eta=eta,
-        s_noise=s_noise,
-        noise_sampler=noise_sampler,
-        solver_type=solver_type,
-    )
-
-
-@torch.no_grad()
-def sample_dpmpp_sde_gpu(
-    model,
-    x,
-    sigmas,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    eta=1.0,
-    s_noise=1.0,
-    noise_sampler=None,
-    r=1 / 2,
-):
+def sample_dpmpp_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None, r=0.5):
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
     return sample_dpmpp_sde(

@@ -1083,16 +503,7 @@ def DDPMSampler_step(x, sigma, sigma_prev, noise, noise_sampler):
     return mu


-def generic_step_sampler(
-    model,
-    x,
-    sigmas,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    noise_sampler=None,
-    step_function=None,
-):
+def generic_step_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, step_function=None):
     extra_args = {} if extra_args is None else extra_args
     noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
     s_in = x.new_ones([x.shape[0]])

@@ -1100,15 +511,7 @@ def generic_step_sampler(
     for i in trange(len(sigmas) - 1, disable=disable):
         denoised = model(x, sigmas[i] * s_in, **extra_args)
         if callback is not None:
-            callback(
-                {
-                    "x": x,
-                    "i": i,
-                    "sigma": sigmas[i],
-                    "sigma_hat": sigmas[i],
-                    "denoised": denoised,
-                }
-            )
+            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
         x = step_function(
             x / torch.sqrt(1.0 + sigmas[i] ** 2.0),
             sigmas[i],

@@ -1124,101 +527,3 @@ def generic_step_sampler(
 @torch.no_grad()
 def sample_ddpm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
     return generic_step_sampler(model, x, sigmas, extra_args, callback, disable, noise_sampler, DDPMSampler_step)
-
-
-@torch.no_grad()
-def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
-    extra_args = {} if extra_args is None else extra_args
-    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
-    s_in = x.new_ones([x.shape[0]])
-    for i in trange(len(sigmas) - 1, disable=disable):
-        denoised = model(x, sigmas[i] * s_in, **extra_args)
-        if callback is not None:
-            callback(
-                {
-                    "x": x,
-                    "i": i,
-                    "sigma": sigmas[i],
-                    "sigma_hat": sigmas[i],
-                    "denoised": denoised,
-                }
-            )
-
-        x = denoised
-        if sigmas[i + 1] > 0:
-            x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1])
-    return x
-
-
-@torch.no_grad()
-def sample_heunpp2(
-    model,
-    x,
-    sigmas,
-    extra_args=None,
-    callback=None,
-    disable=None,
-    s_churn=0.0,
-    s_tmin=0.0,
-    s_tmax=float("inf"),
-    s_noise=1.0,
-):
-    # From MIT licensed: https://github.com/Carzit/sd-webui-samplers-scheduler/
-    extra_args = {} if extra_args is None else extra_args
-    s_in = x.new_ones([x.shape[0]])
-    s_end = sigmas[-1]
-    for i in trange(len(sigmas) - 1, disable=disable):
-        gamma = min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
-        eps = torch.randn_like(x) * s_noise
-        sigma_hat = sigmas[i] * (gamma + 1)
-        if gamma > 0:
-            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
-        denoised = model(x, sigma_hat * s_in, **extra_args)
-        d = to_d(x, sigma_hat, denoised)
-        if callback is not None:
-            callback(
-                {
-                    "x": x,
-                    "i": i,
-                    "sigma": sigmas[i],
-                    "sigma_hat": sigma_hat,
-                    "denoised": denoised,
-                }
-            )
-        dt = sigmas[i + 1] - sigma_hat
-        if sigmas[i + 1] == s_end:
-            # Euler method
-            x = x + d * dt
-        elif sigmas[i + 2] == s_end:
-            # Heun's method
-            x_2 = x + d * dt
-            denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
-            d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
-
-            w = 2 * sigmas[0]
-            w2 = sigmas[i + 1] / w
-            w1 = 1 - w2
-
-            d_prime = d * w1 + d_2 * w2
-
-            x = x + d_prime * dt
-
-        else:
-            # Heun++
-            x_2 = x + d * dt
-            denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
-            d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
-            dt_2 = sigmas[i + 2] - sigmas[i + 1]
-
-            x_3 = x_2 + d_2 * dt_2
-            denoised_3 = model(x_3, sigmas[i + 2] * s_in, **extra_args)
-            d_3 = to_d(x_3, sigmas[i + 2], denoised_3)
-
-            w = 3 * sigmas[0]
-            w2 = sigmas[i + 1] / w
-            w3 = sigmas[i + 2] / w
-            w1 = 1 - w2 - w3
-
-            d_prime = w1 * d + w2 * d_2 + w3 * d_3
-            x = x + d_prime * dt
-    return x
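End to end, every sampler kept in this file shares the same contract: `model(x, sigma * s_in, **extra_args)` must return the denoised prediction, and the schedule must end in zero (which `append_zero` guarantees). A toy invocation against a stand-in denoiser, just to show the call shape, assuming the module lives at the path the diff header shows; the `toy_model` is illustrative, not a real diffusion model:

    import torch
    from ldm_patched.k_diffusion import sampling

    @torch.no_grad()
    def toy_model(x, sigma, **extra_args):
        # A stand-in "denoiser" that shrinks toward zero; a real model would
        # predict the clean image for the given noise level sigma.
        return x / (1 + sigma[:, None, None, None] ** 2) ** 0.5

    x = torch.randn(1, 4, 8, 8) * 10.0  # start from pure noise at sigma_max
    sigmas = sampling.get_sigmas_karras(n=10, sigma_min=0.1, sigma_max=10.0)
    out = sampling.sample_euler(toy_model, x, sigmas)
    print(out.shape, out.abs().mean())  # (1, 4, 8, 8), pulled toward zero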
|
|
6 |
import torchsde
|
7 |
from scipy import integrate
|
8 |
from torch import nn
|
9 |
+
from tqdm.auto import trange
|
10 |
|
11 |
from . import utils
|
12 |
|
|
|
26 |
|
27 |
def get_sigmas_exponential(n, sigma_min, sigma_max, device="cpu"):
|
28 |
"""Constructs an exponential noise schedule"""
|
29 |
+
sigmas = torch.linspace(math.log(sigma_max), math.log(sigma_min), n, device=device).exp()
|
|
|
|
|
|
|
|
|
|
|
30 |
return append_zero(sigmas)
|
31 |
|
32 |
|
|
|
37 |
return append_zero(sigmas)
|
38 |
|
39 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
def to_d(x, sigma, denoised):
|
41 |
"""Converts a denoiser output to a Karras ODE derivative"""
|
42 |
return (x - denoised) / utils.append_dims(sigma, x.ndim)
|
43 |
|
44 |
|
45 |
def get_ancestral_step(sigma_from, sigma_to, eta=1.0):
|
46 |
+
"""
|
47 |
+
Calculates the noise level (sigma_down) to step down to and the
|
48 |
+
amount of noise to add (sigma_up) when doing an ancestral sampling step
|
49 |
+
"""
|
50 |
if not eta:
|
51 |
return sigma_to, 0.0
|
52 |
+
sigma_up = min(eta * (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5, sigma_to)
|
|
|
|
|
|
|
53 |
sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
|
54 |
return sigma_down, sigma_up
|
55 |
|
56 |
|
57 |
+
def default_noise_sampler(x, seed=None):
|
58 |
return lambda sigma, sigma_next: torch.randn_like(x)
|
59 |
|
60 |
|
|
|
127 |
|
128 |
|
129 |
@torch.no_grad()
|
130 |
+
def sample_euler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0.0, s_tmin=0.0, s_tmax=float("inf"), s_noise=1.0):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
131 |
"""Implements Algorithm 2 (Euler steps) from Karras et al. (2022)"""
|
132 |
extra_args = {} if extra_args is None else extra_args
|
133 |
s_in = x.new_ones([x.shape[0]])
|
|
|
140 |
denoised = model(x, sigma_hat * s_in, **extra_args)
|
141 |
d = to_d(x, sigma_hat, denoised)
|
142 |
if callback is not None:
|
143 |
+
callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
144 |
dt = sigmas[i + 1] - sigma_hat
|
|
|
145 |
x = x + d * dt
|
146 |
return x
|
147 |
|
148 |
|
149 |
@torch.no_grad()
|
150 |
+
def sample_euler_ancestral(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
151 |
"""Ancestral sampling with Euler method steps"""
|
152 |
extra_args = {} if extra_args is None else extra_args
|
153 |
noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
|
|
|
156 |
denoised = model(x, sigmas[i] * s_in, **extra_args)
|
157 |
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
|
158 |
if callback is not None:
|
159 |
+
callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
160 |
d = to_d(x, sigmas[i], denoised)
|
|
|
161 |
dt = sigma_down - sigmas[i]
|
162 |
x = x + d * dt
|
163 |
if sigmas[i + 1] > 0:
|
|
|
166 |
|
167 |
|
168 |
@torch.no_grad()
|
169 |
+
def sample_heun(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0.0, s_tmin=0.0, s_tmax=float("inf"), s_noise=1.0):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
170 |
"""Implements Algorithm 2 (Heun steps) from Karras et al. (2022)"""
|
171 |
extra_args = {} if extra_args is None else extra_args
|
172 |
s_in = x.new_ones([x.shape[0]])
|
|
|
179 |
denoised = model(x, sigma_hat * s_in, **extra_args)
|
180 |
d = to_d(x, sigma_hat, denoised)
|
181 |
if callback is not None:
|
182 |
+
callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
183 |
dt = sigmas[i + 1] - sigma_hat
|
184 |
if sigmas[i + 1] == 0:
|
|
|
185 |
x = x + d * dt
|
186 |
else:
|
|
|
187 |
x_2 = x + d * dt
|
188 |
denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args)
|
189 |
d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
|
|
|
193 |
|
194 |
|
195 |
@torch.no_grad()
|
196 |
+
def sample_dpm_2(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0.0, s_tmin=0.0, s_tmax=float("inf"), s_noise=1.0):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
197 |
"""A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)"""
|
198 |
extra_args = {} if extra_args is None else extra_args
|
199 |
s_in = x.new_ones([x.shape[0]])
|
|
|
206 |
denoised = model(x, sigma_hat * s_in, **extra_args)
|
207 |
d = to_d(x, sigma_hat, denoised)
|
208 |
if callback is not None:
|
209 |
+
callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
210 |
if sigmas[i + 1] == 0:
|
|
|
211 |
dt = sigmas[i + 1] - sigma_hat
|
212 |
x = x + d * dt
|
213 |
else:
|
|
|
214 |
sigma_mid = sigma_hat.log().lerp(sigmas[i + 1].log(), 0.5).exp()
|
215 |
dt_1 = sigma_mid - sigma_hat
|
216 |
dt_2 = sigmas[i + 1] - sigma_hat
|
|
|
221 |
return x
|
222 |
|
223 |
|
224 |
+
def _linear_multistep_coeff(order, t, i, j):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
225 |
if order - 1 > i:
|
226 |
raise ValueError(f"Order {order} too high for step {i}")
|
227 |
|
|
|
249 |
if len(ds) > order:
|
250 |
ds.pop(0)
|
251 |
if callback is not None:
|
252 |
+
callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
253 |
cur_order = min(i + 1, order)
|
254 |
+
coeffs = [_linear_multistep_coeff(cur_order, sigmas_cpu, i, j) for j in range(cur_order)]
|
255 |
x = x + sum(coeff * d for coeff, d in zip(coeffs, reversed(ds)))
|
256 |
return x
|
257 |
|
|
|
341 |
x_3 = x - self.sigma(t_next) * h.expm1() * eps - self.sigma(t_next) / r2 * (h.expm1() / h - 1) * (eps_r2 - eps)
|
342 |
return x_3, eps_cache
|
343 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ ... @@
 @torch.no_grad()
+def sample_dpmpp_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None, r=0.5):
     """DPM-Solver++ (stochastic)"""
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     seed = extra_args.get("seed", None)
@@ ... @@
     for i in trange(len(sigmas) - 1, disable=disable):
         denoised = model(x, sigmas[i] * s_in, **extra_args)
         if callback is not None:
+            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
         if sigmas[i + 1] == 0:
             d = to_d(x, sigmas[i], denoised)
             dt = sigmas[i + 1] - sigmas[i]
             x = x + d * dt
         else:
             t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
             h = t_next - t
             s = t + h * r
             fac = 1 / (2 * r)
 
             sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(s), eta)
             s_ = t_fn(sd)
             x_2 = (sigma_fn(s_) / sigma_fn(t)) * x - (t - s_).expm1() * denoised
             x_2 = x_2 + noise_sampler(sigma_fn(t), sigma_fn(s)) * s_noise * su
             denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)
 
             sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(t_next), eta)
             t_next_ = t_fn(sd)
             denoised_d = (1 - fac) * denoised + fac * denoised_2
@@ ... @@
     for i in trange(len(sigmas) - 1, disable=disable):
         denoised = model(x, sigmas[i] * s_in, **extra_args)
         if callback is not None:
+            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
         t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
         h = t_next - t
         if old_denoised is None or sigmas[i + 1] == 0:
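These DPM-Solver++ samplers work in the half-log-SNR variable t = -log(sigma), which is why the 3M SDE body below computes t, s = -sigmas[i].log(), -sigmas[i + 1].log(). The sigma_fn/t_fn helpers used above follow the same change of variables; a minimal sketch of their assumed definitions (upstream k-diffusion convention, not shown in these hunks):

    # assumed helper definitions, matching the usage above
    sigma_fn = lambda t: t.neg().exp()      # sigma(t) = exp(-t)
    t_fn = lambda sigma: sigma.log().neg()  # t(sigma) = -log(sigma)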
@@ ... @@
 @torch.no_grad()
+def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None):
     """DPM-Solver++(3M) SDE"""
 
     seed = extra_args.get("seed", None)
@@ ... @@
     for i in trange(len(sigmas) - 1, disable=disable):
         denoised = model(x, sigmas[i] * s_in, **extra_args)
         if callback is not None:
+            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
         if sigmas[i + 1] == 0:
             x = denoised
         else:
             t, s = -sigmas[i].log(), -sigmas[i + 1].log()
@@ ... @@
 @torch.no_grad()
+def sample_dpmpp_3m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None):
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
     return sample_dpmpp_3m_sde(
@@ ... @@
 @torch.no_grad()
+def sample_dpmpp_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None, r=0.5):
     sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
     return sample_dpmpp_sde(
@@ ... @@
     return mu
 
 
+def generic_step_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, step_function=None):
     extra_args = {} if extra_args is None else extra_args
     noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
     s_in = x.new_ones([x.shape[0]])
@@ ... @@
     for i in trange(len(sigmas) - 1, disable=disable):
         denoised = model(x, sigmas[i] * s_in, **extra_args)
         if callback is not None:
+            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
         x = step_function(
             x / torch.sqrt(1.0 + sigmas[i] ** 2.0),
             sigmas[i],
@@ ... @@
 @torch.no_grad()
 def sample_ddpm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
     return generic_step_sampler(model, x, sigmas, extra_args, callback, disable, noise_sampler, DDPMSampler_step)
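generic_step_sampler factors the shared denoise-then-step loop out of individual samplers, so sample_ddpm only has to supply DDPMSampler_step as the per-step transition. A hedged sketch of plugging in a custom step function (identity_step and its signature are hypothetical, since the step_function call site above is truncated in this view):

    def identity_step(x, sigma, sigma_next, denoised, noise_sampler):  # hypothetical signature
        # trivial "step": jump straight to the model's denoised prediction;
        # a real step function (e.g. DDPMSampler_step) blends x, denoised and fresh noise
        return denoised

    # samples = generic_step_sampler(model, x, sigmas, step_function=identity_step)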
ldm_patched/ldm/modules/attention.py
CHANGED
@@ -16,6 +16,7 @@ from torch import einsum, nn
 from .diffusionmodules.util import AlphaBlender, checkpoint, timestep_embedding
 
+isSage2 = False
 if model_management.sage_enabled():
     import importlib.metadata
     from sageattention import sageattn
@@ -30,9 +31,7 @@ if model_management.flash_enabled():
     from flash_attn import flash_attn_func
 
     @torch.library.custom_op("flash_attention::flash_attn", mutates_args=())
-    def flash_attn_wrapper(
-        q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, dropout_p: float = 0.0, causal: bool = False
-    ) -> torch.Tensor:
+    def flash_attn_wrapper(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, dropout_p: float = 0.0, causal: bool = False) -> torch.Tensor:
         return flash_attn_func(q, k, v, dropout_p=dropout_p, causal=causal)
 
     @flash_attn_wrapper.register_fake
@@ -41,7 +40,7 @@ if model_management.flash_enabled():
 
 import ldm_patched.modules.ops
-from ldm_patched.modules.args_parser import args
+from ldm_patched.modules.args_parser import args, SageAttentionAPIs
 
 ops = ldm_patched.modules.ops.disable_weight_init
 
@@ -104,11 +103,7 @@ class FeedForward(nn.Module):
         super().__init__()
         inner_dim = int(dim * mult)
         dim_out = default(dim_out, dim)
-        project_in = (
-            nn.Sequential(operations.Linear(dim, inner_dim, dtype=dtype, device=device), nn.GELU())
-            if not glu
-            else GEGLU(dim, inner_dim, dtype=dtype, device=device, operations=operations)
-        )
+        project_in = nn.Sequential(operations.Linear(dim, inner_dim, dtype=dtype, device=device), nn.GELU()) if not glu else GEGLU(dim, inner_dim, dtype=dtype, device=device, operations=operations)
 
         self.net = nn.Sequential(
             project_in,
@@ -138,11 +133,7 @@ def attention_basic(q, k, v, heads, mask=None):
     h = heads
     q, k, v = map(
-        lambda t: t.unsqueeze(3)
-        .reshape(b, -1, heads, dim_head)
-        .permute(0, 2, 1, 3)
-        .reshape(b * heads, -1, dim_head)
-        .contiguous(),
+        lambda t: t.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head).contiguous(),
         (q, k, v),
     )
@@ -214,6 +205,18 @@ def attention_xformers(q, k, v, heads, mask=None):
     return out.unsqueeze(0).reshape(b, heads, -1, dim_head).transpose(1, 2).reshape(b, -1, heads * dim_head)
 
 
+if isSage2 and args.sageattn2_api is not SageAttentionAPIs.Automatic:
+    from functools import partial
+    from sageattention import sageattn_qk_int8_pv_fp16_triton, sageattn_qk_int8_pv_fp16_cuda, sageattn_qk_int8_pv_fp8_cuda
+
+    if args.sageattn2_api is SageAttentionAPIs.Triton16:
+        sageattn = sageattn_qk_int8_pv_fp16_triton
+    if args.sageattn2_api is SageAttentionAPIs.CUDA16:
+        sageattn = partial(sageattn_qk_int8_pv_fp16_cuda, qk_quant_gran="per_warp", pv_accum_dtype="fp16+fp32")
+    if args.sageattn2_api is SageAttentionAPIs.CUDA8:
+        sageattn = partial(sageattn_qk_int8_pv_fp8_cuda, qk_quant_gran="per_warp", pv_accum_dtype="fp16+fp32")
+
+
 def attention_sage(q, k, v, heads, mask=None):
     """
     Reference: https://github.com/comfyanonymous/ComfyUI/blob/v0.3.13/comfy/ldm/modules/attention.py#L472
@@ -234,13 +237,9 @@ def attention_sage(q, k, v, heads, mask=None):
         (q, k, v),
     )
 
-    if mask.ndim == 2:
-        mask = mask.unsqueeze(0)
-    if mask.ndim == 3:
-        mask = mask.unsqueeze(1)
+    assert mask is None
 
-    out = sageattn(q, k, v,
+    out = sageattn(q, k, v, is_causal=False, tensor_layout="NHD")
     return out.reshape(b, -1, heads * dim_head)
 
@@ -285,7 +284,16 @@ def attention_flash(q, k, v, heads, mask=None, attn_precision=None, skip_reshape
 
 if model_management.sage_enabled():
-
+    match args.sageattn2_api:
+        case SageAttentionAPIs.Automatic:
+            print("Using sage attention")
+        case SageAttentionAPIs.Triton16:
+            print("Using sage attention (Triton fp16)")
+        case SageAttentionAPIs.CUDA16:
+            print("Using sage attention (CUDA fp16)")
+        case SageAttentionAPIs.CUDA8:
+            print("Using sage attention (CUDA fp8)")
+
     optimized_attention = attention_sage
 elif model_management.flash_enabled():
     print("Using flash attention")
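The SageAttention kernels consume q/k/v in "NHD" layout, i.e. (batch, seq, heads, dim_head), which is what the reshape inside attention_sage produces before calling sageattn(..., tensor_layout="NHD"). A small sketch of that layout convention, following the ComfyUI reference linked in the docstring (shapes are illustrative assumptions):

    import torch

    b, seq, inner = 2, 77, 640  # hypothetical sizes
    heads = 10
    dim_head = inner // heads
    q = torch.randn(b, seq, inner)

    # "NHD" layout expected by sageattn: (batch, seq, heads, dim_head)
    q_nhd = q.reshape(b, seq, heads, dim_head)
    assert q_nhd.shape == (2, 77, 10, 64)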
ldm_patched/modules/args_parser.py
CHANGED
@@ -2,6 +2,26 @@
 
 import argparse
+import enum
+
+
+class EnumAction(argparse.Action):
+    """Argparse `action` for handling Enum"""
+
+    def __init__(self, **kwargs):
+        enum_type = kwargs.pop("type", None)
+        assert issubclass(enum_type, enum.Enum)
+
+        choices = tuple(e.value for e in enum_type)
+        kwargs.setdefault("choices", choices)
+        kwargs.setdefault("metavar", f"[{','.join(list(choices))}]")
+
+        super(EnumAction, self).__init__(**kwargs)
+        self._enum = enum_type
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        value = self._enum(values)
+        setattr(namespace, self.dest, value)
 
 parser = argparse.ArgumentParser()
@@ -58,4 +78,15 @@ parser.add_argument("--pin-shared-memory", action="store_true")
 parser.add_argument("--fast-fp16", action="store_true")
 
+
+class SageAttentionAPIs(enum.Enum):
+    Automatic = "auto"
+    Triton16 = "triton-fp16"
+    CUDA16 = "cuda-fp16"
+    CUDA8 = "cuda-fp8"
+
+
+parser.add_argument("--sageattn2-api", type=SageAttentionAPIs, default=SageAttentionAPIs.Automatic, action=EnumAction)
+
+
 args = parser.parse_args([])
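EnumAction converts the string passed on the command line back into the Enum member, so downstream code can compare identities (args.sageattn2_api is SageAttentionAPIs.Triton16) rather than raw strings. A minimal usage sketch, assuming the EnumAction class from the diff above is in scope (the Color enum is hypothetical, for illustration only):

    import argparse
    import enum

    class Color(enum.Enum):  # hypothetical enum
        Red = "red"
        Blue = "blue"

    parser = argparse.ArgumentParser()
    parser.add_argument("--color", type=Color, default=Color.Red, action=EnumAction)

    args = parser.parse_args(["--color", "blue"])
    assert args.color is Color.Blue  # parsed back into an Enum member, not a str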
modules/api/api.py
CHANGED
@@ -34,6 +34,7 @@ from modules import (
     sd_hijack,
     sd_models,
     sd_samplers,
+    sd_schedulers,
     shared_items,
     ui,
 )
@@ -246,6 +247,7 @@ class Api:
         self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
         self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel)
         self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=list[models.SamplerItem])
+        self.add_api_route("/sdapi/v1/schedulers", self.get_schedulers, methods=["GET"], response_model=list[models.SchedulerItem])
         self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=list[models.UpscalerItem])
         self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, methods=["GET"], response_model=list[models.LatentUpscalerModeItem])
         self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=list[models.SDModelItem])
@@ -684,6 +686,18 @@ class Api:
     def get_samplers(self):
         return [{"name": sampler[0], "aliases": sampler[2], "options": sampler[3]} for sampler in sd_samplers.all_samplers]
 
+    def get_schedulers(self):
+        return [
+            {
+                "name": scheduler.name,
+                "label": scheduler.label,
+                "aliases": scheduler.aliases,
+                "default_rho": scheduler.default_rho,
+                "need_inner_model": scheduler.need_inner_model,
+            }
+            for scheduler in sd_schedulers.schedulers
+        ]
+
     def get_upscalers(self):
         return [
             {
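Clients can now enumerate the available schedulers the same way they already enumerate samplers. A hedged sketch of querying the new endpoint (host and port assume a default local install started with --api):

    import requests

    resp = requests.get("http://127.0.0.1:7860/sdapi/v1/schedulers", timeout=10)
    resp.raise_for_status()
    for sched in resp.json():
        # each item matches models.SchedulerItem: name, label, aliases,
        # default_rho, need_inner_model
        print(sched["name"], sched["label"])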
modules/api/models.py
CHANGED
@@ -290,6 +290,14 @@ class SamplerItem(BaseModel):
     options: dict[str, str] = Field(title="Options")
 
 
+class SchedulerItem(BaseModel):
+    name: str = Field(title="Name")
+    label: str = Field(title="Label")
+    aliases: Optional[list[str]] = Field(title="Aliases")
+    default_rho: Optional[float] = Field(title="Default Rho")
+    need_inner_model: Optional[bool] = Field(title="Needs Inner Model")
+
+
 class UpscalerItem(BaseModel):
     name: str = Field(title="Name")
     model_name: Optional[str] = Field(title="Model Name")
modules/cmd_args.py
CHANGED
@@ -95,6 +95,7 @@ parser.add_argument("--disable-extra-extensions", action="store_true", help="pre
 parser.add_argument("--forge-ref-a1111-home", type=Path, help="Look for models in an existing A1111 checkout's path", default=None)
 parser.add_argument("--controlnet-dir", type=Path, help="Path to directory with ControlNet models", default=None)
 parser.add_argument("--controlnet-preprocessor-models-dir", type=Path, help="Path to directory with annotator model directories", default=None)
+parser.add_argument("--adv-samplers", action="store_true", help='show the "sampler parameters" advanced settings')
 parser.add_argument("--fps", type=int, default=30, help="refresh rate for threads")
 
 pkm = parser.add_mutually_exclusive_group()
modules/esrgan_model.py
CHANGED
@@ -1,38 +1,46 @@
+import re
+from functools import lru_cache
+
+from PIL import Image
+
+from modules import devices, errors, modelloader
 from modules.shared import opts
 from modules.upscaler import Upscaler, UpscalerData
 from modules.upscaler_utils import upscale_with_model
 from modules_forge.forge_util import prepare_free_memory
-from functools import lru_cache
-
-
-URL = "https://github.com/cszn/KAIR/releases/download/v1.0/ESRGAN.pth"
 
 
 class UpscalerESRGAN(Upscaler):
-
     def __init__(self, dirname: str):
+        self.user_path = dirname
+        self.model_path = dirname
+        super().__init__(True)
+
         self.name = "ESRGAN"
-        self.model_url =
+        self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/ESRGAN.pth"
         self.model_name = "ESRGAN"
         self.scalers = []
-
-        super().__init__()
+
         model_paths = self.find_models(ext_filter=[".pt", ".pth", ".safetensors"])
-        scalers = []
         if len(model_paths) == 0:
             scaler_data = UpscalerData(self.model_name, self.model_url, self, 4)
-            scalers.append(scaler_data)
+            self.scalers.append(scaler_data)
+
         for file in model_paths:
             if file.startswith("http"):
                 name = self.model_name
             else:
                 name = modelloader.friendly_name(file)
 
-
+            if match := re.search(r"(\d)[xX]|[xX](\d)", name):
+                scale = int(match.group(1) or match.group(2))
+            else:
+                scale = 4
+
+            scaler_data = UpscalerData(name, file, self, scale)
             self.scalers.append(scaler_data)
 
-    def do_upscale(self, img, selected_model):
+    def do_upscale(self, img: Image.Image, selected_model: str):
         prepare_free_memory()
         try:
             model = self.load_model(selected_model)
@@ -40,8 +48,8 @@ class UpscalerESRGAN(Upscaler):
             errors.report(f"Unable to load {selected_model}", exc_info=True)
             return img
         return upscale_with_model(
-            model,
-            img,
+            model=model,
+            img=img,
             tile_size=opts.ESRGAN_tile,
             tile_overlap=opts.ESRGAN_tile_overlap,
         )
@@ -57,10 +65,6 @@ class UpscalerESRGAN(Upscaler):
             file_name=path.rsplit("/", 1)[-1],
         )
 
-        model = modelloader.load_spandrel_model(
-            filename,
-            device=("cpu" if devices.device_esrgan.type == "mps" else None),
-            expected_architecture="ESRGAN",
-        )
+        model = modelloader.load_spandrel_model(filename, device="cpu")
         model.to(devices.device_esrgan)
         return model
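The new constructor infers the upscale factor from model filenames such as "4x-UltraSharp" or "RealESRGAN_x2plus" instead of hardcoding 4. A quick sketch of what the regex accepts (the file names are hypothetical examples):

    import re

    def parse_scale(name: str) -> int:
        # "4x..." matches group(1); "...x2..." matches group(2); otherwise default to 4
        if match := re.search(r"(\d)[xX]|[xX](\d)", name):
            return int(match.group(1) or match.group(2))
        return 4

    assert parse_scale("4x-UltraSharp") == 4
    assert parse_scale("RealESRGAN_x2plus") == 2
    assert parse_scale("MyUpscaler") == 4  # no scale in the name -> default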
modules/images.py
CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import datetime
+import functools
 
 import pytz
 import io
@@ -22,14 +23,12 @@ from modules import sd_samplers, shared, script_callbacks, errors
 from modules.paths_internal import roboto_ttf_file
 from modules.shared import opts
 
-LANCZOS =
+LANCZOS = Image.LANCZOS if hasattr(Image, "LANCZOS") else Image.Resampling.LANCZOS
+NEAREST = Image.LANCZOS if hasattr(Image, "NEAREST") else Image.Resampling.NEAREST
 
 
 def get_font(fontsize: int):
-    try:
-        return ImageFont.truetype(opts.font or roboto_ttf_file, fontsize)
-    except Exception:
-        return ImageFont.truetype(roboto_ttf_file, fontsize)
+    return ImageFont.truetype(roboto_ttf_file, fontsize)
 
 
 def image_grid(imgs, batch_size=1, rows=None):
@@ -347,6 +346,32 @@ def sanitize_filename_part(text, replace_spaces=True):
     return text
 
 
+@functools.lru_cache(maxsize=10, typed=False)
+def get_scheduler_str(sampler_name: str, scheduler_name: str):
+    """Returns {Scheduler} if the scheduler is applicable to the sampler"""
+    if scheduler_name == "Automatic":
+        config = sd_samplers.find_sampler_config(sampler_name)
+        scheduler_name = config.options.get("scheduler", "Automatic")
+    return scheduler_name.capitalize()
+
+
+@functools.lru_cache(maxsize=10, typed=False)
+def get_sampler_scheduler_str(sampler_name: str, scheduler_name: str):
+    """Returns the '{Sampler} {Scheduler}' if the scheduler is applicable to the sampler"""
+    return f"{sampler_name} {get_scheduler_str(sampler_name, scheduler_name)}"
+
+
+def get_sampler_scheduler(p, sampler: str):
+    """Returns '{Sampler} {Scheduler}' / '{Scheduler}' / 'NOTHING_AND_SKIP_PREVIOUS_TEXT'"""
+    if hasattr(p, "scheduler") and hasattr(p, "sampler_name"):
+        if sampler:
+            sampler_scheduler = get_sampler_scheduler_str(p.sampler_name, p.scheduler)
+        else:
+            sampler_scheduler = get_scheduler_str(p.sampler_name, p.scheduler)
+        return sanitize_filename_part(sampler_scheduler, replace_spaces=False)
+    return NOTHING_AND_SKIP_PREVIOUS_TEXT
+
+
@@ -358,6 +383,8 @@ class FilenameGenerator:
     "height": lambda self: self.image.height,
     "styles": lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False),
     "sampler": lambda self: self.p and sanitize_filename_part(self.p.sampler_name, replace_spaces=False),
+    "sampler_scheduler": lambda self: self.p and get_sampler_scheduler(self.p, True),
+    "scheduler": lambda self: self.p and get_sampler_scheduler(self.p, False),
     "model_hash": lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
     "model_name": lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.name_for_extra, replace_spaces=False),
     "date": lambda self: datetime.datetime.now().strftime("%Y-%m-%d"),
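These two replacements add [sampler_scheduler] and [scheduler] to the filename pattern placeholders, with lru_cache keeping repeated lookups cheap during batch saves. A hedged sketch of the resulting strings (sampler and scheduler values are illustrative):

    class P:  # hypothetical stand-in for the processing object
        sampler_name = "DPM++ 2M"
        scheduler = "karras"

    get_sampler_scheduler(P(), True)   # -> "DPM++ 2M Karras" (sanitized for filenames)
    get_sampler_scheduler(P(), False)  # -> "Karras"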
modules/img2img.py
CHANGED
@@ -160,8 +160,6 @@ def img2img_function(
     inpaint_color_sketch_orig,
     init_img_inpaint,
     init_mask_inpaint,
-    steps: int,
-    sampler_name: str,
     mask_blur: int,
     mask_alpha: float,
     inpainting_fill: int,
@@ -235,10 +233,8 @@ def img2img_function(
         prompt=prompt,
         negative_prompt=negative_prompt,
         styles=prompt_styles,
-        sampler_name=sampler_name,
         batch_size=batch_size,
         n_iter=n_iter,
-        steps=steps,
         cfg_scale=cfg_scale,
         width=width,
         height=height,
@@ -300,8 +296,6 @@ def img2img(
     inpaint_color_sketch_orig,
     init_img_inpaint,
     init_mask_inpaint,
-    steps: int,
-    sampler_name: str,
     mask_blur: int,
     mask_alpha: float,
     inpainting_fill: int,
@@ -342,8 +336,6 @@ def img2img(
     inpaint_color_sketch_orig,
     init_img_inpaint,
     init_mask_inpaint,
-    steps,
-    sampler_name,
     mask_blur,
     mask_alpha,
     inpainting_fill,
modules/infotext_utils.py
CHANGED
@@ -299,6 +299,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
     if "Hires sampler" not in res:
         res["Hires sampler"] = "Use same sampler"
 
+    if "Hires schedule type" not in res:
+        res["Hires schedule type"] = "Use same scheduler"
+
     if "Hires checkpoint" not in res:
         res["Hires checkpoint"] = "Use same checkpoint"
modules/launch_utils.py
CHANGED
@@ -336,12 +336,10 @@ def prepare_environment():
     startup_timer.record("install requirements")
 
     if not is_installed("insightface"):
-
-            f
-
-
-            live=False,
-        )
+        try:
+            run_pip(f"install --no-deps {insightface_package}", "insightface")
+        except RuntimeError:
+            print("Failed to install insightface; please manually install C++ build tools first")
 
     if not args.skip_install:
         run_extensions_installers(settings_file=args.ui_settings_file)
modules/modelloader.py
CHANGED
@@ -1,29 +1,23 @@
 from __future__ import annotations
 
-import importlib
 import logging
 import os
+import os.path
 from urllib.parse import urlparse
 
-import torch
 import spandrel
 import spandrel_extra_arches
+import torch
 
 from modules import shared
-from modules.
+from modules.errors import display
+from modules.upscaler import UpscalerLanczos, UpscalerNearest, UpscalerNone
 
 spandrel_extra_arches.install()
 logger = logging.getLogger(__name__)
 
-def load_file_from_url(
-    url: str,
-    *,
-    model_dir: str,
-    progress: bool = True,
-    file_name: str | None = None,
-) -> str:
+def load_file_from_url(url: str, *, model_dir: str, progress: bool = True, file_name: str | None = None) -> str:
     """
     Download a file from `url` into `model_dir`, using the file present if possible.
     Returns the path to the downloaded file.
@@ -36,6 +30,7 @@ def load_file_from_url(
     if not os.path.exists(cached_file):
         print(f'Downloading: "{url}" to {cached_file}\n')
         from torch.hub import download_url_to_file
+
         download_url_to_file(url, cached_file, progress=progress)
     return cached_file
 
@@ -59,44 +54,40 @@ def load_models(
 
     @return: A list of paths containing the desired model(s)
     """
-    output =
+    output: set[str] = set()
 
     try:
-        if command_path is not None and command_path != model_path:
-            pretrained_path = os.path.join(command_path, "experiments", "pretrained_models")
-            if os.path.exists(pretrained_path):
-                print(f"Appending path: {pretrained_path}")
-                places.append(pretrained_path)
-            elif os.path.exists(command_path):
-                places.append(command_path)
+        folders = [model_path]
 
-        for place in
+        if command_path != model_path and command_path is not None:
+            if os.path.isdir(command_path):
+                folders.append(command_path)
+            elif os.path.isfile(command_path):
+                output.add(command_path)
+
+        for place in folders:
             for full_path in shared.walk_files(place, allowed_extensions=ext_filter):
                 if os.path.islink(full_path) and not os.path.exists(full_path):
                     print(f"Skipping broken symlink: {full_path}")
                     continue
                 if ext_blacklist is not None and any(full_path.endswith(x) for x in ext_blacklist):
                     continue
-                if full_path
-                    output.
+                if os.path.isfile(full_path):
+                    output.add(full_path)
 
         if model_url is not None and len(output) == 0:
             if download_name is not None:
-                output.
+                output.add(load_file_from_url(model_url, model_dir=folders[0], file_name=download_name))
             else:
-                output.
+                output.add(model_url)
 
-    except Exception:
-
+    except Exception as e:
+        display(e, "load_models")
 
-    return output
+    return sorted(output, key=lambda mdl: mdl.lower())
 
 
-def friendly_name(file: str):
+def friendly_name(file: str) -> str:
     if file.startswith("http"):
         file = urlparse(file).path
 
@@ -106,39 +97,19 @@ def friendly_name(file: str):
 
 
 def load_upscalers():
-        used_classes[classname] = cls
-
-    for cls in reversed(used_classes.values()):
-        name = cls.__name__
-        cmd_name = f"{name.lower().replace('upscaler', '')}_models_path"
-        commandline_model_path = commandline_options.get(cmd_name, None)
-        scaler = cls(commandline_model_path)
-        scaler.user_path = commandline_model_path
-        scaler.model_download_path = commandline_model_path or scaler.model_path
-        all_upscalers += scaler.scalers
-
-    shared.sd_upscalers = sorted(
-        all_upscalers,
-        # Special case for UpscalerNone keeps it at the beginning of the list.
-        key=lambda x: (
-            ""
-            if isinstance(x.scaler, (UpscalerNone, UpscalerLanczos, UpscalerNearest))
-            else x.name.lower()
-        ),
-    )
+    from modules.esrgan_model import UpscalerESRGAN
+
+    commandline_model_path = shared.cmd_opts.esrgan_models_path
+    upscaler = UpscalerESRGAN(commandline_model_path)
+    upscaler.user_path = commandline_model_path
+    upscaler.model_download_path = commandline_model_path or upscaler.model_path
+
+    shared.sd_upscalers = [
+        *UpscalerNone().scalers,
+        *UpscalerLanczos().scalers,
+        *UpscalerNearest().scalers,
+        *sorted(upscaler.scalers, key=lambda s: s.name.lower()),
+    ]
 
 
 def load_spandrel_model(
@@ -164,10 +135,7 @@ def load_spandrel_model(
     if dtype:
         model_descriptor.model.to(dtype=dtype)
 
-    logger.debug(
-        "Loaded %s from %s (device=%s, half=%s, dtype=%s)",
-        arch, path, device, half, dtype,
-    )
+    logger.debug("Loaded %s from %s (device=%s, half=%s, dtype=%s)", arch, path, device, half, dtype)
 
     model_descriptor.model.eval()
     return model_descriptor
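load_models now collects paths into a set and returns them sorted case-insensitively, so duplicates from overlapping folders collapse and ordering is stable. A small sketch of that behavior (paths are hypothetical):

    output = {"models/B.pth", "models/a.pth", "models/B.pth"}  # duplicate collapses in the set
    assert sorted(output, key=lambda mdl: mdl.lower()) == ["models/a.pth", "models/B.pth"]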
modules/options.py
CHANGED
@@ -11,7 +11,7 @@ from modules.paths_internal import script_path
 
 
 class OptionInfo:
-    def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before=
+    def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before="", comment_after="", infotext=None, restrict_api=False, category_id=None):
         self.default = default
         self.label = label
         self.component = component
@@ -60,7 +60,14 @@ class OptionInfo:
 
 class OptionHTML(OptionInfo):
     def __init__(self, text):
-        super().__init__(str(text).strip(), label=
+        super().__init__(str(text).strip(), label="", component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs))
+
+        self.do_not_save = True
+
+
+class OptionDiv(OptionInfo):
+    def __init__(self):
+        super().__init__("", label="", component=lambda **kwargs: gr.HTML(elem_classes="settings-div", **kwargs))
 
         self.do_not_save = True
 
@@ -104,19 +111,19 @@ class Options:
 
         # Restrict component arguments
         comp_args = info.component_args if info else None
-        if isinstance(comp_args, dict) and comp_args.get(
+        if isinstance(comp_args, dict) and comp_args.get("visible", True) is False:
             raise RuntimeError(f"not possible to set '{key}' because it is restricted")
 
         # Check that this section isn't frozen
         if cmd_opts.freeze_settings_in_sections is not None:
-            frozen_sections = list(map(str.strip, cmd_opts.freeze_settings_in_sections.split(
+            frozen_sections = list(map(str.strip, cmd_opts.freeze_settings_in_sections.split(",")))  # Trim whitespace from section names
             section_key = info.section[0]
             section_name = info.section[1]
             assert section_key not in frozen_sections, f"not possible to set '{key}' because settings in section '{section_name}' ({section_key}) are frozen with --freeze-settings-in-sections"
 
         # Check that this section of the settings isn't frozen
         if cmd_opts.freeze_specific_settings is not None:
-            frozen_keys = list(map(str.strip, cmd_opts.freeze_specific_settings.split(
+            frozen_keys = list(map(str.strip, cmd_opts.freeze_specific_settings.split(",")))  # Trim whitespace from setting keys
             assert key not in frozen_keys, f"not possible to set '{key}' because this setting is frozen with --freeze-specific-settings"
 
         # Check shorthand option which disables editing options in "saving-paths"
@@ -201,20 +208,20 @@ class Options:
         except FileNotFoundError:
             self.data = {}
         except Exception:
-            errors.report(f'\nCould not load settings\nThe config file "{filename}" is likely corrupted\nIt has been moved to the "tmp/config.json"\nReverting config to default\n\n'
+            errors.report(f'\nCould not load settings\nThe config file "{filename}" is likely corrupted\nIt has been moved to the "tmp/config.json"\nReverting config to default\n\n' "", exc_info=True)
             os.replace(filename, os.path.join(script_path, "tmp", "config.json"))
             self.data = {}
         # 1.6.0 VAE defaults
-        if self.data.get(
-            self.data[
+        if self.data.get("sd_vae_as_default") is not None and self.data.get("sd_vae_overrides_per_model_preferences") is None:
+            self.data["sd_vae_overrides_per_model_preferences"] = not self.data.get("sd_vae_as_default")
 
         # 1.1.1 quicksettings list migration
-        if self.data.get(
-            self.data[
+        if self.data.get("quicksettings") is not None and self.data.get("quicksettings_list") is None:
+            self.data["quicksettings_list"] = [i.strip() for i in self.data.get("quicksettings").split(",")]
 
         # 1.4.0 ui_reorder
-        if isinstance(self.data.get(
-            self.data[
+        if isinstance(self.data.get("ui_reorder"), str) and self.data.get("ui_reorder") and "ui_reorder_list" not in self.data:
+            self.data["ui_reorder_list"] = [i.strip() for i in self.data.get("ui_reorder").split(",")]
 
         bad_settings = 0
         for k, v in self.data.items():
@@ -319,6 +326,7 @@ class OptionsCategory:
     id: str
     label: str
 
+
 class OptionsCategories:
     def __init__(self):
         self.mapping = {}
CHANGED
@@ -95,30 +95,6 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir,
|
|
95 |
if save_output:
|
96 |
fullfn, _ = images.save_image(pp.image, path=outpath, basename=basename, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=forced_filename, suffix=suffix)
|
97 |
|
98 |
-
if pp.caption:
|
99 |
-
caption_filename = os.path.splitext(fullfn)[0] + ".txt"
|
100 |
-
existing_caption = ""
|
101 |
-
try:
|
102 |
-
with open(caption_filename, encoding="utf8") as file:
|
103 |
-
existing_caption = file.read().strip()
|
104 |
-
except FileNotFoundError:
|
105 |
-
pass
|
106 |
-
|
107 |
-
action = shared.opts.postprocessing_existing_caption_action
|
108 |
-
if action == 'Prepend' and existing_caption:
|
109 |
-
caption = f"{existing_caption} {pp.caption}"
|
110 |
-
elif action == 'Append' and existing_caption:
|
111 |
-
caption = f"{pp.caption} {existing_caption}"
|
112 |
-
elif action == 'Keep' and existing_caption:
|
113 |
-
caption = existing_caption
|
114 |
-
else:
|
115 |
-
caption = pp.caption
|
116 |
-
|
117 |
-
caption = caption.strip()
|
118 |
-
if caption:
|
119 |
-
with open(caption_filename, "w", encoding="utf8") as file:
|
120 |
-
file.write(caption)
|
121 |
-
|
122 |
if extras_mode != 2 or show_extras_results:
|
123 |
outputs.append(pp.image)
|
124 |
|
|
|
95 |
if save_output:
|
96 |
fullfn, _ = images.save_image(pp.image, path=outpath, basename=basename, extension=opts.samples_format, info=infotext, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=forced_filename, suffix=suffix)
|
97 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
98 |
if extras_mode != 2 or show_extras_results:
|
99 |
outputs.append(pp.image)
|
100 |
|
modules/processing.py
CHANGED
@@ -147,6 +147,7 @@ class StableDiffusionProcessing:
     seed_resize_from_w: int = -1
     seed_enable_extras: bool = True
     sampler_name: str = None
+    scheduler: str = None
     batch_size: int = 1
     n_iter: int = 1
     steps: int = 50
@@ -660,6 +661,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
     generation_params = {
         "Steps": p.steps,
         "Sampler": p.sampler_name,
+        "Schedule type": p.scheduler,
         "CFG scale": p.cfg_scale,
         "Image CFG scale": getattr(p, 'image_cfg_scale', None),
         "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index],
@@ -728,6 +730,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
             if k == 'sd_vae':
                 sd_vae.reload_vae_weights()
 
+        sd_samplers.fix_p_invalid_sampler_and_scheduler(p)
+
         res = process_images_inner(p)
 
     finally:
@@ -1049,6 +1053,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
     hr_resize_y: int = 0
     hr_checkpoint_name: str = None
     hr_sampler_name: str = None
+    hr_scheduler: str = None
     hr_prompt: str = ''
     hr_negative_prompt: str = ''
     force_task_id: str = None
@@ -1140,6 +1145,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name:
             self.extra_generation_params["Hires sampler"] = self.hr_sampler_name
 
+        self.extra_generation_params["Hires schedule type"] = None  # to be set in sd_samplers_kdiffusion.py
+
+        if self.hr_scheduler is None:
+            self.hr_scheduler = self.scheduler
+
         if tuple(self.hr_prompt) != tuple(self.prompt):
             self.extra_generation_params["Hires prompt"] = self.hr_prompt
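With the new field, generated images record the schedule alongside the sampler, so the infotext line gains a "Schedule type" entry; an illustrative example (values hypothetical, following the format shown in the infotext_utils hunk above):

    Steps: 20, Sampler: DPM++ 2M, Schedule type: Karras, CFG scale: 7, Seed: 965400086, Size: 512x512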
modules/processing_scripts/comments.py
CHANGED
@@ -40,7 +40,7 @@ script_callbacks.on_before_token_counter(before_token_counter)
 
 shared.options_templates.update(
     shared.options_section(
-        ("
-        {"enable_prompt_comments": shared.OptionInfo(True, "Enable
+        ("ui_alternatives", "UI Alternatives", "ui"),
+        {"enable_prompt_comments": shared.OptionInfo(True, "Enable Comments").info("Ignore the texts between # and the end of the line from the prompts")},
     )
 )
modules/processing_scripts/mahiro.py
CHANGED
@@ -6,6 +6,7 @@ https://github.com/comfyanonymous/ComfyUI/blob/v0.3.26/comfy_extras/nodes_mahiro
 import gradio as gr
 import torch
 import torch.nn.functional as F
+
 from modules import scripts
 from modules.infotext_utils import PasteField
 from modules.shared import opts
modules/processing_scripts/refiner.py
CHANGED
@@ -1,4 +1,5 @@
 import gradio as gr
+
 from modules import scripts, sd_models
 from modules.infotext_utils import PasteField
 from modules.shared import opts
modules/processing_scripts/rescale_cfg.py
CHANGED
@@ -5,6 +5,7 @@ https://github.com/comfyanonymous/ComfyUI/blob/v0.3.7/comfy_extras/nodes_model_a
 
 import gradio as gr
 import torch
+
 from modules import scripts
 from modules.infotext_utils import PasteField
 from modules.shared import opts
modules/processing_scripts/sampler.py
ADDED
@@ -0,0 +1,66 @@
+import gradio as gr
+
+from modules import scripts, sd_samplers, sd_schedulers, shared
+from modules.infotext_utils import PasteField
+from modules.ui_components import FormRow
+
+
+class ScriptSampler(scripts.ScriptBuiltinUI):
+    create_group = False
+    section = "sampler"
+
+    def __init__(self):
+        self.steps = None
+        self.sampler_name = None
+        self.scheduler = None
+
+    def title(self):
+        return "Sampler"
+
+    def show(self, is_img2img):
+        return scripts.AlwaysVisible
+
+    def ui(self, is_img2img):
+        sampler_names: list[str] = sd_samplers.visible_sampler_names()
+        scheduler_names: list[str] = [x.label for x in sd_schedulers.schedulers]
+
+        with FormRow(elem_id=f"sampler_selection_{self.tabname}"):
+            self.sampler_name = gr.Dropdown(
+                label="Sampling method",
+                elem_id=f"{self.tabname}_sampling",
+                choices=sampler_names,
+                value=sampler_names[0],
+            )
+            if shared.opts.show_scheduler:
+                self.scheduler = gr.Dropdown(
+                    label="Schedule type",
+                    elem_id=f"{self.tabname}_scheduler",
+                    choices=scheduler_names,
+                    value=scheduler_names[0],
+                )
+            else:
+                self.scheduler = gr.State(value="Automatic")
+                self.scheduler.do_not_save_to_config = True
+            self.steps = gr.Slider(
+                minimum=1,
+                maximum=150,
+                step=1,
+                elem_id=f"{self.tabname}_steps",
+                label="Sampling steps",
+                value=20,
+            )
+
+        self.infotext_fields = [
+            PasteField(self.steps, "Steps", api="steps"),
+            PasteField(self.sampler_name, sd_samplers.get_sampler_from_infotext, api="sampler_name"),
+        ]
+
+        if shared.opts.show_scheduler:
+            self.infotext_fields.append(PasteField(self.scheduler, sd_samplers.get_scheduler_from_infotext, api="scheduler"))
+
+        return self.steps, self.sampler_name, self.scheduler
+
+    def setup(self, p, steps, sampler_name, scheduler):
+        p.steps = steps
+        p.sampler_name = sampler_name
+        p.scheduler = scheduler
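ScriptBuiltinUI scripts return their components from ui() and receive the user's choices back in setup(), which is how steps/sampler/scheduler migrate off the img2img function signatures (see the modules/img2img.py hunks above) and onto the processing object. A minimal sketch of that flow (P is a hypothetical stand-in; the scripts framework normally performs this call):

    class P:  # hypothetical stand-in for StableDiffusionProcessing
        pass

    p = P()
    ScriptSampler().setup(p, steps=20, sampler_name="Euler a", scheduler="Automatic")
    assert (p.steps, p.sampler_name, p.scheduler) == (20, "Euler a", "Automatic")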
modules/processing_scripts/seed.py
CHANGED
@@ -1,6 +1,7 @@
 import json
 
 import gradio as gr
+
 from modules import errors, infotext_utils, scripts, ui
 from modules.infotext_utils import PasteField
 from modules.shared import cmd_opts
modules/scripts_postprocessing.py
CHANGED
@@ -19,7 +19,6 @@ class PostprocessedImage:
         self.extra_images = []
         self.nametags = []
         self.disable_processing = False
-        self.caption = None
 
     def get_suffix(self, used_suffixes=None):
         used_suffixes = {} if used_suffixes is None else used_suffixes
modules/sd_emphasis.py
CHANGED
@@ -25,12 +25,12 @@ class Emphasis:
 
 class EmphasisNone(Emphasis):
     name = "None"
-    description = "disable
+    description = "disable Emphasis entirely and treat (:1.2) as literal characters"
 
 
 class EmphasisIgnore(Emphasis):
     name = "Ignore"
-    description = "treat all
+    description = "treat all words as if they have no emphasis"
 
 
 class EmphasisOriginal(Emphasis):
@@ -48,7 +48,7 @@ class EmphasisOriginal(Emphasis):
 
 class EmphasisOriginalNoNorm(EmphasisOriginal):
     name = "No norm"
-    description = "
+    description = "implementation without normalization (fix certain issues for SDXL)"
 
     def after_transformers(self):
         self.z = self.z * self.multipliers.reshape(self.multipliers.shape + (1,)).expand(self.z.shape)
@@ -59,7 +59,11 @@ def get_current_option(emphasis_option_name):
 
 
 def get_options_descriptions():
-    return "
+    return f"""
+    <ul style='margin-left: 1.5em'><li>
+    {"</li><li>".join(f"<b>{x.name}</b>: {x.description}" for x in options)}
+    </li></ul>
+    """
 
 
 options = [
modules/sd_samplers.py
CHANGED
```diff
@@ -1,21 +1,43 @@
-…
+import functools
+import logging
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from modules.sd_samplers_common import SamplerData
+
+from modules import (
+    sd_samplers_kdiffusion,
+    sd_samplers_lcm,
+    sd_samplers_timesteps,
+    sd_schedulers,
+    shared,
+)
+from modules.sd_samplers_common import (  # noqa: F401
+    sample_to_image,
+    samples_to_image_grid,
+)
 from modules_forge import forge_alter_samplers

 all_samplers = [
     *sd_samplers_kdiffusion.samplers_data_k_diffusion,
     *sd_samplers_timesteps.samplers_data_timesteps,
     *sd_samplers_lcm.samplers_data_lcm,
-    *forge_alter_samplers.samplers_data_alter
+    *forge_alter_samplers.samplers_data_alter,
 ]
 all_samplers_map = {x.name: x for x in all_samplers}

-samplers = []
-samplers_for_img2img = []
-samplers_map = {}
-samplers_hidden = {}
+samplers: list["SamplerData"] = []
+samplers_for_img2img: list["SamplerData"] = []
+samplers_map: dict[str, str] = {}
+samplers_hidden: set[str] = {}
+
+
+def get_sampler_from_infotext(d: dict):
+    return get_sampler_and_scheduler(d.get("Sampler"), d.get("Schedule type"))[0]
+
+
+def get_scheduler_from_infotext(d: dict):
+    return get_sampler_and_scheduler(d.get("Sampler"), d.get("Schedule type"))[1]


 def find_sampler_config(name):
@@ -30,7 +52,7 @@ def find_sampler_config(name):
 def create_sampler(name, model):
     config = find_sampler_config(name)

-    assert config is not None, f…
+    assert config is not None, f"bad sampler name: {name}"

     if model.is_sdxl and config.options.get("no_sdxl", False):
         raise Exception(f"Sampler {config.name} is not supported for SDXL")
@@ -44,10 +66,15 @@ def create_sampler(name, model):
 def set_samplers():
     global samplers, samplers_for_img2img, samplers_hidden

-    samplers_hidden = set(shared.opts.hide_samplers)
     samplers = all_samplers
     samplers_for_img2img = all_samplers

+    _samplers_hidden = set(shared.opts.hide_samplers)
+    if shared.opts.hide_samplers_invert:
+        samplers_hidden = set(x.name for x in samplers if x.name not in _samplers_hidden)
+    else:
+        samplers_hidden = _samplers_hidden
+
     samplers_map.clear()
     for sampler in all_samplers:
         samplers_map[sampler.name.lower()] = sampler.name
@@ -55,11 +82,73 @@ def set_samplers():
         samplers_map[alias.lower()] = sampler.name


-def …
-    if …
-…
+def visible_samplers() -> list["SamplerData"]:
+    return [x for x in samplers if x.name not in samplers_hidden]
+
+
+def visible_sampler_names() -> list[str]:
+    return [x.name for x in samplers if x.name not in samplers_hidden]


 set_samplers()
+
+
+def get_hr_sampler_and_scheduler(d: dict):
+    hr_sampler = d.get("Hires sampler", "Use same sampler")
+    sampler = d.get("Sampler") if hr_sampler == "Use same sampler" else hr_sampler
+
+    hr_scheduler = d.get("Hires schedule type", "Use same scheduler")
+    scheduler = d.get("Schedule type") if hr_scheduler == "Use same scheduler" else hr_scheduler
+
+    sampler, scheduler = get_sampler_and_scheduler(sampler, scheduler)
+
+    sampler = sampler if sampler != d.get("Sampler") else "Use same sampler"
+    scheduler = scheduler if scheduler != d.get("Schedule type") else "Use same scheduler"
+
+    return sampler, scheduler
+
+
+def get_hr_sampler_from_infotext(d: dict):
+    return get_hr_sampler_and_scheduler(d)[0]
+
+
+def get_hr_scheduler_from_infotext(d: dict):
+    return get_hr_sampler_and_scheduler(d)[1]
+
+
+@functools.lru_cache(maxsize=10, typed=False)
+def get_sampler_and_scheduler(sampler_name: str, scheduler_name: str, *, status: bool = False):
+    default_sampler = samplers[0]
+    found_scheduler = sd_schedulers.schedulers_map.get(scheduler_name, sd_schedulers.schedulers[0])
+
+    name = sampler_name or default_sampler.name
+
+    for scheduler in sd_schedulers.schedulers:
+        name_options = [scheduler.label, scheduler.name, *(scheduler.aliases or [])]
+
+        for name_option in name_options:
+            if name.endswith(" " + name_option):
+                found_scheduler = scheduler
+                name = name[0 : -(len(name_option) + 1)]
+                break
+
+    sampler = all_samplers_map.get(name, default_sampler)
+
+    _automatic = False
+    if sampler.options.get("scheduler", None) == found_scheduler.name:
+        found_scheduler = sd_schedulers.schedulers[0]
+        _automatic = True
+
+    if not status:
+        return sampler.name, found_scheduler.label
+    else:
+        return sampler.name, found_scheduler.label, _automatic
+
+
+def fix_p_invalid_sampler_and_scheduler(p):
+    i_sampler_name, i_scheduler = p.sampler_name, p.scheduler
+    p.sampler_name, p.scheduler, _automatic = get_sampler_and_scheduler(p.sampler_name, p.scheduler, status=True)
+    if i_sampler_name != p.sampler_name:
+        logging.warning(f'Sampler Correction: "{i_sampler_name}" -> "{p.sampler_name}"')
+    if i_scheduler != p.scheduler and not _automatic:
+        logging.warning(f'Scheduler Correction: "{i_scheduler}" -> "{p.scheduler}"')
```
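
> `get_sampler_and_scheduler` exists so that older infotexts, where the scheduler is fused into the sampler field (e.g. `Sampler: DPM++ 2M Karras`), still resolve cleanly; the same resolver backs the new `Hires sampler` fallbacks. A standalone sketch of the suffix-matching rule, using hypothetical registries in place of `all_samplers_map` / `sd_schedulers`:

```python
# Hypothetical registries for illustration; the real code reads
# all_samplers_map and sd_schedulers.schedulers.
SAMPLERS = {"Euler a", "DPM++ 2M"}
SCHEDULERS = ["Automatic", "Karras", "Exponential"]  # index 0 is the fallback

def split_sampler_and_scheduler(name: str) -> tuple[str, str]:
    scheduler = SCHEDULERS[0]
    for candidate in SCHEDULERS:
        if name.endswith(" " + candidate):        # scheduler appended as a suffix
            scheduler = candidate
            name = name[: -(len(candidate) + 1)]  # strip " <scheduler>" from the end
            break
    sampler = name if name in SAMPLERS else "Euler a"  # fall back to the default sampler
    return sampler, scheduler

print(split_sampler_and_scheduler("DPM++ 2M Karras"))  # ('DPM++ 2M', 'Karras')
print(split_sampler_and_scheduler("Euler a"))          # ('Euler a', 'Automatic')
```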
modules/sd_samplers_cfg_denoiser.py
CHANGED
```diff
@@ -1,14 +1,14 @@
 import torch
 from modules import prompt_parser, sd_samplers_common
-…
+from modules.script_callbacks import (
+    AfterCFGCallbackParams,
+    CFGDenoiserParams,
+    cfg_after_cfg_callback,
+    cfg_denoiser_callback,
+)
 from modules.shared import opts, state
 from modules_forge import forge_sampler

-from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
-from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
-
-# from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
-

 def catenate_conds(conds):
     if not isinstance(conds[0], dict):
@@ -53,37 +53,29 @@ class CFGDenoiser(torch.nn.Module):
         """expected number of calls to denoiser calculated from self.steps and specifics of the selected sampler"""

         self.step = 0
-        self.image_cfg_scale = …
-        self.padded_cond_uncond = …
+        self.image_cfg_scale = 1.0
+        self.padded_cond_uncond = True
         self.padded_cond_uncond_v0 = False
         self.sampler = sampler
         self.model_wrap = None
         self.p = None

-        # Backward Compatibility
         self.mask_before_denoising = False
-
         self.classic_ddim_eps_estimation = False

     @property
     def inner_model(self):
-        raise NotImplementedError
-
-    def combine_denoised(self, x_out, conds_list, uncond, cond_scale, timestep, x_in, cond):
-        denoised_uncond = x_out[-uncond.shape[0] :]
-        denoised = torch.clone(denoised_uncond)
-
-        for i, conds in enumerate(conds_list):
-            for cond_index, weight in conds:
-                denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
-
-        return denoised
-
-    def combine_denoised_for_edit_model(self, x_out, cond_scale):
-        out_cond, out_img_cond, out_uncond = x_out.chunk(3)
-        denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond)
-
-        return denoised
+        raise NotImplementedError
+
+    # def combine_denoised(self, x_out, conds_list, uncond, cond_scale):
+    #     denoised_uncond = x_out[-uncond.shape[0] :]
+    #     denoised = torch.clone(denoised_uncond)
+    #     return denoised
+
+    # def combine_denoised_for_edit_model(self, x_out, cond_scale):
+    #     out_cond, out_img_cond, out_uncond = x_out.chunk(3)
+    #     denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond)
+    #     return denoised

     def get_pred_x0(self, x_in, x_out, sigma):
         return x_out
@@ -95,7 +87,7 @@ class CFGDenoiser(torch.nn.Module):
         self.sampler.sampler_extra_args["cond"] = c
         self.sampler.sampler_extra_args["uncond"] = uc

-    def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond):
+    def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond, **kwargs):
         if state.interrupted or state.skipped:
             raise sd_samplers_common.InterruptedException
@@ -148,7 +140,16 @@ class CFGDenoiser(torch.nn.Module):
             ] * torch.randn_like(self.init_latent)
             x = apply_blend(x, noisy_initial_latent.to(self.init_latent))

-            denoiser_params = CFGDenoiserParams(x, image_cond, sigma, state.sampling_step, state.sampling_steps, cond, uncond, self)
+            denoiser_params = CFGDenoiserParams(
+                x,
+                image_cond,
+                sigma,
+                state.sampling_step,
+                state.sampling_steps,
+                cond,
+                uncond,
+                self,
+            )
             cfg_denoiser_callback(denoiser_params)

             if 0.0 <= self.step / self.total_steps <= opts.skip_early_cond:
@@ -156,13 +157,23 @@ class CFGDenoiser(torch.nn.Module):
             if 0.0 <= sigma[0] <= s_min_uncond:
                 cond_scale = 1.0

+            skip_uncond: bool = abs(cond_scale - 1.0) < 10**-6
+            self.padded_cond_uncond = not skip_uncond
+
             denoised = forge_sampler.forge_sample(
                 self,
                 denoiser_params=denoiser_params,
                 cond_scale=cond_scale,
                 cond_composition=cond_composition,
+                skip_uncond=skip_uncond,
+                options=kwargs.get("model_options", None),
             )

+            # if getattr(self.p.sd_model, "cond_stage_key", None) == "edit" and getattr(self, "image_cfg_scale", 1.0) != 1.0:
+            #     denoised = self.combine_denoised_for_edit_model(denoised, cond_scale)
+            # elif not skip_uncond:
+            #     denoised = self.combine_denoised(denoised, cond_composition, uncond, cond_scale)
+
             if not self.mask_before_denoising and self.mask is not None:
                 denoised = apply_blend(denoised)

```
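
> The new `skip_uncond` flag leans on the identity that classifier-free guidance at scale 1 returns the conditional prediction unchanged, so the unconditional forward pass adds nothing; the epsilon comparison guards against float noise in `cond_scale`. A minimal sketch of the identity, with scalars standing in for latent tensors:

```python
def cfg(uncond, cond, scale):
    # classifier-free guidance combine step
    return uncond + scale * (cond - uncond)

uncond, cond = 0.25, 0.75
assert cfg(uncond, cond, 1.0) == cond  # at scale 1 the uncond term cancels

cond_scale = 1.0
skip_uncond = abs(cond_scale - 1.0) < 10**-6  # same tolerance test as the diff above
print(skip_uncond)  # True -> the uncond batch can be dropped for this step
```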
modules/sd_samplers_cfgpp.py
ADDED
```python
import torch
from k_diffusion.sampling import (
    BrownianTreeNoiseSampler,
    default_noise_sampler,
    get_ancestral_step,
    to_d,
)
from tqdm.auto import trange


def _set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=False):
    model_options["sampler_post_cfg_function"] = model_options.get("sampler_post_cfg_function", []) + [post_cfg_function]
    if disable_cfg1_optimization:
        model_options["disable_cfg1_optimization"] = True
    return model_options


def _sigma_fn(t):
    return t.neg().exp()


def _t_fn(sigma):
    return sigma.log().neg()


@torch.no_grad()
def sample_euler_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None):
    extra_args = {} if extra_args is None else extra_args

    temp = [0]

    def post_cfg_function(args):
        temp[0] = args["uncond_denoised"]
        return args["denoised"]

    model_options = extra_args.get("model_options", {}).copy()
    extra_args["model_options"] = _set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)

    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        sigma_hat = sigmas[i]
        denoised = model(x, sigma_hat * s_in, **extra_args)
        d = to_d(x, sigma_hat, temp[0])
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "sigma_hat": sigma_hat,
                    "denoised": denoised,
                }
            )
        x = denoised + d * sigmas[i + 1]
    return x


@torch.no_grad()
def sample_euler_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
    eta = 1.0
    s_noise = 1.0
    extra_args = {} if extra_args is None else extra_args
    seed = extra_args.get("seed", None)
    noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler

    temp = [0]

    def post_cfg_function(args):
        temp[0] = args["uncond_denoised"]
        return args["denoised"]

    model_options = extra_args.get("model_options", {}).copy()
    extra_args["model_options"] = _set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)

    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "sigma_hat": sigmas[i],
                    "denoised": denoised,
                }
            )
        d = to_d(x, sigmas[i], temp[0])
        x = denoised + d * sigma_down
        if sigmas[i + 1] > 0:
            x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
    return x


@torch.no_grad()
def sample_dpmpp_sde_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
    eta = 1.0
    s_noise = 1.0
    r = 0.5

    if len(sigmas) <= 1:
        return x

    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=True) if noise_sampler is None else noise_sampler
    extra_args = {} if extra_args is None else extra_args

    temp = [0]

    def post_cfg_function(args):
        temp[0] = args["uncond_denoised"]
        return args["denoised"]

    model_options = extra_args.get("model_options", {}).copy()
    extra_args["model_options"] = _set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)

    s_in = x.new_ones([x.shape[0]])

    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "sigma_hat": sigmas[i],
                    "denoised": denoised,
                }
            )

        if sigmas[i + 1] == 0:
            d = to_d(x, sigmas[i], temp[0])
            x = denoised + d * sigmas[i + 1]
        else:
            t, t_next = _t_fn(sigmas[i]), _t_fn(sigmas[i + 1])
            h = t_next - t
            s = t + h * r
            fac = 1 / (2 * r)

            sd, su = get_ancestral_step(_sigma_fn(t), _sigma_fn(s), eta)
            s_ = _t_fn(sd)
            x_2 = (_sigma_fn(s_) / _sigma_fn(t)) * x - (t - s_).expm1() * denoised
            x_2 = x_2 + noise_sampler(_sigma_fn(t), _sigma_fn(s)) * s_noise * su
            denoised_2 = model(x_2, _sigma_fn(s) * s_in, **extra_args)

            sd, su = get_ancestral_step(_sigma_fn(t), _sigma_fn(t_next), eta)
            denoised_d = (1 - fac) * temp[0] + fac * temp[0]
            x = denoised_2 + to_d(x, sigmas[i], denoised_d) * sd
            x = x + noise_sampler(_sigma_fn(t), _sigma_fn(t_next)) * s_noise * su
    return x


@torch.no_grad()
def sample_dpmpp_2m_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None):
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])

    old_uncond_denoised = None
    uncond_denoised = None

    def post_cfg_function(args):
        nonlocal uncond_denoised
        uncond_denoised = args["uncond_denoised"]
        return args["denoised"]

    model_options = extra_args.get("model_options", {}).copy()
    extra_args["model_options"] = _set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)

    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "sigma_hat": sigmas[i],
                    "denoised": denoised,
                }
            )
        t, t_next = _t_fn(sigmas[i]), _t_fn(sigmas[i + 1])
        h = t_next - t
        if old_uncond_denoised is None or sigmas[i + 1] == 0:
            denoised_mix = -torch.exp(-h) * uncond_denoised
        else:
            h_last = t - _t_fn(sigmas[i - 1])
            r = h_last / h
            denoised_mix = -torch.exp(-h) * uncond_denoised - torch.expm1(-h) * (1 / (2 * r)) * (denoised - old_uncond_denoised)
        x = denoised + denoised_mix + torch.exp(-h) * x
        old_uncond_denoised = uncond_denoised
    return x


@torch.no_grad()
def sample_dpmpp_3m_sde_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=None, s_noise=None, noise_sampler=None):
    eta = 1.0 if eta is None else eta
    s_noise = 1.0 if s_noise is None else s_noise

    if len(sigmas) <= 1:
        return x

    seed = extra_args.get("seed", None)
    sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
    noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])

    denoised_1, denoised_2 = None, None
    h, h_1, h_2 = None, None, None

    temp = [0]

    def post_cfg_function(args):
        temp[0] = args["uncond_denoised"]
        return args["denoised"]

    model_options = extra_args.get("model_options", {}).copy()
    extra_args["model_options"] = _set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)

    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "sigma_hat": sigmas[i],
                    "denoised": denoised,
                }
            )
        if sigmas[i + 1] == 0:
            x = denoised
        else:
            t, s = -sigmas[i].log(), -sigmas[i + 1].log()
            h = s - t
            h_eta = h * (eta + 1)

            x = torch.exp(-h_eta) * (x + (denoised - temp[0])) + (-h_eta).expm1().neg() * denoised

            if h_2 is not None:
                r0 = h_1 / h
                r1 = h_2 / h
                d1_0 = (denoised - denoised_1) / r0
                d1_1 = (denoised_1 - denoised_2) / r1
                d1 = d1_0 + (d1_0 - d1_1) * r0 / (r0 + r1)
                d2 = (d1_0 - d1_1) / (r0 + r1)
                phi_2 = h_eta.neg().expm1() / h_eta + 1
                phi_3 = phi_2 / h_eta - 0.5
                x = x + phi_2 * d1 - phi_3 * d2
            elif h_1 is not None:
                r = h_1 / h
                d = (denoised - denoised_1) / r
                phi_2 = h_eta.neg().expm1() / h_eta + 1
                x = x + phi_2 * d

            if eta:
                x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * h * eta).expm1().neg().sqrt() * s_noise

        denoised_1, denoised_2 = denoised, denoised_1
        h_1, h_2 = h, h_1
    return x
```
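
> All five CFG++ samplers rely on the same hook: each registers a `sampler_post_cfg_function` that stashes `args["uncond_denoised"]` in a closure (`temp[0]`, or `uncond_denoised` via `nonlocal`) while returning `args["denoised"]` untouched, so the step update can mix the guided and unconditional predictions. A self-contained sketch of that mechanism, with a toy denoiser standing in for the real backend:

```python
# Toy denoiser: computes cond/uncond, applies CFG, then runs any registered
# post-CFG hooks -- mirroring how the backend invokes sampler_post_cfg_function.
def run_model(x, model_options, scale=7.0):
    cond, uncond = x + 1.0, x - 1.0  # stand-in predictions
    denoised = uncond + scale * (cond - uncond)
    for fn in model_options.get("sampler_post_cfg_function", []):
        denoised = fn({"denoised": denoised, "uncond_denoised": uncond})
    return denoised

temp = [0]

def post_cfg_function(args):
    temp[0] = args["uncond_denoised"]  # capture the uncond prediction for the sampler
    return args["denoised"]            # pass the guided output through unchanged

# Equivalent to _set_model_options_post_cfg_function({}, post_cfg_function, True).
model_options = {"sampler_post_cfg_function": [post_cfg_function], "disable_cfg1_optimization": True}

out = run_model(0.0, model_options)
print(out, temp[0])  # 13.0 -1.0 -> the sampler now sees both predictions
```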