WhiteAiZ committed on
Commit 0070fce · verified · 1 Parent(s): cf7c8c6

Upload 1420 files

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +2 -0
  2. .gitignore +44 -0
  3. LICENSE +623 -0
  4. README.md +424 -0
  5. configs/sd_xl_base.yaml +98 -0
  6. configs/sd_xl_inpaint.yaml +98 -0
  7. configs/sd_xl_refiner.yaml +91 -0
  8. configs/sd_xl_v.yaml +98 -0
  9. configs/v1-inference.yaml +69 -0
  10. configs/v1-inpainting-inference.yaml +69 -0
  11. environment-wsl2.yaml +11 -0
  12. extensions-builtin/Lora/extra_networks_lora.py +67 -0
  13. extensions-builtin/Lora/lora.py +9 -0
  14. extensions-builtin/Lora/network.py +198 -0
  15. extensions-builtin/Lora/networks.py +150 -0
  16. extensions-builtin/Lora/preload.py +13 -0
  17. extensions-builtin/Lora/scripts/lora_script.py +83 -0
  18. extensions-builtin/Lora/ui_edit_user_metadata.py +217 -0
  19. extensions-builtin/Lora/ui_extra_networks_lora.py +94 -0
  20. extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +940 -0
  21. extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py +17 -0
  22. extensions-builtin/canvas-zoom-and-pan/style.css +71 -0
  23. extensions-builtin/extra-options-section/scripts/extra_options_section.py +124 -0
  24. extensions-builtin/forge_legacy_preprocessors/.gitignore +185 -0
  25. extensions-builtin/forge_legacy_preprocessors/LICENSE +674 -0
  26. extensions-builtin/forge_legacy_preprocessors/annotator/anime_face_segment/LICENSE +21 -0
  27. extensions-builtin/forge_legacy_preprocessors/annotator/anime_face_segment/__init__.py +186 -0
  28. extensions-builtin/forge_legacy_preprocessors/annotator/annotator_path.py +17 -0
  29. extensions-builtin/forge_legacy_preprocessors/annotator/binary/__init__.py +16 -0
  30. extensions-builtin/forge_legacy_preprocessors/annotator/canny/__init__.py +5 -0
  31. extensions-builtin/forge_legacy_preprocessors/annotator/color/__init__.py +24 -0
  32. extensions-builtin/forge_legacy_preprocessors/annotator/densepose/__init__.py +80 -0
  33. extensions-builtin/forge_legacy_preprocessors/annotator/densepose/densepose.py +361 -0
  34. extensions-builtin/forge_legacy_preprocessors/annotator/depth_anything.py +81 -0
  35. extensions-builtin/forge_legacy_preprocessors/annotator/hed/__init__.py +137 -0
  36. extensions-builtin/forge_legacy_preprocessors/annotator/keypose/__init__.py +279 -0
  37. extensions-builtin/forge_legacy_preprocessors/annotator/keypose/faster_rcnn_r50_fpn_coco.py +198 -0
  38. extensions-builtin/forge_legacy_preprocessors/annotator/keypose/hrnet_w48_coco_256x192.py +181 -0
  39. extensions-builtin/forge_legacy_preprocessors/annotator/leres/__init__.py +124 -0
  40. extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/LICENSE +23 -0
  41. extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/Resnet.py +205 -0
  42. extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/Resnext_torch.py +291 -0
  43. extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/depthmap.py +666 -0
  44. extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/multi_depth_model_woauxi.py +35 -0
  45. extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/net_tools.py +60 -0
  46. extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/network_auxi.py +509 -0
  47. extensions-builtin/forge_legacy_preprocessors/annotator/leres/pix2pix/LICENSE +19 -0
  48. extensions-builtin/forge_legacy_preprocessors/annotator/leres/pix2pix/models/__init__.py +69 -0
  49. extensions-builtin/forge_legacy_preprocessors/annotator/leres/pix2pix/models/base_model.py +263 -0
  50. extensions-builtin/forge_legacy_preprocessors/annotator/leres/pix2pix/models/base_model_hg.py +59 -0
.gitattributes CHANGED
@@ -57,3 +57,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ extensions/sd-webui-infinite-image-browsing/vue/src-tauri/icons/icon.icns filter=lfs diff=lfs merge=lfs -text
61
+ modules/Roboto-Regular.ttf filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,44 @@
1
+ __pycache__
2
+ .idea
3
+ .venv
4
+ .vscode
5
+ .vs
6
+
7
+ **.ckpt
8
+ **.bin
9
+ **.sft
10
+ **.safetensors
11
+ **.pth
12
+ **.pt
13
+ **.sh
14
+
15
+ /cache
16
+ /extensions
17
+ /models/**/*
18
+ /output
19
+ /outputs
20
+ /repositories
21
+ /venv
22
+ /log
23
+ /tmp
24
+
25
+ /cache.json
26
+ /config.json
27
+ /ui-config.json
28
+ /styles.csv
29
+ /styles.csv.bak
30
+ /params.txt
31
+ /webui-user.bat
32
+ /webui-user.sh
33
+ /user.css
34
+ notification.mp3
35
+
36
+ /config_states
37
+ /node_modules
38
+ /package-lock.json
39
+
40
+ **.exp
41
+ **.lib
42
+ **.obj
43
+
44
+ *.*.*
LICENSE ADDED
@@ -0,0 +1,623 @@
1
+ GNU AFFERO GENERAL PUBLIC LICENSE
2
+ Version 3, 19 November 2007
3
+
4
+ Copyright (c) 2023 AUTOMATIC1111
5
+ Copyright (c) 2024 lllyasviel
6
+ Copyright (c) 2025 Haoming02
7
+
8
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
9
+ Everyone is permitted to copy and distribute verbatim copies
10
+ of this license document, but changing it is not allowed.
11
+
12
+ Preamble
13
+
14
+ The GNU Affero General Public License is a free, copyleft license for
15
+ software and other kinds of works, specifically designed to ensure
16
+ cooperation with the community in the case of network server software.
17
+
18
+ The licenses for most software and other practical works are designed
19
+ to take away your freedom to share and change the works. By contrast,
20
+ our General Public Licenses are intended to guarantee your freedom to
21
+ share and change all versions of a program--to make sure it remains free
22
+ software for all its users.
23
+
24
+ When we speak of free software, we are referring to freedom, not
25
+ price. Our General Public Licenses are designed to make sure that you
26
+ have the freedom to distribute copies of free software (and charge for
27
+ them if you wish), that you receive source code or can get it if you
28
+ want it, that you can change the software or use pieces of it in new
29
+ free programs, and that you know you can do these things.
30
+
31
+ Developers that use our General Public Licenses protect your rights
32
+ with two steps: (1) assert copyright on the software, and (2) offer
33
+ you this License which gives you legal permission to copy, distribute
34
+ and/or modify the software.
35
+
36
+ A secondary benefit of defending all users' freedom is that
37
+ improvements made in alternate versions of the program, if they
38
+ receive widespread use, become available for other developers to
39
+ incorporate. Many developers of free software are heartened and
40
+ encouraged by the resulting cooperation. However, in the case of
41
+ software used on network servers, this result may fail to come about.
42
+ The GNU General Public License permits making a modified version and
43
+ letting the public access it on a server without ever releasing its
44
+ source code to the public.
45
+
46
+ The GNU Affero General Public License is designed specifically to
47
+ ensure that, in such cases, the modified source code becomes available
48
+ to the community. It requires the operator of a network server to
49
+ provide the source code of the modified version running there to the
50
+ users of that server. Therefore, public use of a modified version, on
51
+ a publicly accessible server, gives the public access to the source
52
+ code of the modified version.
53
+
54
+ An older license, called the Affero General Public License and
55
+ published by Affero, was designed to accomplish similar goals. This is
56
+ a different license, not a version of the Affero GPL, but Affero has
57
+ released a new version of the Affero GPL which permits relicensing under
58
+ this license.
59
+
60
+ The precise terms and conditions for copying, distribution and
61
+ modification follow.
62
+
63
+ TERMS AND CONDITIONS
64
+
65
+ 0. Definitions.
66
+
67
+ "This License" refers to version 3 of the GNU Affero General Public License.
68
+
69
+ "Copyright" also means copyright-like laws that apply to other kinds of
70
+ works, such as semiconductor masks.
71
+
72
+ "The Program" refers to any copyrightable work licensed under this
73
+ License. Each licensee is addressed as "you". "Licensees" and
74
+ "recipients" may be individuals or organizations.
75
+
76
+ To "modify" a work means to copy from or adapt all or part of the work
77
+ in a fashion requiring copyright permission, other than the making of an
78
+ exact copy. The resulting work is called a "modified version" of the
79
+ earlier work or a work "based on" the earlier work.
80
+
81
+ A "covered work" means either the unmodified Program or a work based
82
+ on the Program.
83
+
84
+ To "propagate" a work means to do anything with it that, without
85
+ permission, would make you directly or secondarily liable for
86
+ infringement under applicable copyright law, except executing it on a
87
+ computer or modifying a private copy. Propagation includes copying,
88
+ distribution (with or without modification), making available to the
89
+ public, and in some countries other activities as well.
90
+
91
+ To "convey" a work means any kind of propagation that enables other
92
+ parties to make or receive copies. Mere interaction with a user through
93
+ a computer network, with no transfer of a copy, is not conveying.
94
+
95
+ An interactive user interface displays "Appropriate Legal Notices"
96
+ to the extent that it includes a convenient and prominently visible
97
+ feature that (1) displays an appropriate copyright notice, and (2)
98
+ tells the user that there is no warranty for the work (except to the
99
+ extent that warranties are provided), that licensees may convey the
100
+ work under this License, and how to view a copy of this License. If
101
+ the interface presents a list of user commands or options, such as a
102
+ menu, a prominent item in the list meets this criterion.
103
+
104
+ 1. Source Code.
105
+
106
+ The "source code" for a work means the preferred form of the work
107
+ for making modifications to it. "Object code" means any non-source
108
+ form of a work.
109
+
110
+ A "Standard Interface" means an interface that either is an official
111
+ standard defined by a recognized standards body, or, in the case of
112
+ interfaces specified for a particular programming language, one that
113
+ is widely used among developers working in that language.
114
+
115
+ The "System Libraries" of an executable work include anything, other
116
+ than the work as a whole, that (a) is included in the normal form of
117
+ packaging a Major Component, but which is not part of that Major
118
+ Component, and (b) serves only to enable use of the work with that
119
+ Major Component, or to implement a Standard Interface for which an
120
+ implementation is available to the public in source code form. A
121
+ "Major Component", in this context, means a major essential component
122
+ (kernel, window system, and so on) of the specific operating system
123
+ (if any) on which the executable work runs, or a compiler used to
124
+ produce the work, or an object code interpreter used to run it.
125
+
126
+ The "Corresponding Source" for a work in object code form means all
127
+ the source code needed to generate, install, and (for an executable
128
+ work) run the object code and to modify the work, including scripts to
129
+ control those activities. However, it does not include the work's
130
+ System Libraries, or general-purpose tools or generally available free
131
+ programs which are used unmodified in performing those activities but
132
+ which are not part of the work. For example, Corresponding Source
133
+ includes interface definition files associated with source files for
134
+ the work, and the source code for shared libraries and dynamically
135
+ linked subprograms that the work is specifically designed to require,
136
+ such as by intimate data communication or control flow between those
137
+ subprograms and other parts of the work.
138
+
139
+ The Corresponding Source need not include anything that users
140
+ can regenerate automatically from other parts of the Corresponding
141
+ Source.
142
+
143
+ The Corresponding Source for a work in source code form is that
144
+ same work.
145
+
146
+ 2. Basic Permissions.
147
+
148
+ All rights granted under this License are granted for the term of
149
+ copyright on the Program, and are irrevocable provided the stated
150
+ conditions are met. This License explicitly affirms your unlimited
151
+ permission to run the unmodified Program. The output from running a
152
+ covered work is covered by this License only if the output, given its
153
+ content, constitutes a covered work. This License acknowledges your
154
+ rights of fair use or other equivalent, as provided by copyright law.
155
+
156
+ You may make, run and propagate covered works that you do not
157
+ convey, without conditions so long as your license otherwise remains
158
+ in force. You may convey covered works to others for the sole purpose
159
+ of having them make modifications exclusively for you, or provide you
160
+ with facilities for running those works, provided that you comply with
161
+ the terms of this License in conveying all material for which you do
162
+ not control copyright. Those thus making or running the covered works
163
+ for you must do so exclusively on your behalf, under your direction
164
+ and control, on terms that prohibit them from making any copies of
165
+ your copyrighted material outside their relationship with you.
166
+
167
+ Conveying under any other circumstances is permitted solely under
168
+ the conditions stated below. Sublicensing is not allowed; section 10
169
+ makes it unnecessary.
170
+
171
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
172
+
173
+ No covered work shall be deemed part of an effective technological
174
+ measure under any applicable law fulfilling obligations under article
175
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
176
+ similar laws prohibiting or restricting circumvention of such
177
+ measures.
178
+
179
+ When you convey a covered work, you waive any legal power to forbid
180
+ circumvention of technological measures to the extent such circumvention
181
+ is effected by exercising rights under this License with respect to
182
+ the covered work, and you disclaim any intention to limit operation or
183
+ modification of the work as a means of enforcing, against the work's
184
+ users, your or third parties' legal rights to forbid circumvention of
185
+ technological measures.
186
+
187
+ 4. Conveying Verbatim Copies.
188
+
189
+ You may convey verbatim copies of the Program's source code as you
190
+ receive it, in any medium, provided that you conspicuously and
191
+ appropriately publish on each copy an appropriate copyright notice;
192
+ keep intact all notices stating that this License and any
193
+ non-permissive terms added in accord with section 7 apply to the code;
194
+ keep intact all notices of the absence of any warranty; and give all
195
+ recipients a copy of this License along with the Program.
196
+
197
+ You may charge any price or no price for each copy that you convey,
198
+ and you may offer support or warranty protection for a fee.
199
+
200
+ 5. Conveying Modified Source Versions.
201
+
202
+ You may convey a work based on the Program, or the modifications to
203
+ produce it from the Program, in the form of source code under the
204
+ terms of section 4, provided that you also meet all of these conditions:
205
+
206
+ a) The work must carry prominent notices stating that you modified
207
+ it, and giving a relevant date.
208
+
209
+ b) The work must carry prominent notices stating that it is
210
+ released under this License and any conditions added under section
211
+ 7. This requirement modifies the requirement in section 4 to
212
+ "keep intact all notices".
213
+
214
+ c) You must license the entire work, as a whole, under this
215
+ License to anyone who comes into possession of a copy. This
216
+ License will therefore apply, along with any applicable section 7
217
+ additional terms, to the whole of the work, and all its parts,
218
+ regardless of how they are packaged. This License gives no
219
+ permission to license the work in any other way, but it does not
220
+ invalidate such permission if you have separately received it.
221
+
222
+ d) If the work has interactive user interfaces, each must display
223
+ Appropriate Legal Notices; however, if the Program has interactive
224
+ interfaces that do not display Appropriate Legal Notices, your
225
+ work need not make them do so.
226
+
227
+ A compilation of a covered work with other separate and independent
228
+ works, which are not by their nature extensions of the covered work,
229
+ and which are not combined with it such as to form a larger program,
230
+ in or on a volume of a storage or distribution medium, is called an
231
+ "aggregate" if the compilation and its resulting copyright are not
232
+ used to limit the access or legal rights of the compilation's users
233
+ beyond what the individual works permit. Inclusion of a covered work
234
+ in an aggregate does not cause this License to apply to the other
235
+ parts of the aggregate.
236
+
237
+ 6. Conveying Non-Source Forms.
238
+
239
+ You may convey a covered work in object code form under the terms
240
+ of sections 4 and 5, provided that you also convey the
241
+ machine-readable Corresponding Source under the terms of this License,
242
+ in one of these ways:
243
+
244
+ a) Convey the object code in, or embodied in, a physical product
245
+ (including a physical distribution medium), accompanied by the
246
+ Corresponding Source fixed on a durable physical medium
247
+ customarily used for software interchange.
248
+
249
+ b) Convey the object code in, or embodied in, a physical product
250
+ (including a physical distribution medium), accompanied by a
251
+ written offer, valid for at least three years and valid for as
252
+ long as you offer spare parts or customer support for that product
253
+ model, to give anyone who possesses the object code either (1) a
254
+ copy of the Corresponding Source for all the software in the
255
+ product that is covered by this License, on a durable physical
256
+ medium customarily used for software interchange, for a price no
257
+ more than your reasonable cost of physically performing this
258
+ conveying of source, or (2) access to copy the
259
+ Corresponding Source from a network server at no charge.
260
+
261
+ c) Convey individual copies of the object code with a copy of the
262
+ written offer to provide the Corresponding Source. This
263
+ alternative is allowed only occasionally and noncommercially, and
264
+ only if you received the object code with such an offer, in accord
265
+ with subsection 6b.
266
+
267
+ d) Convey the object code by offering access from a designated
268
+ place (gratis or for a charge), and offer equivalent access to the
269
+ Corresponding Source in the same way through the same place at no
270
+ further charge. You need not require recipients to copy the
271
+ Corresponding Source along with the object code. If the place to
272
+ copy the object code is a network server, the Corresponding Source
273
+ may be on a different server (operated by you or a third party)
274
+ that supports equivalent copying facilities, provided you maintain
275
+ clear directions next to the object code saying where to find the
276
+ Corresponding Source. Regardless of what server hosts the
277
+ Corresponding Source, you remain obligated to ensure that it is
278
+ available for as long as needed to satisfy these requirements.
279
+
280
+ e) Convey the object code using peer-to-peer transmission, provided
281
+ you inform other peers where the object code and Corresponding
282
+ Source of the work are being offered to the general public at no
283
+ charge under subsection 6d.
284
+
285
+ A separable portion of the object code, whose source code is excluded
286
+ from the Corresponding Source as a System Library, need not be
287
+ included in conveying the object code work.
288
+
289
+ A "User Product" is either (1) a "consumer product", which means any
290
+ tangible personal property which is normally used for personal, family,
291
+ or household purposes, or (2) anything designed or sold for incorporation
292
+ into a dwelling. In determining whether a product is a consumer product,
293
+ doubtful cases shall be resolved in favor of coverage. For a particular
294
+ product received by a particular user, "normally used" refers to a
295
+ typical or common use of that class of product, regardless of the status
296
+ of the particular user or of the way in which the particular user
297
+ actually uses, or expects or is expected to use, the product. A product
298
+ is a consumer product regardless of whether the product has substantial
299
+ commercial, industrial or non-consumer uses, unless such uses represent
300
+ the only significant mode of use of the product.
301
+
302
+ "Installation Information" for a User Product means any methods,
303
+ procedures, authorization keys, or other information required to install
304
+ and execute modified versions of a covered work in that User Product from
305
+ a modified version of its Corresponding Source. The information must
306
+ suffice to ensure that the continued functioning of the modified object
307
+ code is in no case prevented or interfered with solely because
308
+ modification has been made.
309
+
310
+ If you convey an object code work under this section in, or with, or
311
+ specifically for use in, a User Product, and the conveying occurs as
312
+ part of a transaction in which the right of possession and use of the
313
+ User Product is transferred to the recipient in perpetuity or for a
314
+ fixed term (regardless of how the transaction is characterized), the
315
+ Corresponding Source conveyed under this section must be accompanied
316
+ by the Installation Information. But this requirement does not apply
317
+ if neither you nor any third party retains the ability to install
318
+ modified object code on the User Product (for example, the work has
319
+ been installed in ROM).
320
+
321
+ The requirement to provide Installation Information does not include a
322
+ requirement to continue to provide support service, warranty, or updates
323
+ for a work that has been modified or installed by the recipient, or for
324
+ the User Product in which it has been modified or installed. Access to a
325
+ network may be denied when the modification itself materially and
326
+ adversely affects the operation of the network or violates the rules and
327
+ protocols for communication across the network.
328
+
329
+ Corresponding Source conveyed, and Installation Information provided,
330
+ in accord with this section must be in a format that is publicly
331
+ documented (and with an implementation available to the public in
332
+ source code form), and must require no special password or key for
333
+ unpacking, reading or copying.
334
+
335
+ 7. Additional Terms.
336
+
337
+ "Additional permissions" are terms that supplement the terms of this
338
+ License by making exceptions from one or more of its conditions.
339
+ Additional permissions that are applicable to the entire Program shall
340
+ be treated as though they were included in this License, to the extent
341
+ that they are valid under applicable law. If additional permissions
342
+ apply only to part of the Program, that part may be used separately
343
+ under those permissions, but the entire Program remains governed by
344
+ this License without regard to the additional permissions.
345
+
346
+ When you convey a copy of a covered work, you may at your option
347
+ remove any additional permissions from that copy, or from any part of
348
+ it. (Additional permissions may be written to require their own
349
+ removal in certain cases when you modify the work.) You may place
350
+ additional permissions on material, added by you to a covered work,
351
+ for which you have or can give appropriate copyright permission.
352
+
353
+ Notwithstanding any other provision of this License, for material you
354
+ add to a covered work, you may (if authorized by the copyright holders of
355
+ that material) supplement the terms of this License with terms:
356
+
357
+ a) Disclaiming warranty or limiting liability differently from the
358
+ terms of sections 15 and 16 of this License; or
359
+
360
+ b) Requiring preservation of specified reasonable legal notices or
361
+ author attributions in that material or in the Appropriate Legal
362
+ Notices displayed by works containing it; or
363
+
364
+ c) Prohibiting misrepresentation of the origin of that material, or
365
+ requiring that modified versions of such material be marked in
366
+ reasonable ways as different from the original version; or
367
+
368
+ d) Limiting the use for publicity purposes of names of licensors or
369
+ authors of the material; or
370
+
371
+ e) Declining to grant rights under trademark law for use of some
372
+ trade names, trademarks, or service marks; or
373
+
374
+ f) Requiring indemnification of licensors and authors of that
375
+ material by anyone who conveys the material (or modified versions of
376
+ it) with contractual assumptions of liability to the recipient, for
377
+ any liability that these contractual assumptions directly impose on
378
+ those licensors and authors.
379
+
380
+ All other non-permissive additional terms are considered "further
381
+ restrictions" within the meaning of section 10. If the Program as you
382
+ received it, or any part of it, contains a notice stating that it is
383
+ governed by this License along with a term that is a further
384
+ restriction, you may remove that term. If a license document contains
385
+ a further restriction but permits relicensing or conveying under this
386
+ License, you may add to a covered work material governed by the terms
387
+ of that license document, provided that the further restriction does
388
+ not survive such relicensing or conveying.
389
+
390
+ If you add terms to a covered work in accord with this section, you
391
+ must place, in the relevant source files, a statement of the
392
+ additional terms that apply to those files, or a notice indicating
393
+ where to find the applicable terms.
394
+
395
+ Additional terms, permissive or non-permissive, may be stated in the
396
+ form of a separately written license, or stated as exceptions;
397
+ the above requirements apply either way.
398
+
399
+ 8. Termination.
400
+
401
+ You may not propagate or modify a covered work except as expressly
402
+ provided under this License. Any attempt otherwise to propagate or
403
+ modify it is void, and will automatically terminate your rights under
404
+ this License (including any patent licenses granted under the third
405
+ paragraph of section 11).
406
+
407
+ However, if you cease all violation of this License, then your
408
+ license from a particular copyright holder is reinstated (a)
409
+ provisionally, unless and until the copyright holder explicitly and
410
+ finally terminates your license, and (b) permanently, if the copyright
411
+ holder fails to notify you of the violation by some reasonable means
412
+ prior to 60 days after the cessation.
413
+
414
+ Moreover, your license from a particular copyright holder is
415
+ reinstated permanently if the copyright holder notifies you of the
416
+ violation by some reasonable means, this is the first time you have
417
+ received notice of violation of this License (for any work) from that
418
+ copyright holder, and you cure the violation prior to 30 days after
419
+ your receipt of the notice.
420
+
421
+ Termination of your rights under this section does not terminate the
422
+ licenses of parties who have received copies or rights from you under
423
+ this License. If your rights have been terminated and not permanently
424
+ reinstated, you do not qualify to receive new licenses for the same
425
+ material under section 10.
426
+
427
+ 9. Acceptance Not Required for Having Copies.
428
+
429
+ You are not required to accept this License in order to receive or
430
+ run a copy of the Program. Ancillary propagation of a covered work
431
+ occurring solely as a consequence of using peer-to-peer transmission
432
+ to receive a copy likewise does not require acceptance. However,
433
+ nothing other than this License grants you permission to propagate or
434
+ modify any covered work. These actions infringe copyright if you do
435
+ not accept this License. Therefore, by modifying or propagating a
436
+ covered work, you indicate your acceptance of this License to do so.
437
+
438
+ 10. Automatic Licensing of Downstream Recipients.
439
+
440
+ Each time you convey a covered work, the recipient automatically
441
+ receives a license from the original licensors, to run, modify and
442
+ propagate that work, subject to this License. You are not responsible
443
+ for enforcing compliance by third parties with this License.
444
+
445
+ An "entity transaction" is a transaction transferring control of an
446
+ organization, or substantially all assets of one, or subdividing an
447
+ organization, or merging organizations. If propagation of a covered
448
+ work results from an entity transaction, each party to that
449
+ transaction who receives a copy of the work also receives whatever
450
+ licenses to the work the party's predecessor in interest had or could
451
+ give under the previous paragraph, plus a right to possession of the
452
+ Corresponding Source of the work from the predecessor in interest, if
453
+ the predecessor has it or can get it with reasonable efforts.
454
+
455
+ You may not impose any further restrictions on the exercise of the
456
+ rights granted or affirmed under this License. For example, you may
457
+ not impose a license fee, royalty, or other charge for exercise of
458
+ rights granted under this License, and you may not initiate litigation
459
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
460
+ any patent claim is infringed by making, using, selling, offering for
461
+ sale, or importing the Program or any portion of it.
462
+
463
+ 11. Patents.
464
+
465
+ A "contributor" is a copyright holder who authorizes use under this
466
+ License of the Program or a work on which the Program is based. The
467
+ work thus licensed is called the contributor's "contributor version".
468
+
469
+ A contributor's "essential patent claims" are all patent claims
470
+ owned or controlled by the contributor, whether already acquired or
471
+ hereafter acquired, that would be infringed by some manner, permitted
472
+ by this License, of making, using, or selling its contributor version,
473
+ but do not include claims that would be infringed only as a
474
+ consequence of further modification of the contributor version. For
475
+ purposes of this definition, "control" includes the right to grant
476
+ patent sublicenses in a manner consistent with the requirements of
477
+ this License.
478
+
479
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
480
+ patent license under the contributor's essential patent claims, to
481
+ make, use, sell, offer for sale, import and otherwise run, modify and
482
+ propagate the contents of its contributor version.
483
+
484
+ In the following three paragraphs, a "patent license" is any express
485
+ agreement or commitment, however denominated, not to enforce a patent
486
+ (such as an express permission to practice a patent or covenant not to
487
+ sue for patent infringement). To "grant" such a patent license to a
488
+ party means to make such an agreement or commitment not to enforce a
489
+ patent against the party.
490
+
491
+ If you convey a covered work, knowingly relying on a patent license,
492
+ and the Corresponding Source of the work is not available for anyone
493
+ to copy, free of charge and under the terms of this License, through a
494
+ publicly available network server or other readily accessible means,
495
+ then you must either (1) cause the Corresponding Source to be so
496
+ available, or (2) arrange to deprive yourself of the benefit of the
497
+ patent license for this particular work, or (3) arrange, in a manner
498
+ consistent with the requirements of this License, to extend the patent
499
+ license to downstream recipients. "Knowingly relying" means you have
500
+ actual knowledge that, but for the patent license, your conveying the
501
+ covered work in a country, or your recipient's use of the covered work
502
+ in a country, would infringe one or more identifiable patents in that
503
+ country that you have reason to believe are valid.
504
+
505
+ If, pursuant to or in connection with a single transaction or
506
+ arrangement, you convey, or propagate by procuring conveyance of, a
507
+ covered work, and grant a patent license to some of the parties
508
+ receiving the covered work authorizing them to use, propagate, modify
509
+ or convey a specific copy of the covered work, then the patent license
510
+ you grant is automatically extended to all recipients of the covered
511
+ work and works based on it.
512
+
513
+ A patent license is "discriminatory" if it does not include within
514
+ the scope of its coverage, prohibits the exercise of, or is
515
+ conditioned on the non-exercise of one or more of the rights that are
516
+ specifically granted under this License. You may not convey a covered
517
+ work if you are a party to an arrangement with a third party that is
518
+ in the business of distributing software, under which you make payment
519
+ to the third party based on the extent of your activity of conveying
520
+ the work, and under which the third party grants, to any of the
521
+ parties who would receive the covered work from you, a discriminatory
522
+ patent license (a) in connection with copies of the covered work
523
+ conveyed by you (or copies made from those copies), or (b) primarily
524
+ for and in connection with specific products or compilations that
525
+ contain the covered work, unless you entered into that arrangement,
526
+ or that patent license was granted, prior to 28 March 2007.
527
+
528
+ Nothing in this License shall be construed as excluding or limiting
529
+ any implied license or other defenses to infringement that may
530
+ otherwise be available to you under applicable patent law.
531
+
532
+ 12. No Surrender of Others' Freedom.
533
+
534
+ If conditions are imposed on you (whether by court order, agreement or
535
+ otherwise) that contradict the conditions of this License, they do not
536
+ excuse you from the conditions of this License. If you cannot convey a
537
+ covered work so as to satisfy simultaneously your obligations under this
538
+ License and any other pertinent obligations, then as a consequence you may
539
+ not convey it at all. For example, if you agree to terms that obligate you
540
+ to collect a royalty for further conveying from those to whom you convey
541
+ the Program, the only way you could satisfy both those terms and this
542
+ License would be to refrain entirely from conveying the Program.
543
+
544
+ 13. Remote Network Interaction; Use with the GNU General Public License.
545
+
546
+ Notwithstanding any other provision of this License, if you modify the
547
+ Program, your modified version must prominently offer all users
548
+ interacting with it remotely through a computer network (if your version
549
+ supports such interaction) an opportunity to receive the Corresponding
550
+ Source of your version by providing access to the Corresponding Source
551
+ from a network server at no charge, through some standard or customary
552
+ means of facilitating copying of software. This Corresponding Source
553
+ shall include the Corresponding Source for any work covered by version 3
554
+ of the GNU General Public License that is incorporated pursuant to the
555
+ following paragraph.
556
+
557
+ Notwithstanding any other provision of this License, you have
558
+ permission to link or combine any covered work with a work licensed
559
+ under version 3 of the GNU General Public License into a single
560
+ combined work, and to convey the resulting work. The terms of this
561
+ License will continue to apply to the part which is the covered work,
562
+ but the work with which it is combined will remain governed by version
563
+ 3 of the GNU General Public License.
564
+
565
+ 14. Revised Versions of this License.
566
+
567
+ The Free Software Foundation may publish revised and/or new versions of
568
+ the GNU Affero General Public License from time to time. Such new versions
569
+ will be similar in spirit to the present version, but may differ in detail to
570
+ address new problems or concerns.
571
+
572
+ Each version is given a distinguishing version number. If the
573
+ Program specifies that a certain numbered version of the GNU Affero General
574
+ Public License "or any later version" applies to it, you have the
575
+ option of following the terms and conditions either of that numbered
576
+ version or of any later version published by the Free Software
577
+ Foundation. If the Program does not specify a version number of the
578
+ GNU Affero General Public License, you may choose any version ever published
579
+ by the Free Software Foundation.
580
+
581
+ If the Program specifies that a proxy can decide which future
582
+ versions of the GNU Affero General Public License can be used, that proxy's
583
+ public statement of acceptance of a version permanently authorizes you
584
+ to choose that version for the Program.
585
+
586
+ Later license versions may give you additional or different
587
+ permissions. However, no additional obligations are imposed on any
588
+ author or copyright holder as a result of your choosing to follow a
589
+ later version.
590
+
591
+ 15. Disclaimer of Warranty.
592
+
593
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
594
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
595
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
596
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
597
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
598
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
599
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
600
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
601
+
602
+ 16. Limitation of Liability.
603
+
604
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
605
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
606
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
607
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
608
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
609
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
610
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
611
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
612
+ SUCH DAMAGES.
613
+
614
+ 17. Interpretation of Sections 15 and 16.
615
+
616
+ If the disclaimer of warranty and limitation of liability provided
617
+ above cannot be given local legal effect according to their terms,
618
+ reviewing courts shall apply local law that most closely approximates
619
+ an absolute waiver of all civil liability in connection with the
620
+ Program, unless a warranty or assumption of liability accompanies a
621
+ copy of the Program in return for a fee.
622
+
623
+ END OF TERMS AND CONDITIONS
README.md ADDED
@@ -0,0 +1,424 @@
1
+ <h1 align="center">Stable Diffusion WebUI Forge - Classic</h1>
2
+
3
+ <p align="center"><img src="html\ui.webp" width=512 alt="UI"></p>
4
+
5
+ <blockquote><i>
6
+ <b>Stable Diffusion WebUI Forge</b> is a platform on top of the original <a href="https://github.com/AUTOMATIC1111/stable-diffusion-webui">Stable Diffusion WebUI</a> by <ins>AUTOMATIC1111</ins>, to make development easier, optimize resource management, speed up inference, and study experimental features.<br>
7
+ The name "Forge" is inspired by "Minecraft Forge". This project aims to become the Forge of Stable Diffusion WebUI.<br>
8
+ <p align="right">- <b>lllyasviel</b><br>
9
+ <sup>(paraphrased)</sup></p>
10
+ </i></blockquote>
11
+
12
+ <br>
13
+
14
+ "**Classic**" mainly serves as an archive for the "`previous`" version of Forge, which was built on [Gradio](https://github.com/gradio-app/gradio) `3.41.2` before the major changes *(see the original [announcement](https://github.com/lllyasviel/stable-diffusion-webui-forge/discussions/801))* were introduced. Additionally, this fork is focused exclusively on **SD1** and **SDXL** checkpoints, having various optimizations implemented, with the main goal of being the lightest WebUI without any bloatwares.
15
+
16
+ > [!Tip]
17
+ > [How to Install](#installation)
18
+
19
+ <br>
20
+
21
+ ## Features [Apr. 30]
22
+ > Most base features of the original [Automatic1111 Webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) should still function
23
+
24
+ #### New Features
25
+
26
+ - [X] Support `v-pred` **SDXL** checkpoints *(**e.g.** [NoobAI](https://civitai.com/models/833294?modelVersionId=1190596))*
27
+ - [X] Support [uv](https://github.com/astral-sh/uv) package manager
28
+ - requires **uv**
29
+ - drastically speed up installation
30
+ - see [Commandline](#by-classic)
31
+ - [X] Support [SageAttention](https://github.com/thu-ml/SageAttention)
32
+ - requires **manually** installing the [triton](https://github.com/triton-lang/triton) package
33
+ - [how to install](#install-triton)
34
+ - requires RTX **30** +
35
+ - ~10% speed up
36
+ - see [Commandline](#by-classic)
37
+ - [X] Support [FlashAttention](https://arxiv.org/abs/2205.14135)
38
+ - requires **manually** installing the [flash-attn](https://github.com/Dao-AILab/flash-attention) package
39
+ - [how to install](#install-flash-attn)
40
+ - ~10% speed up
41
+ - [X] Support fast `fp16_accumulation`
42
+ - requires PyTorch **2.7.0** +
43
+ - ~25% speed up
44
+ - see [Commandline](#by-classic)
45
+ - [X] Support fast `cublas` operation *(`CublasLinear`)*
46
+ - requires **manually** installing the [cublas_ops](https://github.com/aredden/torch-cublas-hgemm) package
47
+ - [how to install](#install-cublas)
48
+ - ~25% speed up
49
+ - enable in **Settings**
50
+ - [X] Support fast `fp8` operation *(`torch._scaled_mm`)*
51
+ - requires RTX **40** +
52
+ - ~10% speed up; reduce quality
53
+ - enable in **Settings**
54
+
55
+ > [!Note]
56
+ > - The `fp16_accumulation` and `cublas` operations achieve the same speed up; if you have already installed/updated to `torch==2.7.0`, you do not need `cublas_ops`
57
+ > - The `fp16_accumulation` and `cublas` operations require `fp16` precision, and are therefore not compatible with the `fp8` operation
58
+
59
+ - [X] Implement RescaleCFG
60
+ - reduce burnt colors; mainly for `v-pred` checkpoints
61
+ - [X] Implement MaHiRo
62
+ - alternative CFG calculation
63
+ - [graph](https://www.desmos.com/calculator/wcztf0ktiq)
64
+ - [X] Implement `diskcache`
65
+ - *(backported from Automatic1111 Webui upstream)*
66
+ - [X] Implement `skip_early_cond`
67
+ - *(backported from Automatic1111 Webui upstream)*
68
+ - [X] Update `spandrel`
69
+ - support most modern Upscaler architecture
70
+ - [X] Add `pillow-heif` package
71
+ - support `.avif` and `.heif` formats
72
+ - [X] Automatic row split for `X/Y/Z Plot`
73
+ - [X] Add an option to disable **Refiner**
74
+ - [X] Add an option to disable ExtraNetworks **Tree View**
75
+ - [X] Support [Union](https://huggingface.co/xinsir/controlnet-union-sdxl-1.0) / [ProMax](https://huggingface.co/brad-twinkl/controlnet-union-sdxl-1.0-promax) ControlNet
76
+ - I just made them always show up in the dropdown
77
+
78
+ #### Removed Features
79
+
80
+ - [X] SD2
81
+ - [X] Alt-Diffusion
82
+ - [X] Instruct-Pix2Pix
83
+ - [X] Hypernetworks
84
+ - [X] SVD
85
+ - [X] Z123
86
+ - [X] CLIP Interrogator
87
+ - [X] Deepbooru Interrogator
88
+ - [X] Textual Inversion Training
89
+ - [X] Checkpoint Merging
90
+ - [X] LDSR
91
+ - [X] Most **built-in** Extensions
92
+ - [X] Some **built-in** Scripts
93
+ - [X] The `test` scripts
94
+ - [X] `Photopea` and `openpose_editor` *(ControlNet)*
95
+ - [X] Unix `.sh` launch scripts
96
+ - You can still use this WebUI by copying a launch script from another working WebUI; I just don't want to maintain them...
97
+
98
+ #### Optimizations
99
+
100
+ - [X] **[Freedom]** Natively integrate the `SD1` and `SDXL` logics
101
+ - no longer `git` `clone` any repository on fresh install
102
+ - no more random hacks and monkey patches
103
+ - [X] Fix memory leak when switching checkpoints
104
+ - [X] Clean up the `ldm_patched` *(**ie.** `comfy`)* folder
105
+ - [X] Remove unused `cmd_args`
106
+ - [X] Remove unused `shared_options`
107
+ - [X] Remove unused `args_parser`
108
+ - [X] Remove legacy codes
109
+ - [X] Remove duplicated upscaler codes
110
+ - put every upscaler inside the `ESRGAN` folder
111
+ - [X] Improve color correction
112
+ - [X] Improve code logics
113
+ - [X] Improve hash caching
114
+ - [X] Improve error logs
115
+ - no longer prints `TypeError: 'NoneType' object is not iterable`
116
+ - [X] Improve setting descriptions
117
+ - [X] Check for Extension updates in parallel
118
+ - [X] Moved `embeddings` folder into `models` folder
119
+ - [X] ControlNet Rewrite
120
+ - change Units to `gr.Tab`
121
+ - remove multi-inputs, as they are "[misleading](https://github.com/lllyasviel/stable-diffusion-webui-forge/discussions/932)"
122
+ - change `visible` toggle to `interactive` toggle; now the UI will no longer jump around
123
+ - improved `Presets` application
124
+ - [X] Run `text encoder` on CPU by default
125
+ - [X] Fix `pydantic` Errors
126
+ - [X] Fix `Soft Inpainting`
127
+ - [X] Lint & Format most of the Python and JavaScript codes
128
+ - [X] Update to Pillow 11
129
+ - faster image processing
130
+ - [X] Update `protobuf`
131
+ - faster `insightface` loading
132
+ - [X] Update to latest PyTorch
133
+ - `torch==2.7.0+cu128`
134
+ - `xformers==0.0.30`
135
+ - [X] No longer install `open-clip` twice
136
+ - [X] Update certain packages to newer versions
137
+ - [X] Update recommended Python to `3.11.9`
138
+ - [X] many more... :tm:
139
+
140
+ <br>
141
+
142
+ ## Commandline
143
+ > These flags can be added after the `set COMMANDLINE_ARGS=` line in the `webui-user.bat` *(separate each flag with a space; see the sketch below)*
144
+
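For illustration, a `webui-user.bat` might end up looking like the following minimal sketch; the surrounding `set PYTHON=` / `call webui.bat` lines follow the stock A1111 template, and the particular flags are only an example, not a recommendation:

```bash
@echo off

set PYTHON=
set GIT=
set VENV_DIR=
set COMMANDLINE_ARGS=--xformers --no-download-sd-model --api

call webui.bat
```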
145
+ #### A1111 built-in
146
+
147
+ - `--no-download-sd-model`: Do not download a default checkpoint
148
+ - can be removed after you download some checkpoints of your choice
149
+ - `--xformers`: Install the `xformers` package to speed up generation
150
+ - Currently, `torch==2.7.0` does **not** support `xformers` yet
151
+ - `--port`: Specify a server port to use
152
+ - defaults to `7860`
153
+ - `--api`: Enable [API](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/API) access
154
+
155
+ <br>
156
+
157
+ - Once you have successfully launched the WebUI, you can add the following flags to bypass some validation steps in order to improve the startup time *(see the example below)*
158
+ - `--skip-prepare-environment`
159
+ - `--skip-install`
160
+ - `--skip-python-version-check`
161
+ - `--skip-torch-cuda-test`
162
+ - `--skip-version-check`
163
+
164
+ > [!Important]
165
+ > Remove them if you are installing an Extension, as those flags also block Extensions from installing their requirements
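Purely as an illustration, a launch line that adds the skip flags on top of an existing setup could look like the sketch below; remove the skip flags again whenever you need to install an Extension:

```bash
set COMMANDLINE_ARGS=--xformers --skip-prepare-environment --skip-install --skip-python-version-check --skip-torch-cuda-test --skip-version-check
```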
166
+
167
+ #### by. Forge
168
+
169
+ - For RTX **30** and above, you can add the following flags to slightly increase performance; but on rare occasions they may cause `OutOfMemory` errors or even crash the WebUI, and in certain configurations they may even lower the speed instead
170
+ - `--cuda-malloc`
171
+ - `--cuda-stream`
172
+ - `--pin-shared-memory`
173
+
174
+ #### by. Classic
175
+
176
+ - `--uv`: Replace the `python -m pip` calls with `uv pip` to massively speed up package installation
177
+ - requires **uv** to be installed first *(see [Installation](#installation))*
178
+ - `--uv-symlink`: Same as above; but additionally pass `--link-mode symlink` to the commands
179
+ - significantly reduces installation size (`~7 GB` to `~100 MB`)
180
+
181
+ > [!Important]
182
+ > Using `symlink` means the packages are accessed directly from the cache folders; refrain from clearing the cache while this option is set
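Because `--uv-symlink` links packages straight out of uv's cache, it helps to know where that cache lives before cleaning anything; a small sketch using uv's own CLI:

```bash
# print the cache directory that the symlinked packages point into
uv cache dir
```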
183
+
184
+ - `--fast-fp16`: Enable the `allow_fp16_accumulation` option
185
+ - requires PyTorch **2.7.0** +
186
+ - `--sage`: Install the `sageattention` package to speed up generation
187
+ - requires **triton**
188
+ - requires RTX **30** +
189
+ - only affects **SDXL**
190
+
191
+ > [!Tip]
192
+ > `--xformers` is still recommended even if you already have `--sage`, as `sageattention` does not speed up **VAE** while `xformers` does
193
+
194
+ - `--model-ref`: Points to a central `models` folder that contains all your models
195
+ - the folder should contain subfolders such as `Stable-diffusion`, `Lora`, `VAE`, `ESRGAN`, etc. *(see the sketch below)*
196
+
197
+ > [!Important]
198
+ > This simply **replaces** the `models` folder, rather than adding on top of it
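A sketch of the central folder that `--model-ref` expects; the drive and path below are hypothetical, and only the subfolder names come from this README:

```bash
# in webui-user.bat
set COMMANDLINE_ARGS=--model-ref D:\AI\models

# expected layout of D:\AI\models
#   Stable-diffusion\   <- checkpoints
#   Lora\
#   VAE\
#   ESRGAN\             <- upscalers
```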
199
+
200
+ <br>
201
+
202
+ ## Installation
203
+
204
+ 0. Install **[git](https://git-scm.com/downloads)**
205
+ 1. Clone the Repo
206
+ ```bash
207
+ git clone https://github.com/Haoming02/sd-webui-forge-classic
208
+ ```
209
+
210
+ 2. Setup Python
211
+
212
+ <details>
213
+ <summary>Recommended Method</summary>
214
+
215
+ - Install **[uv](https://github.com/astral-sh/uv)** *(see the sketch below)*
216
+ - Set up **venv**
217
+ ```bash
218
+ cd sd-webui-forge-classic
219
+ uv venv venv --python 3.11 --seed
220
+ ```
221
+ - Add the `--uv` flag to `webui-user.bat`
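One common way to install **uv** on Windows is the PowerShell one-liner from the uv documentation; the installer URL below is an assumption that may change, so verify it against the uv repository before running:

```bash
powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
```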
222
+
223
+ </details>
224
+
225
+ <details>
226
+ <summary>Standard Method</summary>
227
+
228
+ - Install **[Python 3.11.9](https://www.python.org/downloads/release/python-3119/)**
229
+ - Remember to enable `Add Python to PATH`
230
+
231
+ </details>
232
+
233
+ 3. **(Optional)** Configure [Commandline](#commandline)
234
+ 4. Launch the WebUI via `webui-user.bat`
235
+ 5. During the first launch, it will automatically install all the requirements
236
+ 6. Once the installation is finished, the WebUI will start in a browser automatically
237
+
238
+ <br>
239
+
240
+ ### Install cublas
241
+
242
+ <details>
243
+ <summary>Expand</summary>
244
+
245
+ 0. Ensure the WebUI can properly launch already, by following the [installation](#installation) steps first
246
+ 1. Open the console in the WebUI directory
247
+ ```bash
248
+ cd sd-webui-forge-classic
249
+ ```
250
+ 2. Start the virtual environment
251
+ ```bash
252
+ venv\scripts\activate
253
+ ```
254
+ 3. Create a new folder
255
+ ```bash
256
+ mkdir repo
257
+ cd repo
258
+ ```
259
+ 4. Clone the repo
260
+ ```bash
261
+ git clone https://github.com/aredden/torch-cublas-hgemm
262
+ cd torch-cublas-hgemm
263
+ ```
264
+ 5. Install the library
265
+ ```
266
+ pip install -e . --no-build-isolation
267
+ ```
268
+
269
+ - If you installed `uv`, use `uv pip install` instead
270
+ - The installation takes a few minutes
271
+
272
+ </details>
273
+
274
+ ### Install triton
275
+
276
+ <details>
277
+ <summary>Expand</summary>
278
+
279
+ 0. Ensure the WebUI can properly launch already, by following the [installation](#installation) steps first
280
+ 1. Open the console in the WebUI directory
281
+ ```bash
282
+ cd sd-webui-forge-classic
283
+ ```
284
+ 2. Start the virtual environment
285
+ ```bash
286
+ venv\scripts\activate
287
+ ```
288
+ 3. Install the library
289
+ - **Windows**
290
+ ```bash
291
+ pip install triton-windows
292
+ ```
293
+ - **Linux**
294
+ ```bash
295
+ pip install triton
296
+ ```
297
+ - If you installed `uv`, use `uv pip install` instead
298
+
299
+ </details>
300
+
301
+ ### Install flash-attn
302
+
303
+ <details>
304
+ <summary>Expand</summary>
305
+
306
+ 0. Ensure the WebUI can properly launch already, by following the [installation](#installation) steps first
307
+ 1. Open the console in the WebUI directory
308
+ ```bash
309
+ cd sd-webui-forge-classic
310
+ ```
311
+ 2. Start the virtual environment
312
+ ```bash
313
+ venv\scripts\activate
314
+ ```
315
+ 3. Install the library
316
+ - **Windows**
317
+ - Download the pre-built `.whl` package from https://github.com/kingbri1/flash-attention/releases
318
+ ```bash
319
+ pip install flash_attn...win...whl
320
+ ```
321
+ - **Linux**
322
+ - Download the pre-built `.whl` package from https://github.com/Dao-AILab/flash-attention/releases
323
+ ```bash
324
+ pip install flash_attn...linux...whl
325
+ ```
326
+ - If you installed `uv`, use `uv pip install` instead
327
+ - **Important:** Download the correct `.whl` for your Python and PyTorch version
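If you are unsure which wheel matches your setup, you can print the Python and PyTorch versions from inside the activated venv first; a minimal check:

```bash
python --version
python -c "import torch; print(torch.__version__, torch.version.cuda)"
```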
328
+
329
+ </details>
330
+
331
+ ### Install sageattention 2
332
+ > If you only use **SDXL**, then `1.x` is already enough; `2.x` simply has partial support for **SD1** checkpoints
333
+
334
+ <details>
335
+ <summary>Expand</summary>
336
+
337
+ 0. Ensure the WebUI can properly launch already, by following the [installation](#installation) steps first
338
+ 1. Open the console in the WebUI directory
339
+ ```bash
340
+ cd sd-webui-forge-classic
341
+ ```
342
+ 2. Start the virtual environment
343
+ ```bash
344
+ venv\scripts\activate
345
+ ```
346
+ 3. Create a new folder
347
+ ```bash
348
+ mkdir repo
349
+ cd repo
350
+ ```
351
+ 4. Clone the repo
352
+ ```bash
353
+ git clone https://github.com/thu-ml/SageAttention
354
+ cd SageAttention
355
+ ```
356
+ 5. Install the library
357
+ ```
358
+ pip install -e . --no-build-isolation
359
+ ```
360
+
361
+ - If you installed `uv`, use `uv pip install` instead
362
+ - The installation takes a few minutes
363
+
364
+ </details>
+
+ <br>
+
+ ### Install older PyTorch
+ > Read this if your GPU does not support the latest PyTorch
+
+ <details>
+ <summary>Expand</summary>
+
+ 0. Navigate to the WebUI directory
+ 1. Edit the `webui-user.bat` file
+ 2. Add a new line to specify an older version (the sketch after this section shows where it sits in the file):
+ ```bash
+ set TORCH_COMMAND=pip install torch==2.1.2 torchvision==0.16.2 --extra-index-url https://download.pytorch.org/whl/cu121
+ ```
+
+ </details>
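+
+ For reference, the sketch below shows where that line sits in a typical `webui-user.bat`. Everything other than the `TORCH_COMMAND` line is only an example of what such a file usually contains, and `torch==2.1.2` with `cu121` is just one possible combination; pick versions supported by your GPU.
+
+ ```bash
+ @echo off
+
+ set PYTHON=
+ set GIT=
+ set VENV_DIR=
+ set COMMANDLINE_ARGS=
+ set TORCH_COMMAND=pip install torch==2.1.2 torchvision==0.16.2 --extra-index-url https://download.pytorch.org/whl/cu121
+
+ call webui.bat
+ ```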
+
+ <br>
+
+ ## Attention
+
+ > [!Important]
+ > The `--xformers` and `--sage` args are only responsible for installing the packages, **not** for whether the respective attention function is actually used; this also means you can remove them once the packages are successfully installed
+
+ **Forge Classic** tries to import the packages and automatically chooses the first available attention function, in the following order (see the sketch below):
+
+ 1. `SageAttention`
+ 2. `FlashAttention`
+ 3. `xformers`
+ 4. `PyTorch`
+ 5. `Basic`
+
+ > [!Note]
+ > The VAE only checks for `xformers`
+
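+ The sketch below illustrates that fallback idea; it is not the actual Forge Classic implementation, just a minimal, self-contained example of picking the first backend whose package is importable:
+
+ ```python
+ import importlib.util
+
+ # Same priority order as listed above; "basic" is the dependency-free fallback
+ PRIORITY = ["sageattention", "flash_attn", "xformers", "torch"]
+
+ def pick_attention_backend() -> str:
+     """Return the name of the first attention backend whose package can be imported."""
+     for name in PRIORITY:
+         if importlib.util.find_spec(name) is not None:
+             return name
+     return "basic"
+
+ print(pick_attention_backend())
+ ```
+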
+ In my experience, the speed of each attention function for SDXL ranks in the following order:
+
+ - `SageAttention` ≥ `FlashAttention` > `xformers` > `PyTorch` >> `Basic`
+
+ > [!Note]
+ > `SageAttention` is based on quantization, so its quality might be slightly worse than that of the others
+
+ ## Issues & Requests
+
+ - **Issues** about removed features will simply be ignored
+ - **Issues** regarding installation will be ignored if they are obviously user error
+ - **Feature Requests** not related to performance or optimization will simply be ignored
+ - For cutting-edge features, check out [reForge](https://github.com/Panchovix/stable-diffusion-webui-reForge) instead
+ - Non-Windows platforms will not be supported, as I cannot verify or maintain them
+
+ </details>
+
+ <hr>
+
+ <p align="center">
+ Special thanks to <b>AUTOMATIC1111</b>, <b>lllyasviel</b>, <b>comfyanonymous</b>, and <b>kijai</b>, <br>
+ along with the rest of the contributors, <br>
+ for their invaluable efforts in the open-source image generation community
+ </p>
configs/sd_xl_base.yaml ADDED
@@ -0,0 +1,98 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.13025
5
+ disable_first_stage_autocast: True
6
+
7
+ denoiser_config:
8
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
9
+ params:
10
+ num_idx: 1000
11
+
12
+ weighting_config:
13
+ target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
14
+ scaling_config:
15
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
16
+ discretization_config:
17
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
18
+
19
+ network_config:
20
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
21
+ params:
22
+ adm_in_channels: 2816
23
+ num_classes: sequential
24
+ use_checkpoint: False
25
+ in_channels: 4
26
+ out_channels: 4
27
+ model_channels: 320
28
+ attention_resolutions: [4, 2]
29
+ num_res_blocks: 2
30
+ channel_mult: [1, 2, 4]
31
+ num_head_channels: 64
32
+ use_spatial_transformer: True
33
+ use_linear_in_transformer: True
34
+ transformer_depth: [1, 2, 10] # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
35
+ context_dim: 2048
36
+ spatial_transformer_attn_type: softmax-xformers
37
+ legacy: False
38
+
39
+ conditioner_config:
40
+ target: sgm.modules.GeneralConditioner
41
+ params:
42
+ emb_models:
43
+ # crossattn cond
44
+ - is_trainable: False
45
+ input_key: txt
46
+ target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
47
+ params:
48
+ layer: hidden
49
+ layer_idx: 11
50
+ # crossattn and vector cond
51
+ - is_trainable: False
52
+ input_key: txt
53
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
54
+ params:
55
+ arch: ViT-bigG-14
56
+ version: laion2b_s39b_b160k
57
+ freeze: True
58
+ layer: penultimate
59
+ always_return_pooled: True
60
+ legacy: False
61
+ # vector cond
62
+ - is_trainable: False
63
+ input_key: original_size_as_tuple
64
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
65
+ params:
66
+ outdim: 256 # multiplied by two
67
+ # vector cond
68
+ - is_trainable: False
69
+ input_key: crop_coords_top_left
70
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
71
+ params:
72
+ outdim: 256 # multiplied by two
73
+ # vector cond
74
+ - is_trainable: False
75
+ input_key: target_size_as_tuple
76
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
77
+ params:
78
+ outdim: 256 # multiplied by two
79
+
80
+ first_stage_config:
81
+ target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
82
+ params:
83
+ embed_dim: 4
84
+ monitor: val/rec_loss
85
+ ddconfig:
86
+ attn_type: vanilla-xformers
87
+ double_z: true
88
+ z_channels: 4
89
+ resolution: 256
90
+ in_channels: 3
91
+ out_ch: 3
92
+ ch: 128
93
+ ch_mult: [1, 2, 4, 4]
94
+ num_res_blocks: 2
95
+ attn_resolutions: []
96
+ dropout: 0.0
97
+ lossconfig:
98
+ target: torch.nn.Identity
configs/sd_xl_inpaint.yaml ADDED
@@ -0,0 +1,98 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.13025
5
+ disable_first_stage_autocast: True
6
+
7
+ denoiser_config:
8
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
9
+ params:
10
+ num_idx: 1000
11
+
12
+ weighting_config:
13
+ target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
14
+ scaling_config:
15
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
16
+ discretization_config:
17
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
18
+
19
+ network_config:
20
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
21
+ params:
22
+ adm_in_channels: 2816
23
+ num_classes: sequential
24
+ use_checkpoint: False
25
+ in_channels: 9
26
+ out_channels: 4
27
+ model_channels: 320
28
+ attention_resolutions: [4, 2]
29
+ num_res_blocks: 2
30
+ channel_mult: [1, 2, 4]
31
+ num_head_channels: 64
32
+ use_spatial_transformer: True
33
+ use_linear_in_transformer: True
34
+ transformer_depth: [1, 2, 10] # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
35
+ context_dim: 2048
36
+ spatial_transformer_attn_type: softmax-xformers
37
+ legacy: False
38
+
39
+ conditioner_config:
40
+ target: sgm.modules.GeneralConditioner
41
+ params:
42
+ emb_models:
43
+ # crossattn cond
44
+ - is_trainable: False
45
+ input_key: txt
46
+ target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
47
+ params:
48
+ layer: hidden
49
+ layer_idx: 11
50
+ # crossattn and vector cond
51
+ - is_trainable: False
52
+ input_key: txt
53
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
54
+ params:
55
+ arch: ViT-bigG-14
56
+ version: laion2b_s39b_b160k
57
+ freeze: True
58
+ layer: penultimate
59
+ always_return_pooled: True
60
+ legacy: False
61
+ # vector cond
62
+ - is_trainable: False
63
+ input_key: original_size_as_tuple
64
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
65
+ params:
66
+ outdim: 256 # multiplied by two
67
+ # vector cond
68
+ - is_trainable: False
69
+ input_key: crop_coords_top_left
70
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
71
+ params:
72
+ outdim: 256 # multiplied by two
73
+ # vector cond
74
+ - is_trainable: False
75
+ input_key: target_size_as_tuple
76
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
77
+ params:
78
+ outdim: 256 # multiplied by two
79
+
80
+ first_stage_config:
81
+ target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
82
+ params:
83
+ embed_dim: 4
84
+ monitor: val/rec_loss
85
+ ddconfig:
86
+ attn_type: vanilla-xformers
87
+ double_z: true
88
+ z_channels: 4
89
+ resolution: 256
90
+ in_channels: 3
91
+ out_ch: 3
92
+ ch: 128
93
+ ch_mult: [1, 2, 4, 4]
94
+ num_res_blocks: 2
95
+ attn_resolutions: []
96
+ dropout: 0.0
97
+ lossconfig:
98
+ target: torch.nn.Identity
configs/sd_xl_refiner.yaml ADDED
@@ -0,0 +1,91 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.13025
5
+ disable_first_stage_autocast: True
6
+
7
+ denoiser_config:
8
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
9
+ params:
10
+ num_idx: 1000
11
+
12
+ weighting_config:
13
+ target: sgm.modules.diffusionmodules.denoiser_weighting.EpsWeighting
14
+ scaling_config:
15
+ target: sgm.modules.diffusionmodules.denoiser_scaling.EpsScaling
16
+ discretization_config:
17
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
18
+
19
+ network_config:
20
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
21
+ params:
22
+ adm_in_channels: 2560
23
+ num_classes: sequential
24
+ use_checkpoint: False
25
+ in_channels: 4
26
+ out_channels: 4
27
+ model_channels: 384
28
+ attention_resolutions: [4, 2]
29
+ num_res_blocks: 2
30
+ channel_mult: [1, 2, 4, 4]
31
+ num_head_channels: 64
32
+ use_spatial_transformer: True
33
+ use_linear_in_transformer: True
34
+ transformer_depth: 4
35
+ context_dim: [1280, 1280, 1280, 1280] # 1280
36
+ spatial_transformer_attn_type: softmax-xformers
37
+ legacy: False
38
+
39
+ conditioner_config:
40
+ target: sgm.modules.GeneralConditioner
41
+ params:
42
+ emb_models:
43
+ # crossattn and vector cond
44
+ - is_trainable: False
45
+ input_key: txt
46
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
47
+ params:
48
+ arch: ViT-bigG-14
49
+ version: laion2b_s39b_b160k
50
+ freeze: True
51
+ layer: penultimate
52
+ always_return_pooled: True
53
+ legacy: False
54
+ # vector cond
55
+ - is_trainable: False
56
+ input_key: original_size_as_tuple
57
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
58
+ params:
59
+ outdim: 256 # multiplied by two
60
+ # vector cond
61
+ - is_trainable: False
62
+ input_key: crop_coords_top_left
63
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
64
+ params:
65
+ outdim: 256 # multiplied by two
66
+ # vector cond
67
+ - is_trainable: False
68
+ input_key: aesthetic_score
69
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
70
+ params:
71
+ outdim: 256 # multiplied by one
72
+
73
+ first_stage_config:
74
+ target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
75
+ params:
76
+ embed_dim: 4
77
+ monitor: val/rec_loss
78
+ ddconfig:
79
+ attn_type: vanilla-xformers
80
+ double_z: true
81
+ z_channels: 4
82
+ resolution: 256
83
+ in_channels: 3
84
+ out_ch: 3
85
+ ch: 128
86
+ ch_mult: [1, 2, 4, 4]
87
+ num_res_blocks: 2
88
+ attn_resolutions: []
89
+ dropout: 0.0
90
+ lossconfig:
91
+ target: torch.nn.Identity
configs/sd_xl_v.yaml ADDED
@@ -0,0 +1,98 @@
1
+ model:
2
+ target: sgm.models.diffusion.DiffusionEngine
3
+ params:
4
+ scale_factor: 0.13025
5
+ disable_first_stage_autocast: True
6
+
7
+ denoiser_config:
8
+ target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
9
+ params:
10
+ num_idx: 1000
11
+
12
+ weighting_config:
13
+ target: sgm.modules.diffusionmodules.denoiser_weighting.VWeighting
14
+ scaling_config:
15
+ target: sgm.modules.diffusionmodules.denoiser_scaling.VScaling
16
+ discretization_config:
17
+ target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
18
+
19
+ network_config:
20
+ target: sgm.modules.diffusionmodules.openaimodel.UNetModel
21
+ params:
22
+ adm_in_channels: 2816
23
+ num_classes: sequential
24
+ use_checkpoint: False
25
+ in_channels: 4
26
+ out_channels: 4
27
+ model_channels: 320
28
+ attention_resolutions: [4, 2]
29
+ num_res_blocks: 2
30
+ channel_mult: [1, 2, 4]
31
+ num_head_channels: 64
32
+ use_spatial_transformer: True
33
+ use_linear_in_transformer: True
34
+ transformer_depth: [1, 2, 10] # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
35
+ context_dim: 2048
36
+ spatial_transformer_attn_type: softmax-xformers
37
+ legacy: False
38
+
39
+ conditioner_config:
40
+ target: sgm.modules.GeneralConditioner
41
+ params:
42
+ emb_models:
43
+ # crossattn cond
44
+ - is_trainable: False
45
+ input_key: txt
46
+ target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
47
+ params:
48
+ layer: hidden
49
+ layer_idx: 11
50
+ # crossattn and vector cond
51
+ - is_trainable: False
52
+ input_key: txt
53
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
54
+ params:
55
+ arch: ViT-bigG-14
56
+ version: laion2b_s39b_b160k
57
+ freeze: True
58
+ layer: penultimate
59
+ always_return_pooled: True
60
+ legacy: False
61
+ # vector cond
62
+ - is_trainable: False
63
+ input_key: original_size_as_tuple
64
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
65
+ params:
66
+ outdim: 256 # multiplied by two
67
+ # vector cond
68
+ - is_trainable: False
69
+ input_key: crop_coords_top_left
70
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
71
+ params:
72
+ outdim: 256 # multiplied by two
73
+ # vector cond
74
+ - is_trainable: False
75
+ input_key: target_size_as_tuple
76
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
77
+ params:
78
+ outdim: 256 # multiplied by two
79
+
80
+ first_stage_config:
81
+ target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
82
+ params:
83
+ embed_dim: 4
84
+ monitor: val/rec_loss
85
+ ddconfig:
86
+ attn_type: vanilla-xformers
87
+ double_z: true
88
+ z_channels: 4
89
+ resolution: 256
90
+ in_channels: 3
91
+ out_ch: 3
92
+ ch: 128
93
+ ch_mult: [1, 2, 4, 4]
94
+ num_res_blocks: 2
95
+ attn_resolutions: []
96
+ dropout: 0.0
97
+ lossconfig:
98
+ target: torch.nn.Identity
configs/v1-inference.yaml ADDED
@@ -0,0 +1,69 @@
1
+ model:
2
+ target: ldm.models.diffusion.ddpm.LatentDiffusion
3
+ params:
4
+ linear_start: 0.00085
5
+ linear_end: 0.0120
6
+ num_timesteps_cond: 1
7
+ log_every_t: 200
8
+ timesteps: 1000
9
+ first_stage_key: "jpg"
10
+ cond_stage_key: "txt"
11
+ image_size: 64
12
+ channels: 4
13
+ cond_stage_trainable: false # Note: different from the one we trained before
14
+ conditioning_key: crossattn
15
+ monitor: val/loss_simple_ema
16
+ scale_factor: 0.18215
17
+ use_ema: False
18
+
19
+ scheduler_config: # 10000 warmup steps
20
+ target: ldm.lr_scheduler.LambdaLinearScheduler
21
+ params:
22
+ warm_up_steps: [10000]
23
+ cycle_lengths: [10000000000000] # incredibly large number to prevent corner cases
24
+ f_start: [1.e-6]
25
+ f_max: [1.]
26
+ f_min: [1.]
27
+
28
+ unet_config:
29
+ target: ldm.modules.diffusionmodules.openaimodel.UNetModel
30
+ params:
31
+ image_size: 32 # unused
32
+ in_channels: 4
33
+ out_channels: 4
34
+ model_channels: 320
35
+ attention_resolutions: [4, 2, 1]
36
+ num_res_blocks: 2
37
+ channel_mult: [1, 2, 4, 4]
38
+ num_heads: 8
39
+ use_spatial_transformer: True
40
+ transformer_depth: 1
41
+ context_dim: 768
42
+ use_checkpoint: False
43
+ legacy: False
44
+
45
+ first_stage_config:
46
+ target: ldm.models.autoencoder.AutoencoderKL
47
+ params:
48
+ embed_dim: 4
49
+ monitor: val/rec_loss
50
+ ddconfig:
51
+ double_z: true
52
+ z_channels: 4
53
+ resolution: 256
54
+ in_channels: 3
55
+ out_ch: 3
56
+ ch: 128
57
+ ch_mult:
58
+ - 1
59
+ - 2
60
+ - 4
61
+ - 4
62
+ num_res_blocks: 2
63
+ attn_resolutions: []
64
+ dropout: 0.0
65
+ lossconfig:
66
+ target: torch.nn.Identity
67
+
68
+ cond_stage_config:
69
+ target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
configs/v1-inpainting-inference.yaml ADDED
@@ -0,0 +1,69 @@
1
+ model:
2
+ target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
3
+ params:
4
+ linear_start: 0.00085
5
+ linear_end: 0.0120
6
+ num_timesteps_cond: 1
7
+ log_every_t: 200
8
+ timesteps: 1000
9
+ first_stage_key: "jpg"
10
+ cond_stage_key: "txt"
11
+ image_size: 64
12
+ channels: 4
13
+ cond_stage_trainable: false # Note: different from the one we trained before
14
+ conditioning_key: hybrid # important
15
+ monitor: val/loss_simple_ema
16
+ scale_factor: 0.18215
17
+ finetune_keys: null
18
+
19
+ scheduler_config: # 10000 warmup steps
20
+ target: ldm.lr_scheduler.LambdaLinearScheduler
21
+ params:
22
+ warm_up_steps: [2500] # NOTE for resuming. use 10000 if starting from scratch
23
+ cycle_lengths: [10000000000000] # incredibly large number to prevent corner cases
24
+ f_start: [1.e-6]
25
+ f_max: [1.]
26
+ f_min: [1.]
27
+
28
+ unet_config:
29
+ target: ldm.modules.diffusionmodules.openaimodel.UNetModel
30
+ params:
31
+ image_size: 32 # unused
32
+ in_channels: 9 # 4 data + 4 downscaled image + 1 mask
33
+ out_channels: 4
34
+ model_channels: 320
35
+ attention_resolutions: [4, 2, 1]
36
+ num_res_blocks: 2
37
+ channel_mult: [1, 2, 4, 4]
38
+ num_heads: 8
39
+ use_spatial_transformer: True
40
+ transformer_depth: 1
41
+ context_dim: 768
42
+ use_checkpoint: False
43
+ legacy: False
44
+
45
+ first_stage_config:
46
+ target: ldm.models.autoencoder.AutoencoderKL
47
+ params:
48
+ embed_dim: 4
49
+ monitor: val/rec_loss
50
+ ddconfig:
51
+ double_z: true
52
+ z_channels: 4
53
+ resolution: 256
54
+ in_channels: 3
55
+ out_ch: 3
56
+ ch: 128
57
+ ch_mult:
58
+ - 1
59
+ - 2
60
+ - 4
61
+ - 4
62
+ num_res_blocks: 2
63
+ attn_resolutions: []
64
+ dropout: 0.0
65
+ lossconfig:
66
+ target: torch.nn.Identity
67
+
68
+ cond_stage_config:
69
+ target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
environment-wsl2.yaml ADDED
@@ -0,0 +1,11 @@
1
+ name: forge-classic
2
+ channels:
3
+ - pytorch
4
+ - defaults
5
+ dependencies:
6
+ - python=3.10
7
+ - pip=24.0
8
+ - cudatoolkit=11.8
9
+ - pytorch=2.5
10
+ - torchvision=0.20
11
+ - numpy=1.26
extensions-builtin/Lora/extra_networks_lora.py ADDED
@@ -0,0 +1,67 @@
1
+ from modules import extra_networks, shared
2
+
3
+ import networks
4
+
5
+
6
+ class ExtraNetworkLora(extra_networks.ExtraNetwork):
7
+ def __init__(self):
8
+ super().__init__("lora")
9
+
10
+ self.errors = {}
11
+ """mapping of network names to the number of errors the network had during operation"""
12
+
13
+ def activate(self, p, params_list):
14
+ additional = shared.opts.sd_lora
15
+
16
+ self.errors.clear()
17
+
18
+ if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional):
19
+ p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
20
+ params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
21
+
22
+ names = []
23
+ te_multipliers = []
24
+ unet_multipliers = []
25
+ dyn_dims = []
26
+ for params in params_list:
27
+ assert params.items
28
+
29
+ names.append(params.positional[0])
30
+
31
+ te_multiplier = float(params.positional[1]) if len(params.positional) > 1 else 1.0
32
+ te_multiplier = float(params.named.get("te", te_multiplier))
33
+
34
+ unet_multiplier = float(params.positional[2]) if len(params.positional) > 2 else te_multiplier
35
+ unet_multiplier = float(params.named.get("unet", unet_multiplier))
36
+
37
+ dyn_dim = int(params.positional[3]) if len(params.positional) > 3 else None
38
+ dyn_dim = int(params.named["dyn"]) if "dyn" in params.named else dyn_dim
39
+
40
+ te_multipliers.append(te_multiplier)
41
+ unet_multipliers.append(unet_multiplier)
42
+ dyn_dims.append(dyn_dim)
43
+
44
+ networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims)
45
+
46
+ if shared.opts.lora_add_hashes_to_infotext:
47
+ network_hashes = []
48
+ for item in networks.loaded_networks:
49
+ shorthash = item.network_on_disk.shorthash
50
+ if not shorthash:
51
+ continue
52
+
53
+ alias = item.mentioned_name
54
+ if not alias:
55
+ continue
56
+
57
+ alias = alias.replace(":", "").replace(",", "")
58
+
59
+ network_hashes.append(f"{alias}: {shorthash}")
60
+
61
+ if network_hashes:
62
+ p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes)
63
+
64
+ def deactivate(self, p):
65
+ if self.errors:
66
+ p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items()))
67
+ self.errors.clear()
extensions-builtin/Lora/lora.py ADDED
@@ -0,0 +1,9 @@
1
+ import networks
2
+
3
+ list_available_loras = networks.list_available_networks
4
+
5
+ available_loras = networks.available_networks
6
+ available_lora_aliases = networks.available_network_aliases
7
+ available_lora_hash_lookup = networks.available_network_hash_lookup
8
+ forbidden_lora_aliases = networks.forbidden_network_aliases
9
+ loaded_loras = networks.loaded_networks
extensions-builtin/Lora/network.py ADDED
@@ -0,0 +1,198 @@
1
+ from __future__ import annotations
2
+
3
+ import enum
4
+ from collections import namedtuple
5
+
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+ from modules import cache, errors, hashes, sd_models, shared
9
+
10
+ NetworkWeights = namedtuple("NetworkWeights", ["network_key", "sd_key", "w", "sd_module"])
11
+
12
+ metadata_tags_order = {
13
+ "ss_sd_model_name": 1,
14
+ "ss_resolution": 2,
15
+ "ss_clip_skip": 3,
16
+ "ss_num_train_images": 10,
17
+ "ss_tag_frequency": 20,
18
+ }
19
+
20
+
21
+ class SDVersion(enum.Enum):
22
+ Unknown = -1
23
+ SD1 = 1
24
+ SD2 = 2
25
+ SDXL = 3
26
+
27
+
28
+ class NetworkOnDisk:
29
+ def __init__(self, name, filename):
30
+ self.name = name
31
+ self.filename = filename
32
+ self.metadata = {}
33
+ self.is_safetensors = filename.lower().endswith(".safetensors")
34
+
35
+ def read_metadata():
36
+ metadata = sd_models.read_metadata_from_safetensors(filename)
37
+ metadata.pop("ssmd_cover_images", None) # cover images are too big to display in UI
38
+ return metadata
39
+
40
+ if self.is_safetensors:
41
+ try:
42
+ self.metadata = cache.cached_data_for_file("safetensors-metadata", "/".join(["lora", self.name]), filename, read_metadata)
43
+ except Exception as e:
44
+ errors.display(e, f"reading lora {filename}")
45
+
46
+ if self.metadata:
47
+ m = {}
48
+ for k, v in sorted(self.metadata.items(), key=lambda x: metadata_tags_order.get(x[0], 999)):
49
+ m[k] = v
50
+
51
+ self.metadata = m
52
+
53
+ self.alias = self.metadata.get("ss_output_name", self.name)
54
+
55
+ self.hash = None
56
+ self.shorthash = None
57
+ self.set_hash(self.metadata.get("sshs_model_hash") or hashes.sha256_from_cache(self.filename, "/".join(["lora", self.name]), use_addnet_hash=self.is_safetensors) or "")
58
+
59
+ self.sd_version = self.detect_version()
60
+
61
+ def detect_version(self):
62
+ if str(self.metadata.get("ss_base_model_version", "")).startswith("sdxl_"):
63
+ return SDVersion.SDXL
64
+ elif str(self.metadata.get("ss_v2", "")) == "True":
65
+ return SDVersion.SD2
66
+ elif len(self.metadata):
67
+ return SDVersion.SD1
68
+
69
+ return SDVersion.Unknown
70
+
71
+ def set_hash(self, v):
72
+ self.hash = v
73
+ self.shorthash = self.hash[0:12]
74
+
75
+ if self.shorthash:
76
+ import networks
77
+
78
+ networks.available_network_hash_lookup[self.shorthash] = self
79
+
80
+ def read_hash(self):
81
+ if not self.hash:
82
+ self.set_hash(
83
+ hashes.sha256(
84
+ self.filename,
85
+ "/".join(["lora", self.name]),
86
+ use_addnet_hash=self.is_safetensors,
87
+ )
88
+ or ""
89
+ )
90
+
91
+ def get_alias(self):
92
+ import networks
93
+
94
+ if shared.opts.lora_preferred_name == "Filename" or self.alias.lower() in networks.forbidden_network_aliases:
95
+ return self.name
96
+ else:
97
+ return self.alias
98
+
99
+
100
+ class Network: # LoraModule
101
+ def __init__(self, name, network_on_disk: NetworkOnDisk):
102
+ self.name = name
103
+ self.network_on_disk = network_on_disk
104
+ self.te_multiplier = 1.0
105
+ self.unet_multiplier = 1.0
106
+ self.dyn_dim = None
107
+ self.modules = {}
108
+ self.bundle_embeddings = {}
109
+ self.mtime = None
110
+
111
+ self.mentioned_name = None
112
+ """the text that was used to add the network to prompt - can be either name or an alias"""
113
+
114
+
115
+ class ModuleType:
116
+ def create_module(self, net: Network, weights: NetworkWeights) -> Network | None:
117
+ return None
118
+
119
+
120
+ class NetworkModule:
121
+ def __init__(self, net: Network, weights: NetworkWeights):
122
+ self.network = net
123
+ self.network_key = weights.network_key
124
+ self.sd_key = weights.sd_key
125
+ self.sd_module = weights.sd_module
126
+
127
+ if hasattr(self.sd_module, "weight"):
128
+ self.shape = self.sd_module.weight.shape
129
+
130
+ self.ops = None
131
+ self.extra_kwargs = {}
132
+ if isinstance(self.sd_module, nn.Conv2d):
133
+ self.ops = F.conv2d
134
+ self.extra_kwargs = {
135
+ "stride": self.sd_module.stride,
136
+ "padding": self.sd_module.padding,
137
+ }
138
+ elif isinstance(self.sd_module, nn.Linear):
139
+ self.ops = F.linear
140
+ elif isinstance(self.sd_module, nn.LayerNorm):
141
+ self.ops = F.layer_norm
142
+ self.extra_kwargs = {
143
+ "normalized_shape": self.sd_module.normalized_shape,
144
+ "eps": self.sd_module.eps,
145
+ }
146
+ elif isinstance(self.sd_module, nn.GroupNorm):
147
+ self.ops = F.group_norm
148
+ self.extra_kwargs = {
149
+ "num_groups": self.sd_module.num_groups,
150
+ "eps": self.sd_module.eps,
151
+ }
152
+
153
+ self.dim = None
154
+ self.bias = weights.w.get("bias")
155
+ self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None
156
+ self.scale = weights.w["scale"].item() if "scale" in weights.w else None
157
+
158
+ def multiplier(self):
159
+ if "transformer" in self.sd_key[:20]:
160
+ return self.network.te_multiplier
161
+ else:
162
+ return self.network.unet_multiplier
163
+
164
+ def calc_scale(self):
165
+ if self.scale is not None:
166
+ return self.scale
167
+ if self.dim is not None and self.alpha is not None:
168
+ return self.alpha / self.dim
169
+
170
+ return 1.0
171
+
172
+ def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
173
+ if self.bias is not None:
174
+ updown = updown.reshape(self.bias.shape)
175
+ updown += self.bias.to(orig_weight.device, dtype=updown.dtype)
176
+ updown = updown.reshape(output_shape)
177
+
178
+ if len(output_shape) == 4:
179
+ updown = updown.reshape(output_shape)
180
+
181
+ if orig_weight.size().numel() == updown.size().numel():
182
+ updown = updown.reshape(orig_weight.shape)
183
+
184
+ if ex_bias is not None:
185
+ ex_bias = ex_bias * self.multiplier()
186
+
187
+ return updown * self.calc_scale() * self.multiplier(), ex_bias
188
+
189
+ def calc_updown(self, target):
190
+ raise NotImplementedError
191
+
192
+ def forward(self, x, y):
193
+ """A general forward implementation for all modules"""
194
+ if self.ops is None:
195
+ raise NotImplementedError
196
+
197
+ updown, ex_bias = self.calc_updown(self.sd_module.weight)
198
+ return y + self.ops(x, weight=updown, bias=ex_bias, **self.extra_kwargs)
extensions-builtin/Lora/networks.py ADDED
@@ -0,0 +1,150 @@
1
+ import functools
2
+ import os.path
3
+ import re
4
+
5
+ from ldm_patched.modules.sd import load_lora_for_models
6
+ from ldm_patched.modules.utils import load_torch_file
7
+ from modules import errors, scripts, sd_models, shared
8
+
9
+ import network
10
+
11
+
12
+ @functools.lru_cache(maxsize=4, typed=False)
13
+ def load_lora_state_dict(filename):
14
+ return load_torch_file(filename, safe_load=True)
15
+
16
+
17
+ def load_network(name, network_on_disk):
18
+ net = network.Network(name, network_on_disk)
19
+ net.mtime = os.path.getmtime(network_on_disk.filename)
20
+ return net
21
+
22
+
23
+ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
24
+ global lora_state_dict_cache
25
+
26
+ current_sd = sd_models.model_data.get_sd_model()
27
+ if current_sd is None:
28
+ return
29
+
30
+ loaded_networks.clear()
31
+
32
+ networks_on_disk = [(available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None)) for name in names]
33
+ assert not any(x is None for x in networks_on_disk)
34
+
35
+ for network_on_disk, name in zip(networks_on_disk, names):
36
+ try:
37
+ net = load_network(name, network_on_disk)
38
+ except Exception as e:
39
+ errors.display(e, f"loading network {network_on_disk.filename}")
40
+ continue
41
+ net.mentioned_name = name
42
+ network_on_disk.read_hash()
43
+ loaded_networks.append(net)
44
+
45
+ compiled_lora_targets = []
46
+ for a, b, c in zip(networks_on_disk, unet_multipliers, te_multipliers):
47
+ compiled_lora_targets.append([a.filename, b, c])
48
+
49
+ compiled_lora_targets_hash = str(compiled_lora_targets)
50
+
51
+ if current_sd.current_lora_hash == compiled_lora_targets_hash:
52
+ return
53
+
54
+ current_sd.current_lora_hash = compiled_lora_targets_hash
55
+ current_sd.forge_objects.unet = current_sd.forge_objects_original.unet
56
+ current_sd.forge_objects.clip = current_sd.forge_objects_original.clip
57
+
58
+ for filename, strength_model, strength_clip in compiled_lora_targets:
59
+ lora_sd = load_lora_state_dict(filename)
60
+ current_sd.forge_objects.unet, current_sd.forge_objects.clip = load_lora_for_models(
61
+ current_sd.forge_objects.unet,
62
+ current_sd.forge_objects.clip,
63
+ lora_sd,
64
+ strength_model,
65
+ strength_clip,
66
+ filename,
67
+ )
68
+
69
+ current_sd.forge_objects_after_applying_lora = current_sd.forge_objects.shallow_copy()
70
+
71
+
72
+ def list_available_networks():
73
+ available_networks.clear()
74
+ available_network_aliases.clear()
75
+ available_network_hash_lookup.clear()
76
+ forbidden_network_aliases.clear()
77
+ forbidden_network_aliases.update({"none": 1, "Addams": 1})
78
+
79
+ candidates = list(
80
+ shared.walk_files(
81
+ shared.cmd_opts.lora_dir,
82
+ allowed_extensions=[".pt", ".ckpt", ".safetensors"],
83
+ )
84
+ )
85
+
86
+ for filename in candidates:
87
+ if os.path.isdir(filename):
88
+ continue
89
+
90
+ name = os.path.splitext(os.path.basename(filename))[0]
91
+ try:
92
+ entry = network.NetworkOnDisk(name, filename)
93
+ except OSError: # should catch FileNotFoundError and PermissionError, etc.
94
+ errors.report(f"Failed to load network {name} from {filename}", exc_info=True)
95
+ continue
96
+
97
+ available_networks[name] = entry
98
+
99
+ if entry.alias in available_network_aliases:
100
+ forbidden_network_aliases[entry.alias.lower()] = 1
101
+
102
+ available_network_aliases[name] = entry
103
+ available_network_aliases[entry.alias] = entry
104
+
105
+
106
+ re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")
107
+
108
+
109
+ def infotext_pasted(infotext, params):
110
+ if "AddNet Module 1" in [x[1] for x in scripts.scripts_txt2img.infotext_fields]:
111
+ return # if the other extension is active, it will handle those fields, no need to do anything
112
+
113
+ added = []
114
+
115
+ for k in params:
116
+ if not k.startswith("AddNet Model "):
117
+ continue
118
+
119
+ num = k[13:]
120
+
121
+ if params.get("AddNet Module " + num) != "LoRA":
122
+ continue
123
+
124
+ name = params.get("AddNet Model " + num)
125
+ if name is None:
126
+ continue
127
+
128
+ m = re_network_name.match(name)
129
+ if m:
130
+ name = m.group(1)
131
+
132
+ multiplier = params.get("AddNet Weight A " + num, "1.0")
133
+
134
+ added.append(f"<lora:{name}:{multiplier}>")
135
+
136
+ if added:
137
+ params["Prompt"] += "\n" + "".join(added)
138
+
139
+
140
+ extra_network_lora = None
141
+
142
+ available_networks = {}
143
+ available_network_aliases = {}
144
+ loaded_networks = []
145
+ loaded_bundle_embeddings = {}
146
+ networks_in_memory = {}
147
+ available_network_hash_lookup = {}
148
+ forbidden_network_aliases = {}
149
+
150
+ list_available_networks()
extensions-builtin/Lora/preload.py ADDED
@@ -0,0 +1,13 @@
1
+ from os.path import join
2
+
3
+ from modules import paths
4
+ from modules.paths_internal import normalized_filepath
5
+
6
+
7
+ def preload(parser):
8
+ parser.add_argument(
9
+ "--lora-dir",
10
+ type=normalized_filepath,
11
+ help="Path to directory with LoRA networks",
12
+ default=join(paths.models_path, "Lora"),
13
+ )
extensions-builtin/Lora/scripts/lora_script.py ADDED
@@ -0,0 +1,83 @@
1
+ import re
2
+
3
+ import gradio as gr
4
+ from fastapi import FastAPI
5
+ from modules import extra_networks, script_callbacks, shared, ui_extra_networks
6
+
7
+ import extra_networks_lora
8
+ import lora # noqa: F401
9
+ import network
10
+ import networks
11
+ import ui_extra_networks_lora
12
+
13
+ shared.options_templates.update(
14
+ shared.options_section(
15
+ ("extra_networks", "Extra Networks"),
16
+ {
17
+ "sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks),
18
+ "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
19
+ "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
20
+ "lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"),
21
+ "lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
22
+ },
23
+ )
24
+ )
25
+
26
+
27
+ if shared.cmd_opts.api:
28
+
29
+ def create_lora_json(obj: network.NetworkOnDisk):
30
+ return {
31
+ "name": obj.name,
32
+ "alias": obj.alias,
33
+ "path": obj.filename,
34
+ "metadata": obj.metadata,
35
+ }
36
+
37
+ def api_networks(_: gr.Blocks, app: FastAPI):
38
+ @app.get("/sdapi/v1/loras")
39
+ async def get_loras():
40
+ return [create_lora_json(obj) for obj in networks.available_networks.values()]
41
+
42
+ @app.post("/sdapi/v1/refresh-loras")
43
+ async def refresh_loras():
44
+ return networks.list_available_networks()
45
+
46
+ script_callbacks.on_app_started(api_networks)
47
+
48
+
49
+ re_lora = re.compile("<lora:([^:]+):")
50
+
51
+
52
+ def infotext_pasted(infotext, d):
53
+ hashes = d.get("Lora hashes")
54
+ if not hashes:
55
+ return
56
+
57
+ hashes = [x.strip().split(":", 1) for x in hashes.split(",")]
58
+ hashes = {x[0].strip().replace(",", ""): x[1].strip() for x in hashes}
59
+
60
+ def network_replacement(m):
61
+ alias = m.group(1)
62
+ shorthash = hashes.get(alias)
63
+ if shorthash is None:
64
+ return m.group(0)
65
+
66
+ network_on_disk = networks.available_network_hash_lookup.get(shorthash)
67
+ if network_on_disk is None:
68
+ return m.group(0)
69
+
70
+ return f"<lora:{network_on_disk.get_alias()}:"
71
+
72
+ d["Prompt"] = re.sub(re_lora, network_replacement, d["Prompt"])
73
+
74
+
75
+ def before_ui():
76
+ ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())
77
+ networks.extra_network_lora = extra_networks_lora.ExtraNetworkLora()
78
+ extra_networks.register_extra_network(networks.extra_network_lora)
79
+
80
+
81
+ script_callbacks.on_before_ui(before_ui)
82
+ script_callbacks.on_infotext_pasted(networks.infotext_pasted)
83
+ script_callbacks.on_infotext_pasted(infotext_pasted)
extensions-builtin/Lora/ui_edit_user_metadata.py ADDED
@@ -0,0 +1,217 @@
1
+ import datetime
2
+ import html
3
+ import re
4
+
5
+ import gradio as gr
6
+ from modules.ui_extra_networks_user_metadata import UserMetadataEditor
7
+
8
+ re_word = re.compile(r"[-_\w']+")
9
+ re_comma = re.compile(r" *, *")
10
+
11
+
12
+ def is_non_comma_tagset(tags):
13
+ average_tag_length = sum(len(x) for x in tags.keys()) / len(tags)
14
+ return average_tag_length >= 16
15
+
16
+
17
+ def build_tags(metadata):
18
+ tags = {}
19
+
20
+ for _, tags_dict in metadata.get("ss_tag_frequency", {}).items():
21
+ for tag, tag_count in tags_dict.items():
22
+ tag = tag.strip()
23
+ tags[tag] = tags.get(tag, 0) + int(tag_count)
24
+
25
+ if tags and is_non_comma_tagset(tags):
26
+ new_tags = {}
27
+
28
+ for text, text_count in tags.items():
29
+ for word in re.findall(re_word, text):
30
+ if len(word) < 3:
31
+ continue
32
+
33
+ new_tags[word] = new_tags.get(word, 0) + text_count
34
+
35
+ tags = new_tags
36
+
37
+ ordered_tags = sorted(tags.keys(), key=tags.get, reverse=True)
38
+ return [(tag, tags[tag]) for tag in ordered_tags]
39
+
40
+
41
+ class LoraUserMetadataEditor(UserMetadataEditor):
42
+ def __init__(self, ui, tabname, page):
43
+ super().__init__(ui, tabname, page)
44
+
45
+ self.select_sd_version = None
46
+
47
+ self.taginfo = None
48
+ self.edit_activation_text = None
49
+ self.slider_preferred_weight = None
50
+ self.edit_notes = None
51
+
52
+ def save_lora_user_metadata(
53
+ self,
54
+ name,
55
+ desc,
56
+ sd_version,
57
+ activation_text,
58
+ preferred_weight,
59
+ negative_text,
60
+ notes,
61
+ ):
62
+ user_metadata = self.get_user_metadata(name)
63
+ user_metadata["description"] = desc
64
+ user_metadata["sd version"] = sd_version
65
+ user_metadata["activation text"] = activation_text
66
+ user_metadata["preferred weight"] = preferred_weight
67
+ user_metadata["negative text"] = negative_text
68
+ user_metadata["notes"] = notes
69
+
70
+ self.write_user_metadata(name, user_metadata)
71
+
72
+ def get_metadata_table(self, name):
73
+ table = super().get_metadata_table(name)
74
+ item = self.page.items.get(name, {})
75
+ metadata = item.get("metadata") or {}
76
+
77
+ keys = {
78
+ "ss_output_name": "Output name:",
79
+ "ss_sd_model_name": "Model:",
80
+ "ss_clip_skip": "Clip skip:",
81
+ "ss_network_module": "Kohya module:",
82
+ }
83
+
84
+ for key, label in keys.items():
85
+ value = metadata.get(key, None)
86
+ if value is not None and str(value) != "None":
87
+ table.append((label, html.escape(value)))
88
+
89
+ ss_training_started_at = metadata.get("ss_training_started_at")
90
+ if ss_training_started_at:
91
+ table.append(
92
+ (
93
+ "Date trained:",
94
+ datetime.datetime.fromtimestamp(float(ss_training_started_at)).strftime("%Y-%m-%d %H:%M"),
95
+ )
96
+ )
97
+
98
+ ss_bucket_info = metadata.get("ss_bucket_info")
99
+ if ss_bucket_info and "buckets" in ss_bucket_info:
100
+ resolutions = {}
101
+ for _, bucket in ss_bucket_info["buckets"].items():
102
+ resolution = bucket["resolution"]
103
+ resolution = f"{resolution[1]}x{resolution[0]}"
104
+ resolutions[resolution] = resolutions.get(resolution, 0) + int(bucket["count"])
105
+
106
+ resolutions_list = sorted(resolutions.keys(), key=resolutions.get, reverse=True)
107
+ resolutions_text = html.escape(", ".join(resolutions_list[0:4]))
108
+ if len(resolutions) > 4:
109
+ resolutions_text += ", ..."
110
+ resolutions_text = f"<span title='{html.escape(', '.join(resolutions_list))}'>{resolutions_text}</span>"
111
+
112
+ table.append(
113
+ (
114
+ "Resolutions:" if len(resolutions_list) > 1 else "Resolution:",
115
+ resolutions_text,
116
+ )
117
+ )
118
+
119
+ image_count = 0
120
+ for _, params in metadata.get("ss_dataset_dirs", {}).items():
121
+ image_count += int(params.get("img_count", 0))
122
+
123
+ if image_count:
124
+ table.append(("Dataset size:", image_count))
125
+
126
+ return table
127
+
128
+ def put_values_into_components(self, name):
129
+ user_metadata = self.get_user_metadata(name)
130
+ values = super().put_values_into_components(name)
131
+
132
+ item = self.page.items.get(name, {})
133
+ metadata = item.get("metadata") or {}
134
+
135
+ tags = build_tags(metadata)
136
+ gradio_tags = [(tag, str(count)) for tag, count in tags[0:24]]
137
+
138
+ return [
139
+ *values[0:5],
140
+ gr.update(value=item.get("sd_version", "Unknown")),
141
+ gr.HighlightedText.update(value=gradio_tags, visible=True if tags else False),
142
+ user_metadata.get("activation text", ""),
143
+ float(user_metadata.get("preferred weight", 0.0)),
144
+ user_metadata.get("negative text", ""),
145
+ ]
146
+
147
+ def create_extra_default_items_in_left_column(self):
148
+ self.select_sd_version = gr.Dropdown(
149
+ choices=("SD1", "SD2", "SDXL", "Unknown"),
150
+ value="Unknown",
151
+ label="Stable Diffusion Version",
152
+ interactive=True,
153
+ )
154
+
155
+ def create_editor(self):
156
+ self.create_default_editor_elems()
157
+
158
+ self.taginfo = gr.HighlightedText(label="Training dataset tags")
159
+ self.edit_activation_text = gr.Text(label="Activation text", info="Will be added to prompt along with Lora")
160
+ self.edit_negative_text = gr.Text(label="Negative prompt", info="Will be added to negative prompts")
161
+ self.slider_preferred_weight = gr.Slider(
162
+ label="Preferred weight",
163
+ info="Set to 0 to use the default set in Settings",
164
+ minimum=0.0,
165
+ maximum=2.0,
166
+ step=0.1,
167
+ )
168
+ self.edit_notes = gr.TextArea(label="Notes", lines=4)
169
+
170
+ def select_tag(activation_text, evt: gr.SelectData):
171
+ tag = evt.value[0]
172
+
173
+ words = re.split(re_comma, activation_text)
174
+ if tag in words:
175
+ words = [x for x in words if x != tag and x.strip()]
176
+ return ", ".join(words)
177
+
178
+ return activation_text + ", " + tag if activation_text else tag
179
+
180
+ self.taginfo.select(
181
+ fn=select_tag,
182
+ inputs=[self.edit_activation_text],
183
+ outputs=[self.edit_activation_text],
184
+ show_progress=False,
185
+ )
186
+
187
+ self.create_default_buttons()
188
+
189
+ viewed_components = [
190
+ self.edit_name,
191
+ self.edit_description,
192
+ self.html_filedata,
193
+ self.html_preview,
194
+ self.edit_notes,
195
+ self.select_sd_version,
196
+ self.taginfo,
197
+ self.edit_activation_text,
198
+ self.slider_preferred_weight,
199
+ self.edit_negative_text,
200
+ ]
201
+
202
+ self.button_edit.click(
203
+ fn=self.put_values_into_components,
204
+ inputs=[self.edit_name_input],
205
+ outputs=viewed_components,
206
+ ).then(fn=lambda: gr.update(visible=True), inputs=[], outputs=[self.box])
207
+
208
+ edited_components = [
209
+ self.edit_description,
210
+ self.select_sd_version,
211
+ self.edit_activation_text,
212
+ self.slider_preferred_weight,
213
+ self.edit_negative_text,
214
+ self.edit_notes,
215
+ ]
216
+
217
+ self.setup_save_handler(self.button_save, self.save_lora_user_metadata, edited_components)
extensions-builtin/Lora/ui_extra_networks_lora.py ADDED
@@ -0,0 +1,94 @@
1
+ from os import makedirs
2
+ from os.path import splitext
3
+
4
+ from modules import shared
5
+ from modules.ui_extra_networks import ExtraNetworksPage, quote_js
6
+
7
+ import network
8
+ import networks
9
+ from ui_edit_user_metadata import LoraUserMetadataEditor
10
+
11
+
12
+ class ExtraNetworksPageLora(ExtraNetworksPage):
13
+ def __init__(self):
14
+ super().__init__("Lora")
15
+ makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
16
+
17
+ def refresh(self):
18
+ networks.list_available_networks()
19
+
20
+ def create_item(self, name, index=None, enable_filter=True):
21
+ lora_on_disk = networks.available_networks.get(name)
22
+ if lora_on_disk is None:
23
+ return
24
+
25
+ path = splitext(lora_on_disk.filename)[0]
26
+ search_terms = [self.search_terms_from_path(lora_on_disk.filename)]
27
+ if lora_on_disk.hash:
28
+ search_terms.append(lora_on_disk.hash)
29
+
30
+ item = {
31
+ "name": name,
32
+ "filename": lora_on_disk.filename,
33
+ "shorthash": lora_on_disk.shorthash,
34
+ "preview": self.find_preview(path),
35
+ "description": self.find_description(path),
36
+ "search_terms": search_terms,
37
+ "local_preview": f"{path}.{shared.opts.samples_format}",
38
+ "metadata": lora_on_disk.metadata,
39
+ "sort_keys": {
40
+ "default": index,
41
+ **self.get_sort_keys(lora_on_disk.filename),
42
+ },
43
+ "sd_version": lora_on_disk.sd_version.name,
44
+ }
45
+
46
+ self.read_user_metadata(item)
47
+
48
+ sd_version = item["user_metadata"].get("sd version")
49
+ if sd_version in network.SDVersion.__members__:
50
+ item["sd_version"] = sd_version
51
+ sd_version = network.SDVersion[sd_version]
52
+ else:
53
+ sd_version = lora_on_disk.sd_version
54
+
55
+ if enable_filter and not shared.opts.lora_show_all:
56
+ if sd_version is network.SDVersion.Unknown:
57
+ model_version = network.SDVersion.SDXL if shared.sd_model.is_sdxl else network.SDVersion.SD1
58
+ if model_version.name in shared.opts.lora_hide_unknown_for_versions:
59
+ return None
60
+ elif shared.sd_model.is_sdxl and sd_version != network.SDVersion.SDXL:
61
+ return None
62
+ elif shared.sd_model.is_sd2 and sd_version != network.SDVersion.SD2:
63
+ return None
64
+ elif shared.sd_model.is_sd1 and sd_version != network.SDVersion.SD1:
65
+ return None
66
+
67
+ alias = lora_on_disk.get_alias()
68
+ activation_text = item["user_metadata"].get("activation text")
69
+ preferred_weight = item["user_metadata"].get("preferred weight", 0.0)
70
+ item["prompt"] = quote_js(f"<lora:{alias}:") + " + " + (str(preferred_weight) if preferred_weight else "opts.extra_networks_default_multiplier") + " + " + quote_js(">")
71
+
72
+ if activation_text:
73
+ item["prompt"] += " + " + quote_js(" " + activation_text)
74
+
75
+ negative_prompt = item["user_metadata"].get("negative text")
76
+ item["negative_prompt"] = quote_js("")
77
+ if negative_prompt:
78
+ item["negative_prompt"] = quote_js("(" + negative_prompt + ":1)")
79
+
80
+ return item
81
+
82
+ def list_items(self):
83
+ """instantiate a list to protect against concurrent modification"""
84
+ names = list(networks.available_networks)
85
+ for index, name in enumerate(names):
86
+ item = self.create_item(name, index)
87
+ if item is not None:
88
+ yield item
89
+
90
+ def allowed_directories_for_previews(self):
91
+ return [shared.cmd_opts.lora_dir]
92
+
93
+ def create_user_metadata_editor(self, ui, tabname):
94
+ return LoraUserMetadataEditor(ui, tabname, self)
extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js ADDED
@@ -0,0 +1,940 @@
1
+ (function () {
2
+
3
+ onUiLoaded(async () => {
4
+ const elementIDs = {
5
+ img2imgTabs: "#mode_img2img .tab-nav",
6
+ inpaint: "#img2maskimg",
7
+ inpaintSketch: "#inpaint_sketch",
8
+ rangeGroup: "#img2img_column_size",
9
+ sketch: "#img2img_sketch"
10
+ };
11
+
12
+ const tabNameToElementId = {
13
+ "Inpaint sketch": elementIDs.inpaintSketch,
14
+ "Inpaint": elementIDs.inpaint,
15
+ "Sketch": elementIDs.sketch
16
+ };
17
+
18
+ /** Waits for an element to be present in the DOM */
19
+ const waitForElement = (id) => new Promise(resolve => {
20
+ const checkForElement = () => {
21
+ const element = document.querySelector(id);
22
+ if (element) return resolve(element);
23
+ setTimeout(checkForElement, 100);
24
+ };
25
+ checkForElement();
26
+ });
27
+
28
+ function getActiveTab(elements, all = false) {
29
+ const tabs = elements.img2imgTabs.querySelectorAll("button");
30
+ if (all) return tabs;
31
+
32
+ for (let tab of tabs) {
33
+ if (tab.classList.contains("selected"))
34
+ return tab;
35
+ }
36
+ }
37
+
38
+ // Get tab ID
39
+ function getTabId(elements) {
40
+ const activeTab = getActiveTab(elements);
41
+ return tabNameToElementId[activeTab.innerText];
42
+ }
43
+
44
+ // Wait until opts loaded
45
+ async function waitForOpts() {
46
+ for (; ;) {
47
+ if (window.opts && Object.keys(window.opts).length) {
48
+ return window.opts;
49
+ }
50
+ await new Promise(resolve => setTimeout(resolve, 100));
51
+ }
52
+ }
53
+
54
+ // Detect whether the element has a horizontal scroll bar
55
+ function hasHorizontalScrollbar(element) {
56
+ return element.scrollWidth > element.clientWidth;
57
+ }
58
+
59
+ // Function for defining the "Ctrl", "Shift" and "Alt" keys
60
+ function isModifierKey(event, key) {
61
+ switch (key) {
62
+ case "Ctrl":
63
+ return event.ctrlKey;
64
+ case "Shift":
65
+ return event.shiftKey;
66
+ case "Alt":
67
+ return event.altKey;
68
+ default:
69
+ return false;
70
+ }
71
+ }
72
+
73
+ // Check if hotkey is valid
74
+ function isValidHotkey(value) {
75
+ const specialKeys = ["Ctrl", "Alt", "Shift", "Disable"];
76
+ return (
77
+ (typeof value === "string" &&
78
+ value.length === 1 &&
79
+ /[a-z]/i.test(value)) ||
80
+ specialKeys.includes(value)
81
+ );
82
+ }
83
+
84
+ // Normalize hotkey
85
+ function normalizeHotkey(hotkey) {
86
+ return hotkey.length === 1 ? "Key" + hotkey.toUpperCase() : hotkey;
87
+ }
88
+
89
+ // Format hotkey for display
90
+ function formatHotkeyForDisplay(hotkey) {
91
+ return hotkey.startsWith("Key") ? hotkey.slice(3) : hotkey;
92
+ }
93
+
94
+ // Create hotkey configuration with the provided options
95
+ function createHotkeyConfig(defaultHotkeysConfig, hotkeysConfigOpts) {
96
+ const result = {}; // Resulting hotkey configuration
97
+ const usedKeys = new Set(); // Set of used hotkeys
98
+
99
+ // Iterate through defaultHotkeysConfig keys
100
+ for (const key in defaultHotkeysConfig) {
101
+ const userValue = hotkeysConfigOpts[key]; // User-provided hotkey value
102
+ const defaultValue = defaultHotkeysConfig[key]; // Default hotkey value
103
+
104
+ // Apply appropriate value for undefined, boolean, or object userValue
105
+ if (
106
+ userValue === undefined ||
107
+ typeof userValue === "boolean" ||
108
+ typeof userValue === "object" ||
109
+ userValue === "disable"
110
+ ) {
111
+ result[key] =
112
+ userValue === undefined ? defaultValue : userValue;
113
+ } else if (isValidHotkey(userValue)) {
114
+ const normalizedUserValue = normalizeHotkey(userValue);
115
+
116
+ // Check for conflicting hotkeys
117
+ if (!usedKeys.has(normalizedUserValue)) {
118
+ usedKeys.add(normalizedUserValue);
119
+ result[key] = normalizedUserValue;
120
+ } else {
121
+ console.error(
122
+ `Hotkey: ${formatHotkeyForDisplay(
123
+ userValue
124
+ )} for ${key} is repeated and conflicts with another hotkey. The default hotkey is used: ${formatHotkeyForDisplay(
125
+ defaultValue
126
+ )}`
127
+ );
128
+ result[key] = defaultValue;
129
+ }
130
+ } else {
131
+ console.error(
132
+ `Hotkey: ${formatHotkeyForDisplay(
133
+ userValue
134
+ )} for ${key} is not valid. The default hotkey is used: ${formatHotkeyForDisplay(
135
+ defaultValue
136
+ )}`
137
+ );
138
+ result[key] = defaultValue;
139
+ }
140
+ }
141
+
142
+ return result;
143
+ }
144
+
145
+ // Disables functions in the config object based on the provided list of function names
146
+ function disableFunctions(config, disabledFunctions) {
147
+ // Bind the hasOwnProperty method to the functionMap object to avoid errors
148
+ const hasOwnProperty =
149
+ Object.prototype.hasOwnProperty.bind(functionMap);
150
+
151
+ // Loop through the disabledFunctions array and disable the corresponding functions in the config object
152
+ disabledFunctions.forEach(funcName => {
153
+ if (hasOwnProperty(funcName)) {
154
+ const key = functionMap[funcName];
155
+ config[key] = "disable";
156
+ }
157
+ });
158
+
159
+ // Return the updated config object
160
+ return config;
161
+ }
162
+
163
+ /**
164
+ * The restoreImgRedMask function displays a red mask around an image to indicate the aspect ratio.
165
+ * If the image display property is set to 'none', the mask breaks. To fix this, the function
166
+ * temporarily sets the display property to 'block' and then hides the mask again after 300 milliseconds
167
+ * to avoid breaking the canvas. Additionally, the function adjusts the mask to work correctly on
168
+ * very long images.
169
+ */
170
+ function restoreImgRedMask(elements) {
171
+ const mainTabId = getTabId(elements);
172
+
173
+ if (!mainTabId) return;
174
+
175
+ const mainTab = gradioApp().querySelector(mainTabId);
176
+ const img = mainTab.querySelector("img");
177
+ const imageARPreview = gradioApp().querySelector("#imageARPreview");
178
+
179
+ if (!img || !imageARPreview) return;
180
+
181
+ imageARPreview.style.transform = "";
182
+ if (parseFloat(mainTab.style.width) > 865) {
183
+ const transformString = mainTab.style.transform;
184
+ const scaleMatch = transformString.match(
185
+ /scale\(([-+]?[0-9]*\.?[0-9]+)\)/
186
+ );
187
+ let zoom = 1; // default zoom
188
+
189
+ if (scaleMatch && scaleMatch[1]) {
190
+ zoom = Number(scaleMatch[1]);
191
+ }
192
+
193
+ imageARPreview.style.transformOrigin = "0 0";
194
+ imageARPreview.style.transform = `scale(${zoom})`;
195
+ }
196
+
197
+ if (img.style.display !== "none") return;
198
+
199
+ img.style.display = "block";
200
+
201
+ setTimeout(() => {
202
+ img.style.display = "none";
203
+ }, 400);
204
+ }
205
+
206
+ const hotkeysConfigOpts = await waitForOpts();
207
+
208
+ // Default config
209
+ const defaultHotkeysConfig = {
210
+ canvas_hotkey_zoom: "Alt",
211
+ canvas_hotkey_adjust: "Ctrl",
212
+ canvas_hotkey_reset: "KeyR",
213
+ canvas_hotkey_fullscreen: "KeyS",
214
+ canvas_hotkey_move: "KeyF",
215
+ canvas_hotkey_overlap: "KeyO",
216
+ canvas_hotkey_shrink_brush: "KeyQ",
217
+ canvas_hotkey_grow_brush: "KeyW",
218
+ canvas_disabled_functions: [],
219
+ canvas_show_tooltip: true,
220
+ canvas_auto_expand: true,
221
+ canvas_blur_prompt: false,
222
+ };
223
+
224
+ const functionMap = {
225
+ "Zoom": "canvas_hotkey_zoom",
226
+ "Adjust brush size": "canvas_hotkey_adjust",
227
+ "Hotkey shrink brush": "canvas_hotkey_shrink_brush",
228
+ "Hotkey enlarge brush": "canvas_hotkey_grow_brush",
229
+ "Moving canvas": "canvas_hotkey_move",
230
+ "Fullscreen": "canvas_hotkey_fullscreen",
231
+ "Reset Zoom": "canvas_hotkey_reset",
232
+ "Overlap": "canvas_hotkey_overlap"
233
+ };
234
+
235
+ // Loading the configuration from opts
236
+ const preHotkeysConfig = createHotkeyConfig(
237
+ defaultHotkeysConfig,
238
+ hotkeysConfigOpts
239
+ );
240
+
241
+ // Disable functions that are not needed by the user
242
+ const hotkeysConfig = disableFunctions(
243
+ preHotkeysConfig,
244
+ preHotkeysConfig.canvas_disabled_functions
245
+ );
246
+
247
+ let isMoving = false;
248
+ let mouseX, mouseY;
249
+ let activeElement;
250
+
251
+ const elements = Object.fromEntries(
252
+ Object.keys(elementIDs).map(id => [
253
+ id,
254
+ gradioApp().querySelector(elementIDs[id])
255
+ ])
256
+ );
257
+ const elemData = {};
258
+
259
+ // Apply functionality to the range inputs. Restore redmask and correct for long images.
260
+ const rangeInputs = elements.rangeGroup ?
261
+ Array.from(elements.rangeGroup.querySelectorAll("input")) :
262
+ [
263
+ gradioApp().querySelector("#img2img_width input[type='range']"),
264
+ gradioApp().querySelector("#img2img_height input[type='range']")
265
+ ];
266
+
267
+ for (const input of rangeInputs) {
268
+ input?.addEventListener("input", () => restoreImgRedMask(elements));
269
+ }
270
+
271
+ function applyZoomAndPan(elemId, isExtension = true) {
272
+ const targetElement = gradioApp().querySelector(elemId);
273
+
274
+ if (!targetElement) {
275
+ console.log("Element not found");
276
+ return;
277
+ }
278
+
279
+ targetElement.style.transformOrigin = "0 0";
280
+
281
+ elemData[elemId] = {
282
+ zoomLevel: 1,
283
+ panX: 0,
284
+ panY: 0
285
+ };
286
+ let fullScreenMode = false;
287
+
288
+ // Create tooltip
289
+ function createTooltip() {
290
+ const toolTipElement =
291
+ targetElement.querySelector(".image-container");
292
+ const tooltip = document.createElement("div");
293
+ tooltip.className = "canvas-tooltip";
294
+
295
+ // Create the info icon element
296
+ const info = document.createElement("i");
297
+ info.className = "canvas-tooltip-info";
298
+ info.textContent = "";
299
+
300
+ // Create a container for the contents of the tooltip
301
+ const tooltipContent = document.createElement("div");
302
+ tooltipContent.className = "canvas-tooltip-content";
303
+
304
+ // Define an array with hotkey information and their actions
305
+ const hotkeysInfo = [
306
+ {
307
+ configKey: "canvas_hotkey_zoom",
308
+ action: "Zoom canvas",
309
+ keySuffix: " + wheel"
310
+ },
311
+ {
312
+ configKey: "canvas_hotkey_adjust",
313
+ action: "Adjust brush size",
314
+ keySuffix: " + wheel"
315
+ },
316
+ { configKey: "canvas_hotkey_reset", action: "Reset zoom" },
317
+ {
318
+ configKey: "canvas_hotkey_fullscreen",
319
+ action: "Fullscreen mode"
320
+ },
321
+ { configKey: "canvas_hotkey_move", action: "Move canvas" },
322
+ { configKey: "canvas_hotkey_overlap", action: "Overlap" }
323
+ ];
324
+
325
+ // Create hotkeys array with disabled property based on the config values
326
+ const hotkeys = hotkeysInfo.map(info => {
327
+ const configValue = hotkeysConfig[info.configKey];
328
+ const key = info.keySuffix ?
329
+ `${configValue}${info.keySuffix}` :
330
+ configValue.charAt(configValue.length - 1);
331
+ return {
332
+ key,
333
+ action: info.action,
334
+ disabled: configValue === "disable"
335
+ };
336
+ });
337
+
338
+ for (const hotkey of hotkeys) {
339
+ if (hotkey.disabled) {
340
+ continue;
341
+ }
342
+
343
+ const p = document.createElement("p");
344
+ p.innerHTML = `<b>${hotkey.key}</b> - ${hotkey.action}`;
345
+ tooltipContent.appendChild(p);
346
+ }
347
+
348
+ // Add information and content elements to the tooltip element
349
+ tooltip.appendChild(info);
350
+ tooltip.appendChild(tooltipContent);
351
+
352
+ // Add a hint element to the target element
353
+ toolTipElement.appendChild(tooltip);
354
+ }
355
+
356
+ // Show the tooltip if the setting is enabled
357
+ if (hotkeysConfig.canvas_show_tooltip) {
358
+ createTooltip();
359
+ }
360
+
361
+ // Testing showed that the img tag interferes with zooming and produces white canvases. This hack works around the problem and has no effect on the rest of the webui.
362
+ function fixCanvas() {
363
+ const activeTab = getActiveTab(elements).textContent.trim();
364
+
365
+ if (activeTab !== "img2img") {
366
+ const img = targetElement.querySelector(`${elemId} img`);
367
+
368
+ if (img && img.style.display !== "none") {
369
+ img.style.display = "none";
370
+ img.style.visibility = "hidden";
371
+ }
372
+ }
373
+ }
374
+
375
+ // Reset the zoom level and pan position of the target element to their initial values
376
+ function resetZoom() {
377
+ elemData[elemId] = {
378
+ zoomLevel: 1,
379
+ panX: 0,
380
+ panY: 0
381
+ };
382
+
383
+ if (isExtension) {
384
+ targetElement.style.overflow = "hidden";
385
+ }
386
+
387
+ targetElement.isZoomed = false;
388
+
389
+ fixCanvas();
390
+ targetElement.style.transform = `scale(${elemData[elemId].zoomLevel}) translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px)`;
391
+
392
+ const canvas = gradioApp().querySelector(
393
+ `${elemId} canvas[key="interface"]`
394
+ );
395
+
396
+ toggleOverlap("off");
397
+ fullScreenMode = false;
398
+
399
+ const closeBtn = targetElement.querySelector("button[aria-label='Remove Image']");
400
+ if (closeBtn) {
401
+ closeBtn.addEventListener("click", resetZoom);
402
+ }
403
+
404
+ if (canvas && isExtension) {
405
+ const parentElement = targetElement.closest('[id^="component-"]');
406
+ if (
407
+ canvas &&
408
+ parseFloat(canvas.style.width) > parentElement.offsetWidth &&
409
+ parseFloat(targetElement.style.width) > parentElement.offsetWidth
410
+ ) {
411
+ fitToElement();
412
+ return;
413
+ }
414
+
415
+ }
416
+
417
+ if (
418
+ canvas &&
419
+ !isExtension &&
420
+ parseFloat(canvas.style.width) > 865 &&
421
+ parseFloat(targetElement.style.width) > 865
422
+ ) {
423
+ fitToElement();
424
+ return;
425
+ }
426
+
427
+ targetElement.style.width = "";
428
+ }
429
+
430
+ // Toggle the zIndex of the target element between two values, allowing it to overlap or be overlapped by other elements
431
+ function toggleOverlap(forced = "") {
432
+ const zIndex1 = "0";
433
+ const zIndex2 = "998";
434
+
435
+ targetElement.style.zIndex =
436
+ targetElement.style.zIndex !== zIndex2 ? zIndex2 : zIndex1;
437
+
438
+ if (forced === "off") {
439
+ targetElement.style.zIndex = zIndex1;
440
+ } else if (forced === "on") {
441
+ targetElement.style.zIndex = zIndex2;
442
+ }
443
+ }
444
+
445
+ // Adjust the brush size based on the deltaY value from a mouse wheel event
446
+ function adjustBrushSize(
447
+ elemId,
448
+ deltaY,
449
+ withoutValue = false,
450
+ percentage = 5
451
+ ) {
452
+ const input =
453
+ gradioApp().querySelector(
454
+ `${elemId} input[aria-label='Brush radius']`
455
+ ) ||
456
+ gradioApp().querySelector(
457
+ `${elemId} button[aria-label="Use brush"]`
458
+ );
459
+
460
+ if (input) {
461
+ input.click();
462
+ if (!withoutValue) {
463
+ const maxValue =
464
+ parseFloat(input.getAttribute("max")) || 100;
465
+ const changeAmount = maxValue * (percentage / 100);
466
+ const newValue =
467
+ parseFloat(input.value) +
468
+ (deltaY > 0 ? -changeAmount : changeAmount);
469
+ input.value = Math.min(Math.max(newValue, 0), maxValue);
470
+ input.dispatchEvent(new Event("change"));
471
+ }
472
+ }
473
+ }
474
+
475
+ // Reset zoom when uploading a new image
476
+ const fileInput = gradioApp().querySelector(
477
+ `${elemId} input[type="file"][accept="image/*"].svelte-116rqfv`
478
+ );
479
+ fileInput.addEventListener("click", resetZoom);
480
+
481
+ // Update the zoom level and pan position of the target element based on the values of the zoomLevel, panX and panY variables
482
+ function updateZoom(newZoomLevel, mouseX, mouseY) {
483
+ newZoomLevel = Math.max(0.1, Math.min(newZoomLevel, 15));
484
+
485
+ elemData[elemId].panX +=
486
+ mouseX - (mouseX * newZoomLevel) / elemData[elemId].zoomLevel;
487
+ elemData[elemId].panY +=
488
+ mouseY - (mouseY * newZoomLevel) / elemData[elemId].zoomLevel;
489
+
490
+ targetElement.style.transformOrigin = "0 0";
491
+ targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${newZoomLevel})`;
492
+
493
+ toggleOverlap("on");
494
+ if (isExtension) {
495
+ targetElement.style.overflow = "visible";
496
+ }
497
+
498
+ return newZoomLevel;
499
+ }
500
+
501
+ // Change the zoom level based on user interaction
502
+ function changeZoomLevel(operation, e) {
503
+ if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) {
504
+ e.preventDefault();
505
+
506
+ let zoomPosX, zoomPosY;
507
+ let delta = 0.2;
508
+ if (elemData[elemId].zoomLevel > 7) {
509
+ delta = 0.9;
510
+ } else if (elemData[elemId].zoomLevel > 2) {
511
+ delta = 0.6;
512
+ }
513
+
514
+ zoomPosX = e.clientX;
515
+ zoomPosY = e.clientY;
516
+
517
+ fullScreenMode = false;
518
+ elemData[elemId].zoomLevel = updateZoom(
519
+ elemData[elemId].zoomLevel +
520
+ (operation === "+" ? delta : -delta),
521
+ zoomPosX - targetElement.getBoundingClientRect().left,
522
+ zoomPosY - targetElement.getBoundingClientRect().top
523
+ );
524
+
525
+ targetElement.isZoomed = true;
526
+ }
527
+ }
528
+
529
+ /**
530
+ * This function fits the target element to its parent container by calculating
531
+ * the required scale and offsets. It also updates the global variables
532
+ * zoomLevel, panX, and panY to reflect the new state.
533
+ */
534
+
535
+ function fitToElement() {
536
+ //Reset Zoom
537
+ targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
538
+
539
+ let parentElement;
540
+
541
+ if (isExtension) {
542
+ parentElement = targetElement.closest('[id^="component-"]');
543
+ } else {
544
+ parentElement = targetElement.parentElement;
545
+ }
546
+
547
+
548
+ // Get element and screen dimensions
549
+ const elementWidth = targetElement.offsetWidth;
550
+ const elementHeight = targetElement.offsetHeight;
551
+
552
+ const screenWidth = parentElement.clientWidth;
553
+ const screenHeight = parentElement.clientHeight;
554
+
555
+ // Get element's coordinates relative to the parent element
556
+ const elementRect = targetElement.getBoundingClientRect();
557
+ const parentRect = parentElement.getBoundingClientRect();
558
+ const elementX = elementRect.x - parentRect.x;
559
+
560
+ // Calculate scale and offsets
561
+ const scaleX = screenWidth / elementWidth;
562
+ const scaleY = screenHeight / elementHeight;
563
+ const scale = Math.min(scaleX, scaleY);
564
+
565
+ const transformOrigin =
566
+ window.getComputedStyle(targetElement).transformOrigin;
567
+ const [originX, originY] = transformOrigin.split(" ");
568
+ const originXValue = parseFloat(originX);
569
+ const originYValue = parseFloat(originY);
570
+
571
+ const offsetX =
572
+ (screenWidth - elementWidth * scale) / 2 -
573
+ originXValue * (1 - scale);
574
+ const offsetY =
575
+ (screenHeight - elementHeight * scale) / 2.5 -
576
+ originYValue * (1 - scale);
577
+
578
+ // Apply scale and offsets to the element
579
+ targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
580
+
581
+ // Update global variables
582
+ elemData[elemId].zoomLevel = scale;
583
+ elemData[elemId].panX = offsetX;
584
+ elemData[elemId].panY = offsetY;
585
+
586
+ fullScreenMode = false;
587
+ toggleOverlap("off");
588
+ }
589
+
590
+ /**
591
+ * This function fits the target element to the screen by calculating
592
+ * the required scale and offsets. It also updates the global variables
593
+ * zoomLevel, panX, and panY to reflect the new state.
594
+ */
595
+
596
+ // Fullscreen mode
597
+ function fitToScreen() {
598
+ const canvas = gradioApp().querySelector(
599
+ `${elemId} canvas[key="interface"]`
600
+ );
601
+
602
+ if (!canvas) return;
603
+
604
+ if (canvas.offsetWidth > 862 || isExtension) {
605
+ targetElement.style.width = (canvas.offsetWidth + 2) + "px";
606
+ }
607
+
608
+ if (isExtension) {
609
+ targetElement.style.overflow = "visible";
610
+ }
611
+
612
+ if (fullScreenMode) {
613
+ resetZoom();
614
+ fullScreenMode = false;
615
+ return;
616
+ }
617
+
618
+ //Reset Zoom
619
+ targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
620
+
621
+ // Get scrollbar width to right-align the image
622
+ const scrollbarWidth =
623
+ window.innerWidth - document.documentElement.clientWidth;
624
+
625
+ // Get element and screen dimensions
626
+ const elementWidth = targetElement.offsetWidth;
627
+ const elementHeight = targetElement.offsetHeight;
628
+ const screenWidth = window.innerWidth - scrollbarWidth;
629
+ const screenHeight = window.innerHeight;
630
+
631
+ // Get element's coordinates relative to the page
632
+ const elementRect = targetElement.getBoundingClientRect();
633
+ const elementY = elementRect.y;
634
+ const elementX = elementRect.x;
635
+
636
+ // Calculate scale and offsets
637
+ const scaleX = screenWidth / elementWidth;
638
+ const scaleY = screenHeight / elementHeight;
639
+ const scale = Math.min(scaleX, scaleY);
640
+
641
+ // Get the current transformOrigin
642
+ const computedStyle = window.getComputedStyle(targetElement);
643
+ const transformOrigin = computedStyle.transformOrigin;
644
+ const [originX, originY] = transformOrigin.split(" ");
645
+ const originXValue = parseFloat(originX);
646
+ const originYValue = parseFloat(originY);
647
+
648
+ // Calculate offsets with respect to the transformOrigin
649
+ const offsetX =
650
+ (screenWidth - elementWidth * scale) / 2 -
651
+ elementX -
652
+ originXValue * (1 - scale);
653
+ const offsetY =
654
+ (screenHeight - elementHeight * scale) / 2 -
655
+ elementY -
656
+ originYValue * (1 - scale);
657
+
658
+ // Apply scale and offsets to the element
659
+ targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
660
+
661
+ // Update global variables
662
+ elemData[elemId].zoomLevel = scale;
663
+ elemData[elemId].panX = offsetX;
664
+ elemData[elemId].panY = offsetY;
665
+
666
+ fullScreenMode = true;
667
+ toggleOverlap("on");
668
+ }
669
+
670
+ // Handle keydown events
671
+ function handleKeyDown(event) {
672
+ // Don't intercept Ctrl+C / Ctrl+V / F5, so copying and pasting from the clipboard keeps working
673
+ if ((event.ctrlKey && event.code === 'KeyV') || (event.ctrlKey && event.code === 'KeyC') || event.code === "F5") {
674
+ return;
675
+ }
676
+
677
+ // before activating shortcut, ensure user is not actively typing in an input field
678
+ if (!hotkeysConfig.canvas_blur_prompt) {
679
+ if (event.target.nodeName === 'TEXTAREA' || event.target.nodeName === 'INPUT') {
680
+ return;
681
+ }
682
+ }
683
+
684
+
685
+ const hotkeyActions = {
686
+ [hotkeysConfig.canvas_hotkey_reset]: resetZoom,
687
+ [hotkeysConfig.canvas_hotkey_overlap]: toggleOverlap,
688
+ [hotkeysConfig.canvas_hotkey_fullscreen]: fitToScreen,
689
+ [hotkeysConfig.canvas_hotkey_shrink_brush]: () => adjustBrushSize(elemId, 10),
690
+ [hotkeysConfig.canvas_hotkey_grow_brush]: () => adjustBrushSize(elemId, -10)
691
+ };
692
+
693
+ const action = hotkeyActions[event.code];
694
+ if (action) {
695
+ event.preventDefault();
696
+ action(event);
697
+ }
698
+
699
+ if (
700
+ isModifierKey(event, hotkeysConfig.canvas_hotkey_zoom) ||
701
+ isModifierKey(event, hotkeysConfig.canvas_hotkey_adjust)
702
+ ) {
703
+ event.preventDefault();
704
+ }
705
+ }
706
+
707
+ // Get Mouse position
708
+ function getMousePosition(e) {
709
+ mouseX = e.offsetX;
710
+ mouseY = e.offsetY;
711
+ }
712
+
713
+ // Simulates fitting a long image into the visible area:
714
+ // if the image has a horizontal scroll bar, briefly go fullscreen to reveal it, then shrink it back to fit the element.
715
+ // The image is hidden while this happens and shown to the user once it is ready.
716
+
717
+ targetElement.isExpanded = false;
718
+ function autoExpand() {
719
+ const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
720
+ if (canvas) {
721
+ if (hasHorizontalScrollbar(targetElement) && targetElement.isExpanded === false) {
722
+ targetElement.style.visibility = "hidden";
723
+ setTimeout(() => {
724
+ fitToScreen();
725
+ resetZoom();
726
+ targetElement.style.visibility = "visible";
727
+ targetElement.isExpanded = true;
728
+ }, 10);
729
+ }
730
+ }
731
+ }
732
+
733
+ targetElement.addEventListener("mousemove", getMousePosition);
734
+
735
+ //observers
736
+ // Creating an observer with a callback function to handle DOM changes
737
+ const observer = new MutationObserver((mutationsList, observer) => {
738
+ for (let mutation of mutationsList) {
739
+ // React to changes of the canvas style attribute; in practice this only happens when the picture changes
740
+ if (mutation.type === 'attributes' && mutation.attributeName === 'style' &&
741
+ mutation.target.tagName.toLowerCase() === 'canvas') {
742
+ targetElement.isExpanded = false;
743
+ setTimeout(resetZoom, 10);
744
+ }
745
+ }
746
+ });
747
+
748
+ // Apply auto expand if enabled
749
+ if (hotkeysConfig.canvas_auto_expand) {
750
+ targetElement.addEventListener("mousemove", autoExpand);
751
+ // Set up an observer to track attribute changes
752
+ observer.observe(targetElement, { attributes: true, childList: true, subtree: true });
753
+ }
754
+
755
+ // Handle events only inside the targetElement
756
+ let isKeyDownHandlerAttached = false;
757
+
758
+ function handleMouseMove() {
759
+ if (!isKeyDownHandlerAttached) {
760
+ document.addEventListener("keydown", handleKeyDown);
761
+ isKeyDownHandlerAttached = true;
762
+
763
+ activeElement = elemId;
764
+ }
765
+ }
766
+
767
+ function handleMouseLeave() {
768
+ if (isKeyDownHandlerAttached) {
769
+ document.removeEventListener("keydown", handleKeyDown);
770
+ isKeyDownHandlerAttached = false;
771
+
772
+ activeElement = null;
773
+ }
774
+ }
775
+
776
+ // Add mouse event handlers
777
+ targetElement.addEventListener("mousemove", handleMouseMove);
778
+ targetElement.addEventListener("mouseleave", handleMouseLeave);
779
+
780
+ // Reset zoom when click on another tab
781
+ elements.img2imgTabs.addEventListener("click", resetZoom);
782
+ elements.img2imgTabs.addEventListener("click", () => {
783
+ // targetElement.style.width = "";
784
+ if (parseInt(targetElement.style.width) > 865) {
785
+ setTimeout(fitToElement, 0);
786
+ }
787
+ });
788
+
789
+ targetElement.addEventListener("wheel", e => {
790
+ // change zoom level
791
+ const operation = e.deltaY > 0 ? "-" : "+";
792
+ changeZoomLevel(operation, e);
793
+
794
+ // Handle brush size adjustment with ctrl key pressed
795
+ if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) {
796
+ e.preventDefault();
797
+
798
+ // Increase or decrease brush size based on scroll direction
799
+ adjustBrushSize(elemId, e.deltaY);
800
+ }
801
+ });
802
+
803
+ // Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
804
+ function handleMoveKeyDown(e) {
805
+
806
+ // Don't intercept Ctrl+C / Ctrl+V / F5, so copying and pasting from the clipboard keeps working
807
+ if ((e.ctrlKey && e.code === 'KeyV') || (e.ctrlKey && e.code === 'KeyC') || e.code === "F5") {
808
+ return;
809
+ }
810
+
811
+ // before activating shortcut, ensure user is not actively typing in an input field
812
+ if (!hotkeysConfig.canvas_blur_prompt) {
813
+ if (e.target.nodeName === 'TEXTAREA' || e.target.nodeName === 'INPUT') {
814
+ return;
815
+ }
816
+ }
817
+
818
+
819
+ if (e.code === hotkeysConfig.canvas_hotkey_move) {
820
+ if (!e.ctrlKey && !e.metaKey && isKeyDownHandlerAttached) {
821
+ e.preventDefault();
822
+ document.activeElement.blur();
823
+ isMoving = true;
824
+ }
825
+ }
826
+ }
827
+
828
+ function handleMoveKeyUp(e) {
829
+ if (e.code === hotkeysConfig.canvas_hotkey_move) {
830
+ isMoving = false;
831
+ }
832
+ }
833
+
834
+ document.addEventListener("keydown", handleMoveKeyDown);
835
+ document.addEventListener("keyup", handleMoveKeyUp);
836
+
837
+ // Detect zoom level and update the pan speed.
838
+ function updatePanPosition(movementX, movementY) {
839
+ let panSpeed = 2;
840
+
841
+ if (elemData[elemId].zoomLevel > 8) {
842
+ panSpeed = 3.5;
843
+ }
844
+
845
+ elemData[elemId].panX += movementX * panSpeed;
846
+ elemData[elemId].panY += movementY * panSpeed;
847
+
848
+ // Delayed redraw of an element
849
+ requestAnimationFrame(() => {
850
+ targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${elemData[elemId].zoomLevel})`;
851
+ toggleOverlap("on");
852
+ });
853
+ }
854
+
855
+ function handleMoveByKey(e) {
856
+ if (isMoving && elemId === activeElement) {
857
+ updatePanPosition(e.movementX, e.movementY);
858
+ targetElement.style.pointerEvents = "none";
859
+
860
+ if (isExtension) {
861
+ targetElement.style.overflow = "visible";
862
+ }
863
+
864
+ } else {
865
+ targetElement.style.pointerEvents = "auto";
866
+ }
867
+ }
868
+
869
+ // Prevents sticking to the mouse
870
+ window.onblur = function () {
871
+ isMoving = false;
872
+ };
873
+
874
+ // Extension-only check: reset zoom when the element no longer fits its parent container
875
+ function checkForOutBox() {
876
+ const parentElement = targetElement.closest('[id^="component-"]');
877
+ if (parentElement.offsetWidth < targetElement.offsetWidth && !targetElement.isExpanded) {
878
+ resetZoom();
879
+ targetElement.isExpanded = true;
880
+ }
881
+
882
+ if (parentElement.offsetWidth < targetElement.offsetWidth && elemData[elemId].zoomLevel == 1) {
883
+ resetZoom();
884
+ }
885
+
886
+ if (parentElement.offsetWidth < targetElement.offsetWidth && targetElement.offsetWidth * elemData[elemId].zoomLevel > parentElement.offsetWidth && elemData[elemId].zoomLevel < 1 && !targetElement.isZoomed) {
887
+ resetZoom();
888
+ }
889
+ }
890
+
891
+ if (isExtension) {
892
+ targetElement.addEventListener("mousemove", checkForOutBox);
893
+ }
894
+
895
+
896
+ window.addEventListener('resize', (e) => {
897
+ resetZoom();
898
+
899
+ if (isExtension) {
900
+ targetElement.isExpanded = false;
901
+ targetElement.isZoomed = false;
902
+ }
903
+ });
904
+
905
+ gradioApp().addEventListener("mousemove", handleMoveByKey);
906
+
907
+
908
+ }
909
+
910
+ applyZoomAndPan(elementIDs.sketch, false);
911
+ applyZoomAndPan(elementIDs.inpaint, false);
912
+ applyZoomAndPan(elementIDs.inpaintSketch, false);
913
+
914
+ // Make the function global so that other extensions can take advantage of this solution
915
+ const applyZoomAndPanIntegration = async (id, elementIDs) => {
916
+ const mainEl = document.querySelector(id);
917
+ if (id.toLocaleLowerCase() === "none") {
918
+ for (const elementID of elementIDs) {
919
+ const el = await waitForElement(elementID);
920
+ if (!el) break;
921
+ applyZoomAndPan(elementID);
922
+ }
923
+ return;
924
+ }
925
+
926
+ if (!mainEl) return;
927
+ mainEl.addEventListener("click", async () => {
928
+ for (const elementID of elementIDs) {
929
+ const el = await waitForElement(elementID);
930
+ if (!el) break;
931
+ applyZoomAndPan(elementID);
932
+ }
933
+ }, { once: true });
934
+ };
935
+
936
+ window.applyZoomAndPan = applyZoomAndPan; // Single element; the argument is the element ID, for example applyZoomAndPan("#txt2img_controlnet_ControlNet_input_image")
937
+ window.applyZoomAndPanIntegration = applyZoomAndPanIntegration; // for any extension
938
+ });
939
+
940
+ })();
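Usage note: the two globals exported at the end of zoom.js are the public entry points for other extensions. The sketch below shows how an extension might hook in; the selectors are placeholders for illustration only and do not exist in this repository.

// Attach zoom/pan to a single element right away (placeholder selector):
window.applyZoomAndPan("#my_extension_inpaint_canvas");

// Register several canvases lazily: they are initialised the first time the given
// button/tab is clicked. Passing "none" as the first argument applies them immediately.
window.applyZoomAndPanIntegration("#my_extension_tab_button", [
    "#my_extension_canvas_a",
    "#my_extension_canvas_b"
]);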
extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py ADDED
@@ -0,0 +1,17 @@
1
+ from modules import shared
2
+ import gradio as gr
3
+
4
+ shared.options_templates.update(shared.options_section(("canvas_hotkey", "Canvas Hotkeys", "ui"), {
5
+ "canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally; 'Alt' can cause a little trouble in Firefox"),
6
+ "canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift","Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally; 'Alt' can cause a little trouble in Firefox"),
7
+ "canvas_hotkey_shrink_brush": shared.OptionInfo("Q", "Shrink the brush size"),
8
+ "canvas_hotkey_grow_brush": shared.OptionInfo("W", "Enlarge the brush size"),
9
+ "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in Firefox, turn off 'Automatically search the page text when typing' in the browser settings"),
10
+ "canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen mode: maximizes the picture so that it fits into the screen and stretches it to its full width"),
11
+ "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"),
12
+ "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"),
13
+ "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
14
+ "canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
15
+ "canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
16
+ "canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable functions that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size","Hotkey enlarge brush","Hotkey shrink brush","Moving canvas","Fullscreen","Reset Zoom","Overlap"]}),
17
+ }))
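Note: the single-letter values above ("Q", "W", "F", "S", "R", "O") are normalized on the JavaScript side into the KeyboardEvent.code form ("KeyQ", "KeyW", ...) that zoom.js compares against. Below is a minimal sketch of that mapping, assuming the repository's normalizeHotkey helper behaves roughly like this; normalizeHotkeySketch is a hypothetical name, not the actual implementation.

// Hypothetical sketch: map a single letter from the settings UI onto a KeyboardEvent.code value.
function normalizeHotkeySketch(value) {
    return value.length === 1 ? "Key" + value.toUpperCase() : value;
}
normalizeHotkeySketch("q");    // "KeyQ"
normalizeHotkeySketch("KeyR"); // already normalized, returned unchanged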
extensions-builtin/canvas-zoom-and-pan/style.css ADDED
@@ -0,0 +1,71 @@
1
+ .canvas-tooltip-info {
2
+ position: absolute;
3
+ top: 10px;
4
+ left: 10px;
5
+ cursor: help;
6
+ background-color: rgba(0, 0, 0, 0.3);
7
+ width: 20px;
8
+ height: 20px;
9
+ border-radius: 50%;
10
+ display: flex;
11
+ align-items: center;
12
+ justify-content: center;
13
+ flex-direction: column;
14
+
15
+ z-index: 100;
16
+ }
17
+
18
+ .canvas-tooltip-info::after {
19
+ content: '';
20
+ display: block;
21
+ width: 2px;
22
+ height: 7px;
23
+ background-color: white;
24
+ margin-top: 2px;
25
+ }
26
+
27
+ .canvas-tooltip-info::before {
28
+ content: '';
29
+ display: block;
30
+ width: 2px;
31
+ height: 2px;
32
+ background-color: white;
33
+ }
34
+
35
+ .canvas-tooltip-content {
36
+ display: none;
37
+ background-color: #f9f9f9;
38
+ color: #333;
39
+ border: 1px solid #ddd;
40
+ padding: 15px;
41
+ position: absolute;
42
+ top: 40px;
43
+ left: 10px;
44
+ width: 250px;
45
+ font-size: 16px;
46
+ opacity: 0;
47
+ border-radius: 8px;
48
+ box-shadow: 0px 8px 16px 0px rgba(0, 0, 0, 0.2);
49
+
50
+ z-index: 100;
51
+ }
52
+
53
+ .canvas-tooltip:hover .canvas-tooltip-content {
54
+ display: block;
55
+ animation: fadeIn 0.5s;
56
+ opacity: 1;
57
+ }
58
+
59
+ @keyframes fadeIn {
60
+ from {
61
+ opacity: 0;
62
+ }
63
+
64
+ to {
65
+ opacity: 1;
66
+ }
67
+ }
68
+
69
+ .styler {
70
+ overflow: inherit !important;
71
+ }
extensions-builtin/extra-options-section/scripts/extra_options_section.py ADDED
@@ -0,0 +1,124 @@
1
+ from modules import scripts, shared, ui_components, ui_settings, infotext_utils
2
+ from modules.ui_components import FormColumn
3
+
4
+ import gradio as gr
5
+ import math
6
+
7
+
8
+ class ExtraOptionsSection(scripts.Script):
9
+ section = "extra_options"
10
+
11
+ def __init__(self):
12
+ self.comps = None
13
+ self.setting_names = None
14
+
15
+ def title(self):
16
+ return "Extra options"
17
+
18
+ def show(self, is_img2img):
19
+ return scripts.AlwaysVisible
20
+
21
+ def ui(self, is_img2img):
22
+ self.comps = []
23
+ self.setting_names = []
24
+ self.infotext_fields = []
25
+ extra_options = (
26
+ shared.opts.extra_options_img2img
27
+ if is_img2img
28
+ else shared.opts.extra_options_txt2img
29
+ )
30
+ elem_id_tabname = "extra_options_" + ("img2img" if is_img2img else "txt2img")
31
+
32
+ mapping = {k: v for v, k in infotext_utils.infotext_to_setting_name_mapping}
33
+
34
+ with gr.Blocks() as interface:
35
+ with (
36
+ gr.Accordion("Options", open=False, elem_id=elem_id_tabname)
37
+ if shared.opts.extra_options_accordion and extra_options
38
+ else gr.Group(elem_id=elem_id_tabname)
39
+ ):
40
+
41
+ row_count = math.ceil(
42
+ len(extra_options) / shared.opts.extra_options_cols
43
+ )
44
+
45
+ for row in range(row_count):
46
+ with gr.Row():
47
+ for col in range(shared.opts.extra_options_cols):
48
+ index = row * shared.opts.extra_options_cols + col
49
+ if index >= len(extra_options):
50
+ break
51
+
52
+ setting_name = extra_options[index]
53
+
54
+ with FormColumn():
55
+ comp = ui_settings.create_setting_component(
56
+ setting_name
57
+ )
58
+
59
+ self.comps.append(comp)
60
+ self.setting_names.append(setting_name)
61
+
62
+ setting_infotext_name = mapping.get(setting_name)
63
+ if setting_infotext_name is not None:
64
+ self.infotext_fields.append(
65
+ (comp, setting_infotext_name)
66
+ )
67
+
68
+ def get_settings_values():
69
+ res = [ui_settings.get_value_for_setting(key) for key in self.setting_names]
70
+ return res[0] if len(res) == 1 else res
71
+
72
+ interface.load(
73
+ fn=get_settings_values,
74
+ inputs=[],
75
+ outputs=self.comps,
76
+ queue=False,
77
+ show_progress=False,
78
+ )
79
+
80
+ return self.comps
81
+
82
+ def before_process(self, p, *args):
83
+ for name, value in zip(self.setting_names, args):
84
+ if name not in p.override_settings:
85
+ p.override_settings[name] = value
86
+
87
+
88
+ shared.options_templates.update(
89
+ shared.options_section(
90
+ ("settings_in_ui", "Settings in UI", "ui"),
91
+ {
92
+ "settings_in_ui": shared.OptionHTML("This page allows you to add some settings to the main interface of txt2img and img2img tabs."),
93
+ "extra_options_txt2img": shared.OptionInfo(
94
+ [],
95
+ "Settings for txt2img",
96
+ ui_components.DropdownMulti,
97
+ lambda: {"choices": list(shared.opts.data_labels.keys())},
98
+ )
99
+ .js("info", "settingsHintsShowQuicksettings")
100
+ .info("setting entries that also appear in txt2img interfaces")
101
+ .needs_reload_ui(),
102
+ "extra_options_img2img": shared.OptionInfo(
103
+ [],
104
+ "Settings for img2img",
105
+ ui_components.DropdownMulti,
106
+ lambda: {"choices": list(shared.opts.data_labels.keys())},
107
+ )
108
+ .js("info", "settingsHintsShowQuicksettings")
109
+ .info("setting entries that also appear in img2img interfaces")
110
+ .needs_reload_ui(),
111
+ "extra_options_cols": shared.OptionInfo(
112
+ 1,
113
+ "Number of columns for added settings",
114
+ gr.Slider,
115
+ {"step": 1, "minimum": 1, "maximum": 20},
116
+ )
117
+ .info("displayed amount will depend on the actual browser window width")
118
+ .needs_reload_ui(),
119
+ "extra_options_accordion": shared.OptionInfo(
120
+ False, "Place added settings into an accordion"
121
+ ).needs_reload_ui(),
122
+ },
123
+ )
124
+ )
extensions-builtin/forge_legacy_preprocessors/.gitignore ADDED
@@ -0,0 +1,185 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # pdm
105
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
+ #pdm.lock
107
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
+ # in version control.
109
+ # https://pdm.fming.dev/#use-with-ide
110
+ .pdm.toml
111
+
112
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113
+ __pypackages__/
114
+
115
+ # Celery stuff
116
+ celerybeat-schedule
117
+ celerybeat.pid
118
+
119
+ # SageMath parsed files
120
+ *.sage.py
121
+
122
+ # Environments
123
+ .env
124
+ .venv
125
+ env/
126
+ venv/
127
+ ENV/
128
+ env.bak/
129
+ venv.bak/
130
+
131
+ # Spyder project settings
132
+ .spyderproject
133
+ .spyproject
134
+
135
+ # Rope project settings
136
+ .ropeproject
137
+
138
+ # mkdocs documentation
139
+ /site
140
+
141
+ # mypy
142
+ .mypy_cache/
143
+ .dmypy.json
144
+ dmypy.json
145
+
146
+ # Pyre type checker
147
+ .pyre/
148
+
149
+ # pytype static type analyzer
150
+ .pytype/
151
+
152
+ # Cython debug symbols
153
+ cython_debug/
154
+
155
+ # PyCharm
156
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
159
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160
+ #.idea
161
+ *.pt
162
+ *.pth
163
+ *.ckpt
164
+ *.bin
165
+ *.safetensors
166
+
167
+ # Editor setting metadata
168
+ .idea/
169
+ .vscode/
170
+ detected_maps/
171
+ annotator/downloads/
172
+
173
+ # test results and expectations
174
+ web_tests/results/
175
+ web_tests/expectations/
176
+ tests/web_api/full_coverage/results/
177
+ tests/web_api/full_coverage/expectations/
178
+
179
+ *_diff.png
180
+
181
+ # Presets
182
+ presets/
183
+
184
+ # Ignore existing dir of hand refiner if exists.
185
+ annotator/hand_refiner_portable
extensions-builtin/forge_legacy_preprocessors/LICENSE ADDED
@@ -0,0 +1,674 @@
1
+ GNU GENERAL PUBLIC LICENSE
2
+ Version 3, 29 June 2007
3
+
4
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5
+ Everyone is permitted to copy and distribute verbatim copies
6
+ of this license document, but changing it is not allowed.
7
+
8
+ Preamble
9
+
10
+ The GNU General Public License is a free, copyleft license for
11
+ software and other kinds of works.
12
+
13
+ The licenses for most software and other practical works are designed
14
+ to take away your freedom to share and change the works. By contrast,
15
+ the GNU General Public License is intended to guarantee your freedom to
16
+ share and change all versions of a program--to make sure it remains free
17
+ software for all its users. We, the Free Software Foundation, use the
18
+ GNU General Public License for most of our software; it applies also to
19
+ any other work released this way by its authors. You can apply it to
20
+ your programs, too.
21
+
22
+ When we speak of free software, we are referring to freedom, not
23
+ price. Our General Public Licenses are designed to make sure that you
24
+ have the freedom to distribute copies of free software (and charge for
25
+ them if you wish), that you receive source code or can get it if you
26
+ want it, that you can change the software or use pieces of it in new
27
+ free programs, and that you know you can do these things.
28
+
29
+ To protect your rights, we need to prevent others from denying you
30
+ these rights or asking you to surrender the rights. Therefore, you have
31
+ certain responsibilities if you distribute copies of the software, or if
32
+ you modify it: responsibilities to respect the freedom of others.
33
+
34
+ For example, if you distribute copies of such a program, whether
35
+ gratis or for a fee, you must pass on to the recipients the same
36
+ freedoms that you received. You must make sure that they, too, receive
37
+ or can get the source code. And you must show them these terms so they
38
+ know their rights.
39
+
40
+ Developers that use the GNU GPL protect your rights with two steps:
41
+ (1) assert copyright on the software, and (2) offer you this License
42
+ giving you legal permission to copy, distribute and/or modify it.
43
+
44
+ For the developers' and authors' protection, the GPL clearly explains
45
+ that there is no warranty for this free software. For both users' and
46
+ authors' sake, the GPL requires that modified versions be marked as
47
+ changed, so that their problems will not be attributed erroneously to
48
+ authors of previous versions.
49
+
50
+ Some devices are designed to deny users access to install or run
51
+ modified versions of the software inside them, although the manufacturer
52
+ can do so. This is fundamentally incompatible with the aim of
53
+ protecting users' freedom to change the software. The systematic
54
+ pattern of such abuse occurs in the area of products for individuals to
55
+ use, which is precisely where it is most unacceptable. Therefore, we
56
+ have designed this version of the GPL to prohibit the practice for those
57
+ products. If such problems arise substantially in other domains, we
58
+ stand ready to extend this provision to those domains in future versions
59
+ of the GPL, as needed to protect the freedom of users.
60
+
61
+ Finally, every program is threatened constantly by software patents.
62
+ States should not allow patents to restrict development and use of
63
+ software on general-purpose computers, but in those that do, we wish to
64
+ avoid the special danger that patents applied to a free program could
65
+ make it effectively proprietary. To prevent this, the GPL assures that
66
+ patents cannot be used to render the program non-free.
67
+
68
+ The precise terms and conditions for copying, distribution and
69
+ modification follow.
70
+
71
+ TERMS AND CONDITIONS
72
+
73
+ 0. Definitions.
74
+
75
+ "This License" refers to version 3 of the GNU General Public License.
76
+
77
+ "Copyright" also means copyright-like laws that apply to other kinds of
78
+ works, such as semiconductor masks.
79
+
80
+ "The Program" refers to any copyrightable work licensed under this
81
+ License. Each licensee is addressed as "you". "Licensees" and
82
+ "recipients" may be individuals or organizations.
83
+
84
+ To "modify" a work means to copy from or adapt all or part of the work
85
+ in a fashion requiring copyright permission, other than the making of an
86
+ exact copy. The resulting work is called a "modified version" of the
87
+ earlier work or a work "based on" the earlier work.
88
+
89
+ A "covered work" means either the unmodified Program or a work based
90
+ on the Program.
91
+
92
+ To "propagate" a work means to do anything with it that, without
93
+ permission, would make you directly or secondarily liable for
94
+ infringement under applicable copyright law, except executing it on a
95
+ computer or modifying a private copy. Propagation includes copying,
96
+ distribution (with or without modification), making available to the
97
+ public, and in some countries other activities as well.
98
+
99
+ To "convey" a work means any kind of propagation that enables other
100
+ parties to make or receive copies. Mere interaction with a user through
101
+ a computer network, with no transfer of a copy, is not conveying.
102
+
103
+ An interactive user interface displays "Appropriate Legal Notices"
104
+ to the extent that it includes a convenient and prominently visible
105
+ feature that (1) displays an appropriate copyright notice, and (2)
106
+ tells the user that there is no warranty for the work (except to the
107
+ extent that warranties are provided), that licensees may convey the
108
+ work under this License, and how to view a copy of this License. If
109
+ the interface presents a list of user commands or options, such as a
110
+ menu, a prominent item in the list meets this criterion.
111
+
112
+ 1. Source Code.
113
+
114
+ The "source code" for a work means the preferred form of the work
115
+ for making modifications to it. "Object code" means any non-source
116
+ form of a work.
117
+
118
+ A "Standard Interface" means an interface that either is an official
119
+ standard defined by a recognized standards body, or, in the case of
120
+ interfaces specified for a particular programming language, one that
121
+ is widely used among developers working in that language.
122
+
123
+ The "System Libraries" of an executable work include anything, other
124
+ than the work as a whole, that (a) is included in the normal form of
125
+ packaging a Major Component, but which is not part of that Major
126
+ Component, and (b) serves only to enable use of the work with that
127
+ Major Component, or to implement a Standard Interface for which an
128
+ implementation is available to the public in source code form. A
129
+ "Major Component", in this context, means a major essential component
130
+ (kernel, window system, and so on) of the specific operating system
131
+ (if any) on which the executable work runs, or a compiler used to
132
+ produce the work, or an object code interpreter used to run it.
133
+
134
+ The "Corresponding Source" for a work in object code form means all
135
+ the source code needed to generate, install, and (for an executable
136
+ work) run the object code and to modify the work, including scripts to
137
+ control those activities. However, it does not include the work's
138
+ System Libraries, or general-purpose tools or generally available free
139
+ programs which are used unmodified in performing those activities but
140
+ which are not part of the work. For example, Corresponding Source
141
+ includes interface definition files associated with source files for
142
+ the work, and the source code for shared libraries and dynamically
143
+ linked subprograms that the work is specifically designed to require,
144
+ such as by intimate data communication or control flow between those
145
+ subprograms and other parts of the work.
146
+
147
+ The Corresponding Source need not include anything that users
148
+ can regenerate automatically from other parts of the Corresponding
149
+ Source.
150
+
151
+ The Corresponding Source for a work in source code form is that
152
+ same work.
153
+
154
+ 2. Basic Permissions.
155
+
156
+ All rights granted under this License are granted for the term of
157
+ copyright on the Program, and are irrevocable provided the stated
158
+ conditions are met. This License explicitly affirms your unlimited
159
+ permission to run the unmodified Program. The output from running a
160
+ covered work is covered by this License only if the output, given its
161
+ content, constitutes a covered work. This License acknowledges your
162
+ rights of fair use or other equivalent, as provided by copyright law.
163
+
164
+ You may make, run and propagate covered works that you do not
165
+ convey, without conditions so long as your license otherwise remains
166
+ in force. You may convey covered works to others for the sole purpose
167
+ of having them make modifications exclusively for you, or provide you
168
+ with facilities for running those works, provided that you comply with
169
+ the terms of this License in conveying all material for which you do
170
+ not control copyright. Those thus making or running the covered works
171
+ for you must do so exclusively on your behalf, under your direction
172
+ and control, on terms that prohibit them from making any copies of
173
+ your copyrighted material outside their relationship with you.
174
+
175
+ Conveying under any other circumstances is permitted solely under
176
+ the conditions stated below. Sublicensing is not allowed; section 10
177
+ makes it unnecessary.
178
+
179
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180
+
181
+ No covered work shall be deemed part of an effective technological
182
+ measure under any applicable law fulfilling obligations under article
183
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184
+ similar laws prohibiting or restricting circumvention of such
185
+ measures.
186
+
187
+ When you convey a covered work, you waive any legal power to forbid
188
+ circumvention of technological measures to the extent such circumvention
189
+ is effected by exercising rights under this License with respect to
190
+ the covered work, and you disclaim any intention to limit operation or
191
+ modification of the work as a means of enforcing, against the work's
192
+ users, your or third parties' legal rights to forbid circumvention of
193
+ technological measures.
194
+
195
+ 4. Conveying Verbatim Copies.
196
+
197
+ You may convey verbatim copies of the Program's source code as you
198
+ receive it, in any medium, provided that you conspicuously and
199
+ appropriately publish on each copy an appropriate copyright notice;
200
+ keep intact all notices stating that this License and any
201
+ non-permissive terms added in accord with section 7 apply to the code;
202
+ keep intact all notices of the absence of any warranty; and give all
203
+ recipients a copy of this License along with the Program.
204
+
205
+ You may charge any price or no price for each copy that you convey,
206
+ and you may offer support or warranty protection for a fee.
207
+
208
+ 5. Conveying Modified Source Versions.
209
+
210
+ You may convey a work based on the Program, or the modifications to
211
+ produce it from the Program, in the form of source code under the
212
+ terms of section 4, provided that you also meet all of these conditions:
213
+
214
+ a) The work must carry prominent notices stating that you modified
215
+ it, and giving a relevant date.
216
+
217
+ b) The work must carry prominent notices stating that it is
218
+ released under this License and any conditions added under section
219
+ 7. This requirement modifies the requirement in section 4 to
220
+ "keep intact all notices".
221
+
222
+ c) You must license the entire work, as a whole, under this
223
+ License to anyone who comes into possession of a copy. This
224
+ License will therefore apply, along with any applicable section 7
225
+ additional terms, to the whole of the work, and all its parts,
226
+ regardless of how they are packaged. This License gives no
227
+ permission to license the work in any other way, but it does not
228
+ invalidate such permission if you have separately received it.
229
+
230
+ d) If the work has interactive user interfaces, each must display
231
+ Appropriate Legal Notices; however, if the Program has interactive
232
+ interfaces that do not display Appropriate Legal Notices, your
233
+ work need not make them do so.
234
+
235
+ A compilation of a covered work with other separate and independent
236
+ works, which are not by their nature extensions of the covered work,
237
+ and which are not combined with it such as to form a larger program,
238
+ in or on a volume of a storage or distribution medium, is called an
239
+ "aggregate" if the compilation and its resulting copyright are not
240
+ used to limit the access or legal rights of the compilation's users
241
+ beyond what the individual works permit. Inclusion of a covered work
242
+ in an aggregate does not cause this License to apply to the other
243
+ parts of the aggregate.
244
+
245
+ 6. Conveying Non-Source Forms.
246
+
247
+ You may convey a covered work in object code form under the terms
248
+ of sections 4 and 5, provided that you also convey the
249
+ machine-readable Corresponding Source under the terms of this License,
250
+ in one of these ways:
251
+
252
+ a) Convey the object code in, or embodied in, a physical product
253
+ (including a physical distribution medium), accompanied by the
254
+ Corresponding Source fixed on a durable physical medium
255
+ customarily used for software interchange.
256
+
257
+ b) Convey the object code in, or embodied in, a physical product
258
+ (including a physical distribution medium), accompanied by a
259
+ written offer, valid for at least three years and valid for as
260
+ long as you offer spare parts or customer support for that product
261
+ model, to give anyone who possesses the object code either (1) a
262
+ copy of the Corresponding Source for all the software in the
263
+ product that is covered by this License, on a durable physical
264
+ medium customarily used for software interchange, for a price no
265
+ more than your reasonable cost of physically performing this
266
+ conveying of source, or (2) access to copy the
267
+ Corresponding Source from a network server at no charge.
268
+
269
+ c) Convey individual copies of the object code with a copy of the
270
+ written offer to provide the Corresponding Source. This
271
+ alternative is allowed only occasionally and noncommercially, and
272
+ only if you received the object code with such an offer, in accord
273
+ with subsection 6b.
274
+
275
+ d) Convey the object code by offering access from a designated
276
+ place (gratis or for a charge), and offer equivalent access to the
277
+ Corresponding Source in the same way through the same place at no
278
+ further charge. You need not require recipients to copy the
279
+ Corresponding Source along with the object code. If the place to
280
+ copy the object code is a network server, the Corresponding Source
281
+ may be on a different server (operated by you or a third party)
282
+ that supports equivalent copying facilities, provided you maintain
283
+ clear directions next to the object code saying where to find the
284
+ Corresponding Source. Regardless of what server hosts the
285
+ Corresponding Source, you remain obligated to ensure that it is
286
+ available for as long as needed to satisfy these requirements.
287
+
288
+ e) Convey the object code using peer-to-peer transmission, provided
289
+ you inform other peers where the object code and Corresponding
290
+ Source of the work are being offered to the general public at no
291
+ charge under subsection 6d.
292
+
293
+ A separable portion of the object code, whose source code is excluded
294
+ from the Corresponding Source as a System Library, need not be
295
+ included in conveying the object code work.
296
+
297
+ A "User Product" is either (1) a "consumer product", which means any
298
+ tangible personal property which is normally used for personal, family,
299
+ or household purposes, or (2) anything designed or sold for incorporation
300
+ into a dwelling. In determining whether a product is a consumer product,
301
+ doubtful cases shall be resolved in favor of coverage. For a particular
302
+ product received by a particular user, "normally used" refers to a
303
+ typical or common use of that class of product, regardless of the status
304
+ of the particular user or of the way in which the particular user
305
+ actually uses, or expects or is expected to use, the product. A product
306
+ is a consumer product regardless of whether the product has substantial
307
+ commercial, industrial or non-consumer uses, unless such uses represent
308
+ the only significant mode of use of the product.
309
+
310
+ "Installation Information" for a User Product means any methods,
311
+ procedures, authorization keys, or other information required to install
312
+ and execute modified versions of a covered work in that User Product from
313
+ a modified version of its Corresponding Source. The information must
314
+ suffice to ensure that the continued functioning of the modified object
315
+ code is in no case prevented or interfered with solely because
316
+ modification has been made.
317
+
318
+ If you convey an object code work under this section in, or with, or
319
+ specifically for use in, a User Product, and the conveying occurs as
320
+ part of a transaction in which the right of possession and use of the
321
+ User Product is transferred to the recipient in perpetuity or for a
322
+ fixed term (regardless of how the transaction is characterized), the
323
+ Corresponding Source conveyed under this section must be accompanied
324
+ by the Installation Information. But this requirement does not apply
325
+ if neither you nor any third party retains the ability to install
326
+ modified object code on the User Product (for example, the work has
327
+ been installed in ROM).
328
+
329
+ The requirement to provide Installation Information does not include a
330
+ requirement to continue to provide support service, warranty, or updates
331
+ for a work that has been modified or installed by the recipient, or for
332
+ the User Product in which it has been modified or installed. Access to a
333
+ network may be denied when the modification itself materially and
334
+ adversely affects the operation of the network or violates the rules and
335
+ protocols for communication across the network.
336
+
337
+ Corresponding Source conveyed, and Installation Information provided,
338
+ in accord with this section must be in a format that is publicly
339
+ documented (and with an implementation available to the public in
340
+ source code form), and must require no special password or key for
341
+ unpacking, reading or copying.
342
+
343
+ 7. Additional Terms.
344
+
345
+ "Additional permissions" are terms that supplement the terms of this
346
+ License by making exceptions from one or more of its conditions.
347
+ Additional permissions that are applicable to the entire Program shall
348
+ be treated as though they were included in this License, to the extent
349
+ that they are valid under applicable law. If additional permissions
350
+ apply only to part of the Program, that part may be used separately
351
+ under those permissions, but the entire Program remains governed by
352
+ this License without regard to the additional permissions.
353
+
354
+ When you convey a copy of a covered work, you may at your option
355
+ remove any additional permissions from that copy, or from any part of
356
+ it. (Additional permissions may be written to require their own
357
+ removal in certain cases when you modify the work.) You may place
358
+ additional permissions on material, added by you to a covered work,
359
+ for which you have or can give appropriate copyright permission.
360
+
361
+ Notwithstanding any other provision of this License, for material you
362
+ add to a covered work, you may (if authorized by the copyright holders of
363
+ that material) supplement the terms of this License with terms:
364
+
365
+ a) Disclaiming warranty or limiting liability differently from the
366
+ terms of sections 15 and 16 of this License; or
367
+
368
+ b) Requiring preservation of specified reasonable legal notices or
369
+ author attributions in that material or in the Appropriate Legal
370
+ Notices displayed by works containing it; or
371
+
372
+ c) Prohibiting misrepresentation of the origin of that material, or
373
+ requiring that modified versions of such material be marked in
374
+ reasonable ways as different from the original version; or
375
+
376
+ d) Limiting the use for publicity purposes of names of licensors or
377
+ authors of the material; or
378
+
379
+ e) Declining to grant rights under trademark law for use of some
380
+ trade names, trademarks, or service marks; or
381
+
382
+ f) Requiring indemnification of licensors and authors of that
383
+ material by anyone who conveys the material (or modified versions of
384
+ it) with contractual assumptions of liability to the recipient, for
385
+ any liability that these contractual assumptions directly impose on
386
+ those licensors and authors.
387
+
388
+ All other non-permissive additional terms are considered "further
389
+ restrictions" within the meaning of section 10. If the Program as you
390
+ received it, or any part of it, contains a notice stating that it is
391
+ governed by this License along with a term that is a further
392
+ restriction, you may remove that term. If a license document contains
393
+ a further restriction but permits relicensing or conveying under this
394
+ License, you may add to a covered work material governed by the terms
395
+ of that license document, provided that the further restriction does
396
+ not survive such relicensing or conveying.
397
+
398
+ If you add terms to a covered work in accord with this section, you
399
+ must place, in the relevant source files, a statement of the
400
+ additional terms that apply to those files, or a notice indicating
401
+ where to find the applicable terms.
402
+
403
+ Additional terms, permissive or non-permissive, may be stated in the
404
+ form of a separately written license, or stated as exceptions;
405
+ the above requirements apply either way.
406
+
407
+ 8. Termination.
408
+
409
+ You may not propagate or modify a covered work except as expressly
410
+ provided under this License. Any attempt otherwise to propagate or
411
+ modify it is void, and will automatically terminate your rights under
412
+ this License (including any patent licenses granted under the third
413
+ paragraph of section 11).
414
+
415
+ However, if you cease all violation of this License, then your
416
+ license from a particular copyright holder is reinstated (a)
417
+ provisionally, unless and until the copyright holder explicitly and
418
+ finally terminates your license, and (b) permanently, if the copyright
419
+ holder fails to notify you of the violation by some reasonable means
420
+ prior to 60 days after the cessation.
421
+
422
+ Moreover, your license from a particular copyright holder is
423
+ reinstated permanently if the copyright holder notifies you of the
424
+ violation by some reasonable means, this is the first time you have
425
+ received notice of violation of this License (for any work) from that
426
+ copyright holder, and you cure the violation prior to 30 days after
427
+ your receipt of the notice.
428
+
429
+ Termination of your rights under this section does not terminate the
430
+ licenses of parties who have received copies or rights from you under
431
+ this License. If your rights have been terminated and not permanently
432
+ reinstated, you do not qualify to receive new licenses for the same
433
+ material under section 10.
434
+
435
+ 9. Acceptance Not Required for Having Copies.
436
+
437
+ You are not required to accept this License in order to receive or
438
+ run a copy of the Program. Ancillary propagation of a covered work
439
+ occurring solely as a consequence of using peer-to-peer transmission
440
+ to receive a copy likewise does not require acceptance. However,
441
+ nothing other than this License grants you permission to propagate or
442
+ modify any covered work. These actions infringe copyright if you do
443
+ not accept this License. Therefore, by modifying or propagating a
444
+ covered work, you indicate your acceptance of this License to do so.
445
+
446
+ 10. Automatic Licensing of Downstream Recipients.
447
+
448
+ Each time you convey a covered work, the recipient automatically
449
+ receives a license from the original licensors, to run, modify and
450
+ propagate that work, subject to this License. You are not responsible
451
+ for enforcing compliance by third parties with this License.
452
+
453
+ An "entity transaction" is a transaction transferring control of an
454
+ organization, or substantially all assets of one, or subdividing an
455
+ organization, or merging organizations. If propagation of a covered
456
+ work results from an entity transaction, each party to that
457
+ transaction who receives a copy of the work also receives whatever
458
+ licenses to the work the party's predecessor in interest had or could
459
+ give under the previous paragraph, plus a right to possession of the
460
+ Corresponding Source of the work from the predecessor in interest, if
461
+ the predecessor has it or can get it with reasonable efforts.
462
+
463
+ You may not impose any further restrictions on the exercise of the
464
+ rights granted or affirmed under this License. For example, you may
465
+ not impose a license fee, royalty, or other charge for exercise of
466
+ rights granted under this License, and you may not initiate litigation
467
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
468
+ any patent claim is infringed by making, using, selling, offering for
469
+ sale, or importing the Program or any portion of it.
470
+
471
+ 11. Patents.
472
+
473
+ A "contributor" is a copyright holder who authorizes use under this
474
+ License of the Program or a work on which the Program is based. The
475
+ work thus licensed is called the contributor's "contributor version".
476
+
477
+ A contributor's "essential patent claims" are all patent claims
478
+ owned or controlled by the contributor, whether already acquired or
479
+ hereafter acquired, that would be infringed by some manner, permitted
480
+ by this License, of making, using, or selling its contributor version,
481
+ but do not include claims that would be infringed only as a
482
+ consequence of further modification of the contributor version. For
483
+ purposes of this definition, "control" includes the right to grant
484
+ patent sublicenses in a manner consistent with the requirements of
485
+ this License.
486
+
487
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
488
+ patent license under the contributor's essential patent claims, to
489
+ make, use, sell, offer for sale, import and otherwise run, modify and
490
+ propagate the contents of its contributor version.
491
+
492
+ In the following three paragraphs, a "patent license" is any express
493
+ agreement or commitment, however denominated, not to enforce a patent
494
+ (such as an express permission to practice a patent or covenant not to
495
+ sue for patent infringement). To "grant" such a patent license to a
496
+ party means to make such an agreement or commitment not to enforce a
497
+ patent against the party.
498
+
499
+ If you convey a covered work, knowingly relying on a patent license,
500
+ and the Corresponding Source of the work is not available for anyone
501
+ to copy, free of charge and under the terms of this License, through a
502
+ publicly available network server or other readily accessible means,
503
+ then you must either (1) cause the Corresponding Source to be so
504
+ available, or (2) arrange to deprive yourself of the benefit of the
505
+ patent license for this particular work, or (3) arrange, in a manner
506
+ consistent with the requirements of this License, to extend the patent
507
+ license to downstream recipients. "Knowingly relying" means you have
508
+ actual knowledge that, but for the patent license, your conveying the
509
+ covered work in a country, or your recipient's use of the covered work
510
+ in a country, would infringe one or more identifiable patents in that
511
+ country that you have reason to believe are valid.
512
+
513
+ If, pursuant to or in connection with a single transaction or
514
+ arrangement, you convey, or propagate by procuring conveyance of, a
515
+ covered work, and grant a patent license to some of the parties
516
+ receiving the covered work authorizing them to use, propagate, modify
517
+ or convey a specific copy of the covered work, then the patent license
518
+ you grant is automatically extended to all recipients of the covered
519
+ work and works based on it.
520
+
521
+ A patent license is "discriminatory" if it does not include within
522
+ the scope of its coverage, prohibits the exercise of, or is
523
+ conditioned on the non-exercise of one or more of the rights that are
524
+ specifically granted under this License. You may not convey a covered
525
+ work if you are a party to an arrangement with a third party that is
526
+ in the business of distributing software, under which you make payment
527
+ to the third party based on the extent of your activity of conveying
528
+ the work, and under which the third party grants, to any of the
529
+ parties who would receive the covered work from you, a discriminatory
530
+ patent license (a) in connection with copies of the covered work
531
+ conveyed by you (or copies made from those copies), or (b) primarily
532
+ for and in connection with specific products or compilations that
533
+ contain the covered work, unless you entered into that arrangement,
534
+ or that patent license was granted, prior to 28 March 2007.
535
+
536
+ Nothing in this License shall be construed as excluding or limiting
537
+ any implied license or other defenses to infringement that may
538
+ otherwise be available to you under applicable patent law.
539
+
540
+ 12. No Surrender of Others' Freedom.
541
+
542
+ If conditions are imposed on you (whether by court order, agreement or
543
+ otherwise) that contradict the conditions of this License, they do not
544
+ excuse you from the conditions of this License. If you cannot convey a
545
+ covered work so as to satisfy simultaneously your obligations under this
546
+ License and any other pertinent obligations, then as a consequence you may
547
+ not convey it at all. For example, if you agree to terms that obligate you
548
+ to collect a royalty for further conveying from those to whom you convey
549
+ the Program, the only way you could satisfy both those terms and this
550
+ License would be to refrain entirely from conveying the Program.
551
+
552
+ 13. Use with the GNU Affero General Public License.
553
+
554
+ Notwithstanding any other provision of this License, you have
555
+ permission to link or combine any covered work with a work licensed
556
+ under version 3 of the GNU Affero General Public License into a single
557
+ combined work, and to convey the resulting work. The terms of this
558
+ License will continue to apply to the part which is the covered work,
559
+ but the special requirements of the GNU Affero General Public License,
560
+ section 13, concerning interaction through a network will apply to the
561
+ combination as such.
562
+
563
+ 14. Revised Versions of this License.
564
+
565
+ The Free Software Foundation may publish revised and/or new versions of
566
+ the GNU General Public License from time to time. Such new versions will
567
+ be similar in spirit to the present version, but may differ in detail to
568
+ address new problems or concerns.
569
+
570
+ Each version is given a distinguishing version number. If the
571
+ Program specifies that a certain numbered version of the GNU General
572
+ Public License "or any later version" applies to it, you have the
573
+ option of following the terms and conditions either of that numbered
574
+ version or of any later version published by the Free Software
575
+ Foundation. If the Program does not specify a version number of the
576
+ GNU General Public License, you may choose any version ever published
577
+ by the Free Software Foundation.
578
+
579
+ If the Program specifies that a proxy can decide which future
580
+ versions of the GNU General Public License can be used, that proxy's
581
+ public statement of acceptance of a version permanently authorizes you
582
+ to choose that version for the Program.
583
+
584
+ Later license versions may give you additional or different
585
+ permissions. However, no additional obligations are imposed on any
586
+ author or copyright holder as a result of your choosing to follow a
587
+ later version.
588
+
589
+ 15. Disclaimer of Warranty.
590
+
591
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599
+
600
+ 16. Limitation of Liability.
601
+
602
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610
+ SUCH DAMAGES.
611
+
612
+ 17. Interpretation of Sections 15 and 16.
613
+
614
+ If the disclaimer of warranty and limitation of liability provided
615
+ above cannot be given local legal effect according to their terms,
616
+ reviewing courts shall apply local law that most closely approximates
617
+ an absolute waiver of all civil liability in connection with the
618
+ Program, unless a warranty or assumption of liability accompanies a
619
+ copy of the Program in return for a fee.
620
+
621
+ END OF TERMS AND CONDITIONS
622
+
623
+ How to Apply These Terms to Your New Programs
624
+
625
+ If you develop a new program, and you want it to be of the greatest
626
+ possible use to the public, the best way to achieve this is to make it
627
+ free software which everyone can redistribute and change under these terms.
628
+
629
+ To do so, attach the following notices to the program. It is safest
630
+ to attach them to the start of each source file to most effectively
631
+ state the exclusion of warranty; and each file should have at least
632
+ the "copyright" line and a pointer to where the full notice is found.
633
+
634
+ <one line to give the program's name and a brief idea of what it does.>
635
+ Copyright (C) <year> <name of author>
636
+
637
+ This program is free software: you can redistribute it and/or modify
638
+ it under the terms of the GNU General Public License as published by
639
+ the Free Software Foundation, either version 3 of the License, or
640
+ (at your option) any later version.
641
+
642
+ This program is distributed in the hope that it will be useful,
643
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
644
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645
+ GNU General Public License for more details.
646
+
647
+ You should have received a copy of the GNU General Public License
648
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
649
+
650
+ Also add information on how to contact you by electronic and paper mail.
651
+
652
+ If the program does terminal interaction, make it output a short
653
+ notice like this when it starts in an interactive mode:
654
+
655
+ <program> Copyright (C) <year> <name of author>
656
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657
+ This is free software, and you are welcome to redistribute it
658
+ under certain conditions; type `show c' for details.
659
+
660
+ The hypothetical commands `show w' and `show c' should show the appropriate
661
+ parts of the General Public License. Of course, your program's commands
662
+ might be different; for a GUI interface, you would use an "about box".
663
+
664
+ You should also get your employer (if you work as a programmer) or school,
665
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
666
+ For more information on this, and how to apply and follow the GNU GPL, see
667
+ <https://www.gnu.org/licenses/>.
668
+
669
+ The GNU General Public License does not permit incorporating your program
670
+ into proprietary programs. If your program is a subroutine library, you
671
+ may consider it more useful to permit linking proprietary applications with
672
+ the library. If this is what you want to do, use the GNU Lesser General
673
+ Public License instead of this License. But first, please read
674
+ <https://www.gnu.org/licenses/why-not-lgpl.html>.
extensions-builtin/forge_legacy_preprocessors/annotator/anime_face_segment/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2021 Miaomiao Li
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
extensions-builtin/forge_legacy_preprocessors/annotator/anime_face_segment/__init__.py ADDED
@@ -0,0 +1,186 @@
1
+ import os
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from PIL import Image
6
+ import fnmatch
7
+ import cv2
8
+
9
+ import sys
10
+
11
+ import numpy as np
12
+ from modules import devices
13
+ from einops import rearrange
14
+ from annotator.annotator_path import models_path
15
+
16
+ import torchvision
17
+ from torchvision.models import MobileNet_V2_Weights
18
+ from torchvision import transforms
19
+
20
+ COLOR_BACKGROUND = (255, 255, 0)
21
+ COLOR_HAIR = (0, 0, 255)
22
+ COLOR_EYE = (255, 0, 0)
23
+ COLOR_MOUTH = (255, 255, 255)
24
+ COLOR_FACE = (0, 255, 0)
25
+ COLOR_SKIN = (0, 255, 255)
26
+ COLOR_CLOTHES = (255, 0, 255)
27
+ PALETTE = [
28
+ COLOR_BACKGROUND,
29
+ COLOR_HAIR,
30
+ COLOR_EYE,
31
+ COLOR_MOUTH,
32
+ COLOR_FACE,
33
+ COLOR_SKIN,
34
+ COLOR_CLOTHES,
35
+ ]
36
+
37
+
38
+ class UNet(nn.Module):
39
+ def __init__(self):
40
+ super(UNet, self).__init__()
41
+ self.NUM_SEG_CLASSES = 7 # Background, hair, face, eye, mouth, skin, clothes
42
+
43
+ mobilenet_v2 = torchvision.models.mobilenet_v2(
44
+ weights=MobileNet_V2_Weights.IMAGENET1K_V1
45
+ )
46
+ mob_blocks = mobilenet_v2.features
47
+
48
+ # Encoder
49
+ self.en_block0 = nn.Sequential( # in_ch=3 out_ch=16
50
+ mob_blocks[0], mob_blocks[1]
51
+ )
52
+ self.en_block1 = nn.Sequential( # in_ch=16 out_ch=24
53
+ mob_blocks[2],
54
+ mob_blocks[3],
55
+ )
56
+ self.en_block2 = nn.Sequential( # in_ch=24 out_ch=32
57
+ mob_blocks[4],
58
+ mob_blocks[5],
59
+ mob_blocks[6],
60
+ )
61
+ self.en_block3 = nn.Sequential( # in_ch=32 out_ch=96
62
+ mob_blocks[7],
63
+ mob_blocks[8],
64
+ mob_blocks[9],
65
+ mob_blocks[10],
66
+ mob_blocks[11],
67
+ mob_blocks[12],
68
+ mob_blocks[13],
69
+ )
70
+ self.en_block4 = nn.Sequential( # in_ch=96 out_ch=160
71
+ mob_blocks[14],
72
+ mob_blocks[15],
73
+ mob_blocks[16],
74
+ )
75
+
76
+ # Decoder
77
+ self.de_block4 = nn.Sequential( # in_ch=160 out_ch=96
78
+ nn.UpsamplingNearest2d(scale_factor=2),
79
+ nn.Conv2d(160, 96, kernel_size=3, padding=1),
80
+ nn.InstanceNorm2d(96),
81
+ nn.LeakyReLU(0.1),
82
+ nn.Dropout(p=0.2),
83
+ )
84
+ self.de_block3 = nn.Sequential( # in_ch=96x2 out_ch=32
85
+ nn.UpsamplingNearest2d(scale_factor=2),
86
+ nn.Conv2d(96 * 2, 32, kernel_size=3, padding=1),
87
+ nn.InstanceNorm2d(32),
88
+ nn.LeakyReLU(0.1),
89
+ nn.Dropout(p=0.2),
90
+ )
91
+ self.de_block2 = nn.Sequential( # in_ch=32x2 out_ch=24
92
+ nn.UpsamplingNearest2d(scale_factor=2),
93
+ nn.Conv2d(32 * 2, 24, kernel_size=3, padding=1),
94
+ nn.InstanceNorm2d(24),
95
+ nn.LeakyReLU(0.1),
96
+ nn.Dropout(p=0.2),
97
+ )
98
+ self.de_block1 = nn.Sequential( # in_ch=24x2 out_ch=16
99
+ nn.UpsamplingNearest2d(scale_factor=2),
100
+ nn.Conv2d(24 * 2, 16, kernel_size=3, padding=1),
101
+ nn.InstanceNorm2d(16),
102
+ nn.LeakyReLU(0.1),
103
+ nn.Dropout(p=0.2),
104
+ )
105
+
106
+ self.de_block0 = nn.Sequential( # in_ch=16x2 out_ch=7
107
+ nn.UpsamplingNearest2d(scale_factor=2),
108
+ nn.Conv2d(16 * 2, self.NUM_SEG_CLASSES, kernel_size=3, padding=1),
109
+ nn.Softmax2d(),
110
+ )
111
+
112
+ def forward(self, x):
113
+ e0 = self.en_block0(x)
114
+ e1 = self.en_block1(e0)
115
+ e2 = self.en_block2(e1)
116
+ e3 = self.en_block3(e2)
117
+ e4 = self.en_block4(e3)
118
+
119
+ d4 = self.de_block4(e4)
120
+ d4 = F.interpolate(d4, size=e3.size()[2:], mode="bilinear", align_corners=True)
121
+ c4 = torch.cat((d4, e3), 1)
122
+
123
+ d3 = self.de_block3(c4)
124
+ d3 = F.interpolate(d3, size=e2.size()[2:], mode="bilinear", align_corners=True)
125
+ c3 = torch.cat((d3, e2), 1)
126
+
127
+ d2 = self.de_block2(c3)
128
+ d2 = F.interpolate(d2, size=e1.size()[2:], mode="bilinear", align_corners=True)
129
+ c2 = torch.cat((d2, e1), 1)
130
+
131
+ d1 = self.de_block1(c2)
132
+ d1 = F.interpolate(d1, size=e0.size()[2:], mode="bilinear", align_corners=True)
133
+ c1 = torch.cat((d1, e0), 1)
134
+ y = self.de_block0(c1)
135
+
136
+ return y
137
+
138
+
139
+ class AnimeFaceSegment:
140
+ model_dir = os.path.join(models_path, "anime_face_segment")
141
+
142
+ def __init__(self):
143
+ self.model = None
144
+ self.device = devices.get_device_for("controlnet")
145
+
146
+ def load_model(self):
147
+ remote_model_path = "https://huggingface.co/bdsqlsz/qinglong_controlnet-lllite/resolve/main/Annotators/UNet.pth"
148
+ modelpath = os.path.join(self.model_dir, "UNet.pth")
149
+ if not os.path.exists(modelpath):
150
+ from modules.modelloader import load_file_from_url
151
+
152
+ load_file_from_url(remote_model_path, model_dir=self.model_dir)
153
+ net = UNet()
154
+ ckpt = torch.load(modelpath, map_location=self.device)
155
+ for key in list(ckpt.keys()):
156
+ if "module." in key:
157
+ ckpt[key.replace("module.", "")] = ckpt[key]
158
+ del ckpt[key]
159
+ net.load_state_dict(ckpt)
160
+ net.eval()
161
+ self.model = net.to(self.device)
162
+
163
+ def unload_model(self):
164
+ if self.model is not None:
165
+ self.model.cpu()
166
+
167
+ def __call__(self, input_image):
168
+ if self.model is None:
169
+ self.load_model()
170
+ self.model.to(self.device)
171
+ transform = transforms.Compose(
172
+ [
173
+ transforms.Resize(
174
+ 512, interpolation=transforms.InterpolationMode.BICUBIC
175
+ ),
176
+ transforms.ToTensor(),
177
+ ]
178
+ )
179
+ img = Image.fromarray(input_image)
180
+ with torch.no_grad():
181
+ img = transform(img).unsqueeze(dim=0).to(self.device)
182
+ seg = self.model(img).squeeze(dim=0)
183
+ seg = seg.cpu().detach().numpy()
184
+ img = rearrange(seg, "h w c -> w c h")
185
+ img = [[PALETTE[np.argmax(val)] for val in buf] for buf in img]
186
+ return np.array(img).astype(np.uint8)
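Note: the final list comprehension in AnimeFaceSegment.__call__ colorizes the segmentation by taking the argmax over the seven class scores at each pixel and looking the index up in PALETTE. A minimal standalone sketch of that step, assuming a (H, W, 7) score array; the vectorized lookup below is equivalent to (and faster than) the nested comprehension:

import numpy as np

# Same seven colors as PALETTE above: background, hair, eye, mouth, face, skin, clothes
PALETTE_ARR = np.array(
    [(255, 255, 0), (0, 0, 255), (255, 0, 0), (255, 255, 255),
     (0, 255, 0), (0, 255, 255), (255, 0, 255)],
    dtype=np.uint8,
)

def colorize(scores):
    # scores: float array of shape (H, W, 7); returns an (H, W, 3) uint8 color map
    labels = scores.argmax(axis=-1)   # per-pixel class index in [0, 6]
    return PALETTE_ARR[labels]        # vectorized palette lookup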
extensions-builtin/forge_legacy_preprocessors/annotator/annotator_path.py ADDED
@@ -0,0 +1,17 @@
+ from modules_forge.shared import preprocessor_dir
+ import os
+
+ try:
+     from lib_controlnet.logging import logger
+
+     print = logger.info
+ except ImportError:
+     pass
+
+ models_path = preprocessor_dir
+ clip_vision_path = os.path.join(preprocessor_dir, "clip_vision")
+
+ os.makedirs(models_path, exist_ok=True)
+ os.makedirs(clip_vision_path, exist_ok=True)
+
+ print(f"Preprocessor location: {models_path}")
extensions-builtin/forge_legacy_preprocessors/annotator/binary/__init__.py ADDED
@@ -0,0 +1,16 @@
+ import cv2
+
+
+ def apply_binary(img, bin_threshold):
+     img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
+
+     if bin_threshold == 0 or bin_threshold == 255:
+         # Otsu's threshold
+         otsu_threshold, img_bin = cv2.threshold(
+             img_gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
+         )
+         print("Otsu threshold:", otsu_threshold)
+     else:
+         _, img_bin = cv2.threshold(img_gray, bin_threshold, 255, cv2.THRESH_BINARY_INV)
+
+     return cv2.cvtColor(img_bin, cv2.COLOR_GRAY2RGB)
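Note: apply_binary treats a threshold of 0 or 255 as "automatic" and falls back to Otsu's method; any other value is used as a fixed inverse-binary cutoff. A rough usage sketch, with a placeholder array standing in for a real image:

import numpy as np
# apply_binary as defined in annotator/binary/__init__.py above

img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # placeholder RGB image

auto_mask = apply_binary(img, 0)     # Otsu picks the threshold from the image histogram
fixed_mask = apply_binary(img, 128)  # pixels at or below 128 become white, brighter pixels black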
extensions-builtin/forge_legacy_preprocessors/annotator/canny/__init__.py ADDED
@@ -0,0 +1,5 @@
+ import cv2
+
+
+ def apply_canny(img, low_threshold, high_threshold):
+     return cv2.Canny(img, low_threshold, high_threshold)
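Note: apply_canny is a thin wrapper around OpenCV's Canny detector; the two arguments are the usual hysteresis thresholds. A quick sketch with commonly used values (the input array is only a placeholder):

import cv2
import numpy as np

img = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)  # placeholder image
edges = cv2.Canny(img, 100, 200)  # low/high hysteresis thresholds; returns a uint8 edge mask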
extensions-builtin/forge_legacy_preprocessors/annotator/color/__init__.py ADDED
@@ -0,0 +1,24 @@
+ import cv2
+
+
+ def cv2_resize_shortest_edge(image, size):
+     h, w = image.shape[:2]
+     if h < w:
+         new_h = size
+         new_w = int(round(w / h * size))
+     else:
+         new_w = size
+         new_h = int(round(h / w * size))
+     resized_image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_AREA)
+     return resized_image
+
+
+ def apply_color(img, res=512):
+     img = cv2_resize_shortest_edge(img, res)
+     h, w = img.shape[:2]
+
+     input_img_color = cv2.resize(img, (w // 64, h // 64), interpolation=cv2.INTER_CUBIC)
+     input_img_color = cv2.resize(
+         input_img_color, (w, h), interpolation=cv2.INTER_NEAREST
+     )
+     return input_img_color
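Note: apply_color builds its hint by shrinking the (shortest-edge-resized) image to a 1/64-scale grid of averaged colors and scaling it back up with nearest-neighbor interpolation, which produces the blocky color-layout map. A sketch of the same two-step resize, using a placeholder image:

import cv2
import numpy as np

img = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)  # placeholder 512x512 input

h, w = img.shape[:2]
blocks = cv2.resize(img, (w // 64, h // 64), interpolation=cv2.INTER_CUBIC)  # 8x8 grid of averaged colors
hint = cv2.resize(blocks, (w, h), interpolation=cv2.INTER_NEAREST)           # blocky upscale back to full size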
extensions-builtin/forge_legacy_preprocessors/annotator/densepose/__init__.py ADDED
@@ -0,0 +1,80 @@
+ import torchvision  # Fix issue Unknown builtin op: torchvision::nms
+ import cv2
+ import numpy as np
+ import torch
+ from einops import rearrange
+ from .densepose import (
+     DensePoseMaskedColormapResultsVisualizer,
+     _extract_i_from_iuvarr,
+     densepose_chart_predictor_output_to_result_with_confidences,
+ )
+ from modules import devices
+ from annotator.annotator_path import models_path
+ import os
+
+ N_PART_LABELS = 24
+ result_visualizer = DensePoseMaskedColormapResultsVisualizer(
+     alpha=1,
+     data_extractor=_extract_i_from_iuvarr,
+     segm_extractor=_extract_i_from_iuvarr,
+     val_scale=255.0 / N_PART_LABELS,
+ )
+ remote_torchscript_path = "https://huggingface.co/LayerNorm/DensePose-TorchScript-with-hint-image/resolve/main/densepose_r50_fpn_dl.torchscript"
+ torchscript_model = None
+ model_dir = os.path.join(models_path, "densepose")
+
+
+ def apply_densepose(input_image, cmap="viridis"):
+     global torchscript_model
+     if torchscript_model is None:
+         model_path = os.path.join(model_dir, "densepose_r50_fpn_dl.torchscript")
+         if not os.path.exists(model_path):
+             from modules.modelloader import load_file_from_url
+
+             load_file_from_url(remote_torchscript_path, model_dir=model_dir)
+         torchscript_model = (
+             torch.jit.load(model_path, map_location="cpu")
+             .to(devices.get_device_for("controlnet"))
+             .eval()
+         )
+     H, W = input_image.shape[:2]
+
+     hint_image_canvas = np.zeros([H, W], dtype=np.uint8)
+     hint_image_canvas = np.tile(hint_image_canvas[:, :, np.newaxis], [1, 1, 3])
+     input_image = rearrange(
+         torch.from_numpy(input_image).to(devices.get_device_for("controlnet")),
+         "h w c -> c h w",
+     )
+     pred_boxes, coarse_segm, fine_segm, u, v = torchscript_model(input_image)
+
+     extractor = densepose_chart_predictor_output_to_result_with_confidences
+     densepose_results = [
+         extractor(
+             pred_boxes[i : i + 1],
+             coarse_segm[i : i + 1],
+             fine_segm[i : i + 1],
+             u[i : i + 1],
+             v[i : i + 1],
+         )
+         for i in range(len(pred_boxes))
+     ]
+
+     if cmap == "viridis":
+         result_visualizer.mask_visualizer.cmap = cv2.COLORMAP_VIRIDIS
+         hint_image = result_visualizer.visualize(hint_image_canvas, densepose_results)
+         hint_image = cv2.cvtColor(hint_image, cv2.COLOR_BGR2RGB)
+         hint_image[:, :, 0][hint_image[:, :, 0] == 0] = 68
+         hint_image[:, :, 1][hint_image[:, :, 1] == 0] = 1
+         hint_image[:, :, 2][hint_image[:, :, 2] == 0] = 84
+     else:
+         result_visualizer.mask_visualizer.cmap = cv2.COLORMAP_PARULA
+         hint_image = result_visualizer.visualize(hint_image_canvas, densepose_results)
+         hint_image = cv2.cvtColor(hint_image, cv2.COLOR_BGR2RGB)
+
+     return hint_image
+
+
+ def unload_model():
+     global torchscript_model
+     if torchscript_model is not None:
+         torchscript_model.cpu()
extensions-builtin/forge_legacy_preprocessors/annotator/densepose/densepose.py ADDED
@@ -0,0 +1,361 @@
1
+ from typing import Tuple
2
+ import math
3
+ import numpy as np
4
+ from enum import IntEnum
5
+ from typing import List, Tuple, Union
6
+ import torch
7
+ from torch.nn import functional as F
8
+ import logging
9
+ import cv2
10
+
11
+ Image = np.ndarray
12
+ Boxes = torch.Tensor
13
+ ImageSizeType = Tuple[int, int]
14
+ _RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray]
15
+ IntTupleBox = Tuple[int, int, int, int]
16
+
17
+
18
+ class BoxMode(IntEnum):
19
+ """
20
+ Enum of different ways to represent a box.
21
+ """
22
+
23
+ XYXY_ABS = 0
24
+ """
25
+ (x0, y0, x1, y1) in absolute floating points coordinates.
26
+ The coordinates in range [0, width or height].
27
+ """
28
+ XYWH_ABS = 1
29
+ """
30
+ (x0, y0, w, h) in absolute floating points coordinates.
31
+ """
32
+ XYXY_REL = 2
33
+ """
34
+ Not yet supported!
35
+ (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.
36
+ """
37
+ XYWH_REL = 3
38
+ """
39
+ Not yet supported!
40
+ (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.
41
+ """
42
+ XYWHA_ABS = 4
43
+ """
44
+ (xc, yc, w, h, a) in absolute floating points coordinates.
45
+ (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.
46
+ """
47
+
48
+ @staticmethod
49
+ def convert(
50
+ box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode"
51
+ ) -> _RawBoxType:
52
+ """
53
+ Args:
54
+ box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5
55
+ from_mode, to_mode (BoxMode)
56
+
57
+ Returns:
58
+ The converted box of the same type.
59
+ """
60
+ if from_mode == to_mode:
61
+ return box
62
+
63
+ original_type = type(box)
64
+ is_numpy = isinstance(box, np.ndarray)
65
+ single_box = isinstance(box, (list, tuple))
66
+ if single_box:
67
+ assert len(box) == 4 or len(box) == 5, (
68
+ "BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,"
69
+ " where k == 4 or 5"
70
+ )
71
+ arr = torch.tensor(box)[None, :]
72
+ else:
73
+ # avoid modifying the input box
74
+ if is_numpy:
75
+ arr = torch.from_numpy(np.asarray(box)).clone()
76
+ else:
77
+ arr = box.clone()
78
+
79
+ assert to_mode not in [
80
+ BoxMode.XYXY_REL,
81
+ BoxMode.XYWH_REL,
82
+ ] and from_mode not in [
83
+ BoxMode.XYXY_REL,
84
+ BoxMode.XYWH_REL,
85
+ ], "Relative mode not yet supported!"
86
+
87
+ if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:
88
+ assert (
89
+ arr.shape[-1] == 5
90
+ ), "The last dimension of input shape must be 5 for XYWHA format"
91
+ original_dtype = arr.dtype
92
+ arr = arr.double()
93
+
94
+ w = arr[:, 2]
95
+ h = arr[:, 3]
96
+ a = arr[:, 4]
97
+ c = torch.abs(torch.cos(a * math.pi / 180.0))
98
+ s = torch.abs(torch.sin(a * math.pi / 180.0))
99
+ # This basically computes the horizontal bounding rectangle of the rotated box
100
+ new_w = c * w + s * h
101
+ new_h = c * h + s * w
102
+
103
+ # convert center to top-left corner
104
+ arr[:, 0] -= new_w / 2.0
105
+ arr[:, 1] -= new_h / 2.0
106
+ # bottom-right corner
107
+ arr[:, 2] = arr[:, 0] + new_w
108
+ arr[:, 3] = arr[:, 1] + new_h
109
+
110
+ arr = arr[:, :4].to(dtype=original_dtype)
111
+ elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:
112
+ original_dtype = arr.dtype
113
+ arr = arr.double()
114
+ arr[:, 0] += arr[:, 2] / 2.0
115
+ arr[:, 1] += arr[:, 3] / 2.0
116
+ angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)
117
+ arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)
118
+ else:
119
+ if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:
120
+ arr[:, 2] += arr[:, 0]
121
+ arr[:, 3] += arr[:, 1]
122
+ elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:
123
+ arr[:, 2] -= arr[:, 0]
124
+ arr[:, 3] -= arr[:, 1]
125
+ else:
126
+ raise NotImplementedError(
127
+ "Conversion from BoxMode {} to {} is not supported yet".format(
128
+ from_mode, to_mode
129
+ )
130
+ )
131
+
132
+ if single_box:
133
+ return original_type(arr.flatten().tolist())
134
+ if is_numpy:
135
+ return arr.numpy()
136
+ else:
137
+ return arr
138
+
139
+
140
+ class MatrixVisualizer:
141
+ """
142
+ Base visualizer for matrix data
143
+ """
144
+
145
+ def __init__(
146
+ self,
147
+ inplace=True,
148
+ cmap=cv2.COLORMAP_PARULA,
149
+ val_scale=1.0,
150
+ alpha=0.7,
151
+ interp_method_matrix=cv2.INTER_LINEAR,
152
+ interp_method_mask=cv2.INTER_NEAREST,
153
+ ):
154
+ self.inplace = inplace
155
+ self.cmap = cmap
156
+ self.val_scale = val_scale
157
+ self.alpha = alpha
158
+ self.interp_method_matrix = interp_method_matrix
159
+ self.interp_method_mask = interp_method_mask
160
+
161
+ def visualize(self, image_bgr, mask, matrix, bbox_xywh):
162
+ self._check_image(image_bgr)
163
+ self._check_mask_matrix(mask, matrix)
164
+ if self.inplace:
165
+ image_target_bgr = image_bgr
166
+ else:
167
+ image_target_bgr = image_bgr * 0
168
+ x, y, w, h = [int(v) for v in bbox_xywh]
169
+ if w <= 0 or h <= 0:
170
+ return image_bgr
171
+ mask, matrix = self._resize(mask, matrix, w, h)
172
+ mask_bg = np.tile((mask == 0)[:, :, np.newaxis], [1, 1, 3])
173
+ matrix_scaled = matrix.astype(np.float32) * self.val_scale
174
+ _EPSILON = 1e-6
175
+ if np.any(matrix_scaled > 255 + _EPSILON):
176
+ logger = logging.getLogger(__name__)
177
+ logger.warning(
178
+ f"Matrix has values > {255 + _EPSILON} after "
179
+ f"scaling, clipping to [0..255]"
180
+ )
181
+ matrix_scaled_8u = matrix_scaled.clip(0, 255).astype(np.uint8)
182
+ matrix_vis = cv2.applyColorMap(matrix_scaled_8u, self.cmap)
183
+ matrix_vis[mask_bg] = image_target_bgr[y : y + h, x : x + w, :][mask_bg]
184
+ image_target_bgr[y : y + h, x : x + w, :] = (
185
+ image_target_bgr[y : y + h, x : x + w, :] * (1.0 - self.alpha)
186
+ + matrix_vis * self.alpha
187
+ )
188
+ return image_target_bgr.astype(np.uint8)
189
+
190
+ def _resize(self, mask, matrix, w, h):
191
+ if (w != mask.shape[1]) or (h != mask.shape[0]):
192
+ mask = cv2.resize(mask, (w, h), self.interp_method_mask)
193
+ if (w != matrix.shape[1]) or (h != matrix.shape[0]):
194
+ matrix = cv2.resize(matrix, (w, h), self.interp_method_matrix)
195
+ return mask, matrix
196
+
197
+ def _check_image(self, image_rgb):
198
+ assert len(image_rgb.shape) == 3
199
+ assert image_rgb.shape[2] == 3
200
+ assert image_rgb.dtype == np.uint8
201
+
202
+ def _check_mask_matrix(self, mask, matrix):
203
+ assert len(matrix.shape) == 2
204
+ assert len(mask.shape) == 2
205
+ assert mask.dtype == np.uint8
206
+
207
+
208
+ class DensePoseResultsVisualizer:
209
+ def visualize(
210
+ self,
211
+ image_bgr: Image,
212
+ results,
213
+ ) -> Image:
214
+ context = self.create_visualization_context(image_bgr)
215
+ for i, result in enumerate(results):
216
+ boxes_xywh, labels, uv = result
217
+ iuv_array = torch.cat((labels[None].type(torch.float32), uv * 255.0)).type(
218
+ torch.uint8
219
+ )
220
+ self.visualize_iuv_arr(context, iuv_array.cpu().numpy(), boxes_xywh)
221
+ image_bgr = self.context_to_image_bgr(context)
222
+ return image_bgr
223
+
224
+ def create_visualization_context(self, image_bgr: Image):
225
+ return image_bgr
226
+
227
+ def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh) -> None:
228
+ pass
229
+
230
+ def context_to_image_bgr(self, context):
231
+ return context
232
+
233
+ def get_image_bgr_from_context(self, context):
234
+ return context
235
+
236
+
237
+ class DensePoseMaskedColormapResultsVisualizer(DensePoseResultsVisualizer):
238
+ def __init__(
239
+ self,
240
+ data_extractor,
241
+ segm_extractor,
242
+ inplace=True,
243
+ cmap=cv2.COLORMAP_PARULA,
244
+ alpha=0.7,
245
+ val_scale=1.0,
246
+ **kwargs,
247
+ ):
248
+ self.mask_visualizer = MatrixVisualizer(
249
+ inplace=inplace, cmap=cmap, val_scale=val_scale, alpha=alpha
250
+ )
251
+ self.data_extractor = data_extractor
252
+ self.segm_extractor = segm_extractor
253
+
254
+ def context_to_image_bgr(self, context):
255
+ return context
256
+
257
+ def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh) -> None:
258
+ image_bgr = self.get_image_bgr_from_context(context)
259
+ matrix = self.data_extractor(iuv_arr)
260
+ segm = self.segm_extractor(iuv_arr)
261
+ mask = np.zeros(matrix.shape, dtype=np.uint8)
262
+ mask[segm > 0] = 1
263
+ image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh)
264
+
265
+
266
+ def _extract_i_from_iuvarr(iuv_arr):
267
+ return iuv_arr[0, :, :]
268
+
269
+
270
+ def _extract_u_from_iuvarr(iuv_arr):
271
+ return iuv_arr[1, :, :]
272
+
273
+
274
+ def _extract_v_from_iuvarr(iuv_arr):
275
+ return iuv_arr[2, :, :]
276
+
277
+
278
+ def make_int_box(box: torch.Tensor) -> IntTupleBox:
279
+ int_box = [0, 0, 0, 0]
280
+ int_box[0], int_box[1], int_box[2], int_box[3] = tuple(box.long().tolist())
281
+ return int_box[0], int_box[1], int_box[2], int_box[3]
282
+
283
+
284
+ def densepose_chart_predictor_output_to_result_with_confidences(
285
+ boxes: Boxes, coarse_segm, fine_segm, u, v
286
+ ):
287
+ boxes_xyxy_abs = boxes.clone()
288
+ boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
289
+ box_xywh = make_int_box(boxes_xywh_abs[0])
290
+
291
+ labels = resample_fine_and_coarse_segm_tensors_to_bbox(
292
+ fine_segm, coarse_segm, box_xywh
293
+ ).squeeze(0)
294
+ uv = resample_uv_tensors_to_bbox(u, v, labels, box_xywh)
295
+ confidences = []
296
+ return box_xywh, labels, uv
297
+
298
+
299
+ def resample_fine_and_coarse_segm_tensors_to_bbox(
300
+ fine_segm: torch.Tensor, coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox
301
+ ):
302
+ """
303
+ Resample fine and coarse segmentation tensors to the given
304
+ bounding box and derive labels for each pixel of the bounding box
305
+
306
+ Args:
307
+ fine_segm: float tensor of shape [1, C, Hout, Wout]
308
+ coarse_segm: float tensor of shape [1, K, Hout, Wout]
309
+ box_xywh_abs (tuple of 4 int): bounding box given by its upper-left
310
+ corner coordinates, width (W) and height (H)
311
+ Return:
312
+ Labels for each pixel of the bounding box, a long tensor of size [1, H, W]
313
+ """
314
+ x, y, w, h = box_xywh_abs
315
+ w = max(int(w), 1)
316
+ h = max(int(h), 1)
317
+ # coarse segmentation
318
+ coarse_segm_bbox = F.interpolate(
319
+ coarse_segm,
320
+ (h, w),
321
+ mode="bilinear",
322
+ align_corners=False,
323
+ ).argmax(dim=1)
324
+ # combined coarse and fine segmentation
325
+ labels = (
326
+ F.interpolate(fine_segm, (h, w), mode="bilinear", align_corners=False).argmax(
327
+ dim=1
328
+ )
329
+ * (coarse_segm_bbox > 0).long()
330
+ )
331
+ return labels
332
+
333
+
334
+ def resample_uv_tensors_to_bbox(
335
+ u: torch.Tensor,
336
+ v: torch.Tensor,
337
+ labels: torch.Tensor,
338
+ box_xywh_abs: IntTupleBox,
339
+ ) -> torch.Tensor:
340
+ """
341
+ Resamples U and V coordinate estimates for the given bounding box
342
+
343
+ Args:
344
+ u (tensor [1, C, H, W] of float): U coordinates
345
+ v (tensor [1, C, H, W] of float): V coordinates
346
+ labels (tensor [H, W] of long): labels obtained by resampling segmentation
347
+ outputs for the given bounding box
348
+ box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs
349
+ Return:
350
+ Resampled U and V coordinates - a tensor [2, H, W] of float
351
+ """
352
+ x, y, w, h = box_xywh_abs
353
+ w = max(int(w), 1)
354
+ h = max(int(h), 1)
355
+ u_bbox = F.interpolate(u, (h, w), mode="bilinear", align_corners=False)
356
+ v_bbox = F.interpolate(v, (h, w), mode="bilinear", align_corners=False)
357
+ uv = torch.zeros([2, h, w], dtype=torch.float32, device=u.device)
358
+ for part_id in range(1, u_bbox.size(1)):
359
+ uv[0][labels == part_id] = u_bbox[0, part_id][labels == part_id]
360
+ uv[1][labels == part_id] = v_bbox[0, part_id][labels == part_id]
361
+ return uv
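Note: BoxMode.convert above is what densepose_chart_predictor_output_to_result_with_confidences uses to turn the predictor's XYXY boxes into XYWH before cropping. A minimal sketch of that conversion for a single absolute-coordinate box:

import torch
# BoxMode as defined in annotator/densepose/densepose.py above

box_xyxy = torch.tensor([[10.0, 20.0, 110.0, 220.0]])  # x0, y0, x1, y1
box_xywh = BoxMode.convert(box_xyxy, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
# box_xywh is now tensor([[10., 20., 100., 200.]])  -- x0, y0, width, height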
extensions-builtin/forge_legacy_preprocessors/annotator/depth_anything.py ADDED
@@ -0,0 +1,81 @@
1
+ import os
2
+ import torch
3
+ import cv2
4
+ import numpy as np
5
+ import torch.nn.functional as F
6
+ from torchvision.transforms import Compose
7
+
8
+ from depth_anything.dpt import DPT_DINOv2
9
+ from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
10
+ from .util import load_model
11
+ from .annotator_path import models_path
12
+
13
+
14
+ transform = Compose(
15
+ [
16
+ Resize(
17
+ width=518,
18
+ height=518,
19
+ resize_target=False,
20
+ keep_aspect_ratio=True,
21
+ ensure_multiple_of=14,
22
+ resize_method="lower_bound",
23
+ image_interpolation_method=cv2.INTER_CUBIC,
24
+ ),
25
+ NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
26
+ PrepareForNet(),
27
+ ]
28
+ )
29
+
30
+
31
+ class DepthAnythingDetector:
32
+ """https://github.com/LiheYoung/Depth-Anything"""
33
+
34
+ model_dir = os.path.join(models_path, "depth_anything")
35
+
36
+ def __init__(self, device: torch.device):
37
+ self.device = device
38
+ self.model = (
39
+ DPT_DINOv2(
40
+ encoder="vitl",
41
+ features=256,
42
+ out_channels=[256, 512, 1024, 1024],
43
+ localhub=False,
44
+ )
45
+ .to(device)
46
+ .eval()
47
+ )
48
+ remote_url = os.environ.get(
49
+ "CONTROLNET_DEPTH_ANYTHING_MODEL_URL",
50
+ "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitl14.pth",
51
+ )
52
+ model_path = load_model(
53
+ "depth_anything_vitl14.pth", remote_url=remote_url, model_dir=self.model_dir
54
+ )
55
+ self.model.load_state_dict(torch.load(model_path))
56
+
57
+ def __call__(self, image: np.ndarray, colored: bool = True) -> np.ndarray:
58
+ self.model.to(self.device)
59
+ h, w = image.shape[:2]
60
+
61
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) / 255.0
62
+ image = transform({"image": image})["image"]
63
+ image = torch.from_numpy(image).unsqueeze(0).to(self.device)
64
+
65
+ @torch.no_grad()
66
+ def predict_depth(model, image):
67
+ return model(image)
68
+
69
+ depth = predict_depth(self.model, image)
70
+ depth = F.interpolate(
71
+ depth[None], (h, w), mode="bilinear", align_corners=False
72
+ )[0, 0]
73
+ depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
74
+ depth = depth.cpu().numpy().astype(np.uint8)
75
+ if colored:
76
+ return cv2.applyColorMap(depth, cv2.COLORMAP_INFERNO)[:, :, ::-1]
77
+ else:
78
+ return depth
79
+
80
+ def unload_model(self):
81
+ self.model.to("cpu")
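Note: DepthAnythingDetector min-max normalizes each prediction into [0, 255] before optionally applying the inferno colormap, so the output is a relative, per-image depth map rather than metric depth. A standalone sketch of that normalization, with a random tensor standing in for the model output:

import numpy as np
import torch

depth = torch.rand(480, 640)  # placeholder for the raw depth prediction

depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
depth_u8 = depth.cpu().numpy().astype(np.uint8)  # 8-bit relative depth hint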
extensions-builtin/forge_legacy_preprocessors/annotator/hed/__init__.py ADDED
@@ -0,0 +1,137 @@
1
+ # This is an improved version and model of HED edge detection with Apache License, Version 2.0.
2
+ # Please use this implementation in your products
3
+ # This implementation may produce slightly different results from Saining Xie's official implementations,
4
+ # but it generates smoother edges and is more suitable for ControlNet as well as other image-to-image translations.
5
+ # Different from official models and other implementations, this is an RGB-input model (rather than BGR)
6
+ # and in this way it works better for gradio's RGB protocol
7
+
8
+ import os
9
+ import cv2
10
+ import torch
11
+ import numpy as np
12
+
13
+ from einops import rearrange
14
+ import os
15
+ from modules import devices
16
+ from annotator.annotator_path import models_path
17
+ from annotator.util import safe_step, nms
18
+
19
+
20
+ class DoubleConvBlock(torch.nn.Module):
21
+ def __init__(self, input_channel, output_channel, layer_number):
22
+ super().__init__()
23
+ self.convs = torch.nn.Sequential()
24
+ self.convs.append(
25
+ torch.nn.Conv2d(
26
+ in_channels=input_channel,
27
+ out_channels=output_channel,
28
+ kernel_size=(3, 3),
29
+ stride=(1, 1),
30
+ padding=1,
31
+ )
32
+ )
33
+ for i in range(1, layer_number):
34
+ self.convs.append(
35
+ torch.nn.Conv2d(
36
+ in_channels=output_channel,
37
+ out_channels=output_channel,
38
+ kernel_size=(3, 3),
39
+ stride=(1, 1),
40
+ padding=1,
41
+ )
42
+ )
43
+ self.projection = torch.nn.Conv2d(
44
+ in_channels=output_channel,
45
+ out_channels=1,
46
+ kernel_size=(1, 1),
47
+ stride=(1, 1),
48
+ padding=0,
49
+ )
50
+
51
+ def __call__(self, x, down_sampling=False):
52
+ h = x
53
+ if down_sampling:
54
+ h = torch.nn.functional.max_pool2d(h, kernel_size=(2, 2), stride=(2, 2))
55
+ for conv in self.convs:
56
+ h = conv(h)
57
+ h = torch.nn.functional.relu(h)
58
+ return h, self.projection(h)
59
+
60
+
61
+ class ControlNetHED_Apache2(torch.nn.Module):
62
+ def __init__(self):
63
+ super().__init__()
64
+ self.norm = torch.nn.Parameter(torch.zeros(size=(1, 3, 1, 1)))
65
+ self.block1 = DoubleConvBlock(
66
+ input_channel=3, output_channel=64, layer_number=2
67
+ )
68
+ self.block2 = DoubleConvBlock(
69
+ input_channel=64, output_channel=128, layer_number=2
70
+ )
71
+ self.block3 = DoubleConvBlock(
72
+ input_channel=128, output_channel=256, layer_number=3
73
+ )
74
+ self.block4 = DoubleConvBlock(
75
+ input_channel=256, output_channel=512, layer_number=3
76
+ )
77
+ self.block5 = DoubleConvBlock(
78
+ input_channel=512, output_channel=512, layer_number=3
79
+ )
80
+
81
+ def __call__(self, x):
82
+ h = x - self.norm
83
+ h, projection1 = self.block1(h)
84
+ h, projection2 = self.block2(h, down_sampling=True)
85
+ h, projection3 = self.block3(h, down_sampling=True)
86
+ h, projection4 = self.block4(h, down_sampling=True)
87
+ h, projection5 = self.block5(h, down_sampling=True)
88
+ return projection1, projection2, projection3, projection4, projection5
89
+
90
+
91
+ netNetwork = None
92
+ remote_model_path = (
93
+ "https://huggingface.co/lllyasviel/Annotators/resolve/main/ControlNetHED.pth"
94
+ )
95
+ modeldir = os.path.join(models_path, "hed")
96
+ old_modeldir = os.path.dirname(os.path.realpath(__file__))
97
+
98
+
99
+ def apply_hed(input_image, is_safe=False):
100
+ global netNetwork
101
+ if netNetwork is None:
102
+ modelpath = os.path.join(modeldir, "ControlNetHED.pth")
103
+ old_modelpath = os.path.join(old_modeldir, "ControlNetHED.pth")
104
+ if os.path.exists(old_modelpath):
105
+ modelpath = old_modelpath
106
+ elif not os.path.exists(modelpath):
107
+ from modules.modelloader import load_file_from_url
108
+
109
+ load_file_from_url(remote_model_path, model_dir=modeldir)
110
+ netNetwork = ControlNetHED_Apache2().to(devices.get_device_for("controlnet"))
111
+ netNetwork.load_state_dict(torch.load(modelpath, map_location="cpu"))
112
+ netNetwork.to(devices.get_device_for("controlnet")).float().eval()
113
+
114
+ assert input_image.ndim == 3
115
+ H, W, C = input_image.shape
116
+ with torch.no_grad():
117
+ image_hed = (
118
+ torch.from_numpy(input_image.copy())
119
+ .float()
120
+ .to(devices.get_device_for("controlnet"))
121
+ )
122
+ image_hed = rearrange(image_hed, "h w c -> 1 c h w")
123
+ edges = netNetwork(image_hed)
124
+ edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges]
125
+ edges = [cv2.resize(e, (W, H), interpolation=cv2.INTER_LINEAR) for e in edges]
126
+ edges = np.stack(edges, axis=2)
127
+ edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64)))
128
+ if is_safe:
129
+ edge = safe_step(edge)
130
+ edge = (edge * 255.0).clip(0, 255).astype(np.uint8)
131
+ return edge
132
+
133
+
134
+ def unload_hed_model():
135
+ global netNetwork
136
+ if netNetwork is not None:
137
+ netNetwork.cpu()
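Note: apply_hed resizes the five side-output projections back to the input resolution, averages them, and passes the mean logit through a sigmoid to obtain a soft edge-probability map. A numpy sketch of that fusion step, assuming five single-channel logit maps of equal size:

import numpy as np

edges = [np.random.randn(256, 256).astype(np.float32) for _ in range(5)]  # hypothetical side outputs

stacked = np.stack(edges, axis=2)                        # (H, W, 5)
mean_logit = np.mean(stacked, axis=2).astype(np.float64)
edge = 1.0 / (1.0 + np.exp(-mean_logit))                 # sigmoid -> edge probability in [0, 1]
edge_u8 = (edge * 255.0).clip(0, 255).astype(np.uint8)   # 8-bit hint image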
extensions-builtin/forge_legacy_preprocessors/annotator/keypose/__init__.py ADDED
@@ -0,0 +1,279 @@
1
+ import numpy as np
2
+ import cv2
3
+ import torch
4
+
5
+ import os
6
+ from modules import devices
7
+ from annotator.annotator_path import models_path
8
+
9
+ import mmcv
10
+ from mmdet.apis import inference_detector, init_detector
11
+ from mmpose.apis import inference_top_down_pose_model
12
+ from mmpose.apis import init_pose_model, process_mmdet_results, vis_pose_result
13
+
14
+
15
+ def preprocessing(image, device):
16
+ # Resize
17
+ scale = 640 / max(image.shape[:2])
18
+ image = cv2.resize(image, dsize=None, fx=scale, fy=scale)
19
+ raw_image = image.astype(np.uint8)
20
+
21
+ # Subtract mean values
22
+ image = image.astype(np.float32)
23
+ image -= np.array(
24
+ [
25
+ float(104.008),
26
+ float(116.669),
27
+ float(122.675),
28
+ ]
29
+ )
30
+
31
+ # Convert to torch.Tensor and add "batch" axis
32
+ image = torch.from_numpy(image.transpose(2, 0, 1)).float().unsqueeze(0)
33
+ image = image.to(device)
34
+
35
+ return image, raw_image
36
+
37
+
38
+ def imshow_keypoints(
39
+ img,
40
+ pose_result,
41
+ skeleton=None,
42
+ kpt_score_thr=0.1,
43
+ pose_kpt_color=None,
44
+ pose_link_color=None,
45
+ radius=4,
46
+ thickness=1,
47
+ ):
48
+ """Draw keypoints and links on an image.
49
+ Args:
50
+ img (ndarry): The image to draw poses on.
51
+ pose_result (list[kpts]): The poses to draw. Each element kpts is
52
+ a set of K keypoints as an Kx3 numpy.ndarray, where each
53
+ keypoint is represented as x, y, score.
54
+ kpt_score_thr (float, optional): Minimum score of keypoints
55
+ to be shown. Default: 0.3.
56
+ pose_kpt_color (np.array[Nx3]`): Color of N keypoints. If None,
57
+ the keypoint will not be drawn.
58
+ pose_link_color (np.array[Mx3]): Color of M links. If None, the
59
+ links will not be drawn.
60
+ thickness (int): Thickness of lines.
61
+ """
62
+
63
+ img_h, img_w, _ = img.shape
64
+ img = np.zeros(img.shape)
65
+
66
+ for idx, kpts in enumerate(pose_result):
67
+ if idx > 1:
68
+ continue
69
+ kpts = kpts["keypoints"]
70
+ # print(kpts)
71
+ kpts = np.array(kpts, copy=False)
72
+
73
+ # draw each point on image
74
+ if pose_kpt_color is not None:
75
+ assert len(pose_kpt_color) == len(kpts)
76
+
77
+ for kid, kpt in enumerate(kpts):
78
+ x_coord, y_coord, kpt_score = int(kpt[0]), int(kpt[1]), kpt[2]
79
+
80
+ if kpt_score < kpt_score_thr or pose_kpt_color[kid] is None:
81
+ # skip the point that should not be drawn
82
+ continue
83
+
84
+ color = tuple(int(c) for c in pose_kpt_color[kid])
85
+ cv2.circle(img, (int(x_coord), int(y_coord)), radius, color, -1)
86
+
87
+ # draw links
88
+ if skeleton is not None and pose_link_color is not None:
89
+ assert len(pose_link_color) == len(skeleton)
90
+
91
+ for sk_id, sk in enumerate(skeleton):
92
+ pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1]))
93
+ pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1]))
94
+
95
+ if (
96
+ pos1[0] <= 0
97
+ or pos1[0] >= img_w
98
+ or pos1[1] <= 0
99
+ or pos1[1] >= img_h
100
+ or pos2[0] <= 0
101
+ or pos2[0] >= img_w
102
+ or pos2[1] <= 0
103
+ or pos2[1] >= img_h
104
+ or kpts[sk[0], 2] < kpt_score_thr
105
+ or kpts[sk[1], 2] < kpt_score_thr
106
+ or pose_link_color[sk_id] is None
107
+ ):
108
+ # skip the link that should not be drawn
109
+ continue
110
+ color = tuple(int(c) for c in pose_link_color[sk_id])
111
+ cv2.line(img, pos1, pos2, color, thickness=thickness)
112
+
113
+ return img
114
+
115
+
116
+ human_det, pose_model = None, None
117
+ det_model_path = "https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth"
118
+ pose_model_path = "https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth"
119
+
120
+ modeldir = os.path.join(models_path, "keypose")
121
+ old_modeldir = os.path.dirname(os.path.realpath(__file__))
122
+
123
+ det_config = "faster_rcnn_r50_fpn_coco.py"
124
+ pose_config = "hrnet_w48_coco_256x192.py"
125
+
126
+ det_checkpoint = "faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth"
127
+ pose_checkpoint = "hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth"
128
+ det_cat_id = 1
129
+ bbox_thr = 0.2
130
+
131
+ skeleton = [
132
+ [15, 13],
133
+ [13, 11],
134
+ [16, 14],
135
+ [14, 12],
136
+ [11, 12],
137
+ [5, 11],
138
+ [6, 12],
139
+ [5, 6],
140
+ [5, 7],
141
+ [6, 8],
142
+ [7, 9],
143
+ [8, 10],
144
+ [1, 2],
145
+ [0, 1],
146
+ [0, 2],
147
+ [1, 3],
148
+ [2, 4],
149
+ [3, 5],
150
+ [4, 6],
151
+ ]
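+ # (The index pairs above are COCO's 17 keypoints: 0 nose, 1/2 eyes, 3/4 ears, 5/6 shoulders,
+ # 7/8 elbows, 9/10 wrists, 11/12 hips, 13/14 knees, 15/16 ankles.)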
152
+
153
+ pose_kpt_color = [
154
+ [51, 153, 255],
155
+ [51, 153, 255],
156
+ [51, 153, 255],
157
+ [51, 153, 255],
158
+ [51, 153, 255],
159
+ [0, 255, 0],
160
+ [255, 128, 0],
161
+ [0, 255, 0],
162
+ [255, 128, 0],
163
+ [0, 255, 0],
164
+ [255, 128, 0],
165
+ [0, 255, 0],
166
+ [255, 128, 0],
167
+ [0, 255, 0],
168
+ [255, 128, 0],
169
+ [0, 255, 0],
170
+ [255, 128, 0],
171
+ ]
172
+
173
+ pose_link_color = [
174
+ [0, 255, 0],
175
+ [0, 255, 0],
176
+ [255, 128, 0],
177
+ [255, 128, 0],
178
+ [51, 153, 255],
179
+ [51, 153, 255],
180
+ [51, 153, 255],
181
+ [51, 153, 255],
182
+ [0, 255, 0],
183
+ [255, 128, 0],
184
+ [0, 255, 0],
185
+ [255, 128, 0],
186
+ [51, 153, 255],
187
+ [51, 153, 255],
188
+ [51, 153, 255],
189
+ [51, 153, 255],
190
+ [51, 153, 255],
191
+ [51, 153, 255],
192
+ [51, 153, 255],
193
+ ]
194
+
195
+
196
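+ # Checks the legacy module directory first, then models/keypose, downloading the checkpoint there if it is missing.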
+ def find_download_model(checkpoint, remote_path):
197
+ modelpath = os.path.join(modeldir, checkpoint)
198
+ old_modelpath = os.path.join(old_modeldir, checkpoint)
199
+
200
+ if os.path.exists(old_modelpath):
201
+ modelpath = old_modelpath
202
+ elif not os.path.exists(modelpath):
203
+ from modules.modelloader import load_file_from_url
204
+
205
+ load_file_from_url(remote_path, model_dir=modeldir)
206
+
207
+ return modelpath
208
+
209
+
210
+ def apply_keypose(input_image):
211
+ global human_det, pose_model
212
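+ # Lazily initialize the mmdet human detector and the mmpose HRNet model on the first call.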
+ if human_det is None or pose_model is None:
213
+ det_model_local = find_download_model(det_checkpoint, det_model_path)
214
+ hrnet_model_local = find_download_model(pose_checkpoint, pose_model_path)
215
+ det_config_mmcv = mmcv.Config.fromfile(det_config)
216
+ pose_config_mmcv = mmcv.Config.fromfile(pose_config)
217
+ human_det = init_detector(
218
+ det_config_mmcv,
219
+ det_model_local,
220
+ device=devices.get_device_for("controlnet"),
221
+ )
222
+ pose_model = init_pose_model(
223
+ pose_config_mmcv,
224
+ hrnet_model_local,
225
+ device=devices.get_device_for("controlnet"),
226
+ )
227
+
228
+ assert input_image.ndim == 3
229
+ input_image = input_image.copy()
230
+ with torch.no_grad():
231
+ image = (
232
+ torch.from_numpy(input_image)
233
+ .float()
234
+ .to(devices.get_device_for("controlnet"))
235
+ )
236
+ image = image / 255.0
237
+ mmdet_results = inference_detector(human_det, image)
238
+
239
+ # keep the person class bounding boxes.
240
+ person_results = process_mmdet_results(mmdet_results, det_cat_id)
241
+
242
+ return_heatmap = False
243
+ dataset = pose_model.cfg.data["test"]["type"]
244
+
245
+ # e.g. use ('backbone', ) to return backbone feature
246
+ output_layer_names = None
247
+ pose_results, _ = inference_top_down_pose_model(
248
+ pose_model,
249
+ image,
250
+ person_results,
251
+ bbox_thr=bbox_thr,
252
+ format="xyxy",
253
+ dataset=dataset,
254
+ dataset_info=None,
255
+ return_heatmap=return_heatmap,
256
+ outputs=output_layer_names,
257
+ )
258
+
259
+ im_keypose_out = imshow_keypoints(
260
+ image,
261
+ pose_results,
262
+ skeleton=skeleton,
263
+ pose_kpt_color=pose_kpt_color,
264
+ pose_link_color=pose_link_color,
265
+ radius=2,
266
+ thickness=2,
267
+ )
268
+ im_keypose_out = im_keypose_out.astype(np.uint8)
269
+
270
+ # image_hed = rearrange(image_hed, 'h w c -> 1 c h w')
271
+ # edge = netNetwork(image_hed)[0]
272
+ # edge = (edge.cpu().numpy() * 255.0).clip(0, 255).astype(np.uint8)
273
+ return im_keypose_out
274
+
275
+
276
+ def unload_hed_model():
277
+ # Moves the keypose models to CPU (the function name appears to be a leftover from the HED annotator).
+ global human_det, pose_model
278
+ if human_det is not None:
279
+ human_det.cpu()
+ if pose_model is not None:
+ pose_model.cpu()
extensions-builtin/forge_legacy_preprocessors/annotator/keypose/faster_rcnn_r50_fpn_coco.py ADDED
@@ -0,0 +1,198 @@
1
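+ # Stock mmdetection Faster R-CNN R50-FPN COCO config; the training sections are kept verbatim, but the keypose annotator only needs the model definition and test settings at inference time.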
+ checkpoint_config = dict(interval=1)
2
+ # yapf:disable
3
+ log_config = dict(
4
+ interval=50,
5
+ hooks=[
6
+ dict(type='TextLoggerHook'),
7
+ # dict(type='TensorboardLoggerHook')
8
+ ])
9
+ # yapf:enable
10
+ dist_params = dict(backend="nccl")
11
+ log_level = "INFO"
12
+ load_from = None
13
+ resume_from = None
14
+ workflow = [("train", 1)]
15
+ # optimizer
16
+ optimizer = dict(type="SGD", lr=0.02, momentum=0.9, weight_decay=0.0001)
17
+ optimizer_config = dict(grad_clip=None)
18
+ # learning policy
19
+ lr_config = dict(
20
+ policy="step", warmup="linear", warmup_iters=500, warmup_ratio=0.001, step=[8, 11]
21
+ )
22
+ total_epochs = 12
23
+
24
+ model = dict(
25
+ type="FasterRCNN",
26
+ pretrained="torchvision://resnet50",
27
+ backbone=dict(
28
+ type="ResNet",
29
+ depth=50,
30
+ num_stages=4,
31
+ out_indices=(0, 1, 2, 3),
32
+ frozen_stages=1,
33
+ norm_cfg=dict(type="BN", requires_grad=True),
34
+ norm_eval=True,
35
+ style="pytorch",
36
+ ),
37
+ neck=dict(
38
+ type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5
39
+ ),
40
+ rpn_head=dict(
41
+ type="RPNHead",
42
+ in_channels=256,
43
+ feat_channels=256,
44
+ anchor_generator=dict(
45
+ type="AnchorGenerator",
46
+ scales=[8],
47
+ ratios=[0.5, 1.0, 2.0],
48
+ strides=[4, 8, 16, 32, 64],
49
+ ),
50
+ bbox_coder=dict(
51
+ type="DeltaXYWHBBoxCoder",
52
+ target_means=[0.0, 0.0, 0.0, 0.0],
53
+ target_stds=[1.0, 1.0, 1.0, 1.0],
54
+ ),
55
+ loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0),
56
+ loss_bbox=dict(type="L1Loss", loss_weight=1.0),
57
+ ),
58
+ roi_head=dict(
59
+ type="StandardRoIHead",
60
+ bbox_roi_extractor=dict(
61
+ type="SingleRoIExtractor",
62
+ roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0),
63
+ out_channels=256,
64
+ featmap_strides=[4, 8, 16, 32],
65
+ ),
66
+ bbox_head=dict(
67
+ type="Shared2FCBBoxHead",
68
+ in_channels=256,
69
+ fc_out_channels=1024,
70
+ roi_feat_size=7,
71
+ num_classes=80,
72
+ bbox_coder=dict(
73
+ type="DeltaXYWHBBoxCoder",
74
+ target_means=[0.0, 0.0, 0.0, 0.0],
75
+ target_stds=[0.1, 0.1, 0.2, 0.2],
76
+ ),
77
+ reg_class_agnostic=False,
78
+ loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
79
+ loss_bbox=dict(type="L1Loss", loss_weight=1.0),
80
+ ),
81
+ ),
82
+ # model training and testing settings
83
+ train_cfg=dict(
84
+ rpn=dict(
85
+ assigner=dict(
86
+ type="MaxIoUAssigner",
87
+ pos_iou_thr=0.7,
88
+ neg_iou_thr=0.3,
89
+ min_pos_iou=0.3,
90
+ match_low_quality=True,
91
+ ignore_iof_thr=-1,
92
+ ),
93
+ sampler=dict(
94
+ type="RandomSampler",
95
+ num=256,
96
+ pos_fraction=0.5,
97
+ neg_pos_ub=-1,
98
+ add_gt_as_proposals=False,
99
+ ),
100
+ allowed_border=-1,
101
+ pos_weight=-1,
102
+ debug=False,
103
+ ),
104
+ rpn_proposal=dict(
105
+ nms_pre=2000,
106
+ max_per_img=1000,
107
+ nms=dict(type="nms", iou_threshold=0.7),
108
+ min_bbox_size=0,
109
+ ),
110
+ rcnn=dict(
111
+ assigner=dict(
112
+ type="MaxIoUAssigner",
113
+ pos_iou_thr=0.5,
114
+ neg_iou_thr=0.5,
115
+ min_pos_iou=0.5,
116
+ match_low_quality=False,
117
+ ignore_iof_thr=-1,
118
+ ),
119
+ sampler=dict(
120
+ type="RandomSampler",
121
+ num=512,
122
+ pos_fraction=0.25,
123
+ neg_pos_ub=-1,
124
+ add_gt_as_proposals=True,
125
+ ),
126
+ pos_weight=-1,
127
+ debug=False,
128
+ ),
129
+ ),
130
+ test_cfg=dict(
131
+ rpn=dict(
132
+ nms_pre=1000,
133
+ max_per_img=1000,
134
+ nms=dict(type="nms", iou_threshold=0.7),
135
+ min_bbox_size=0,
136
+ ),
137
+ rcnn=dict(
138
+ score_thr=0.05, nms=dict(type="nms", iou_threshold=0.5), max_per_img=100
139
+ ),
140
+ # soft-nms is also supported for rcnn testing
141
+ # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
142
+ ),
143
+ )
144
+
145
+ dataset_type = "CocoDataset"
146
+ data_root = "data/coco"
147
+ img_norm_cfg = dict(
148
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
149
+ )
150
+ train_pipeline = [
151
+ dict(type="LoadImageFromFile"),
152
+ dict(type="LoadAnnotations", with_bbox=True),
153
+ dict(type="Resize", img_scale=(1333, 800), keep_ratio=True),
154
+ dict(type="RandomFlip", flip_ratio=0.5),
155
+ dict(type="Normalize", **img_norm_cfg),
156
+ dict(type="Pad", size_divisor=32),
157
+ dict(type="DefaultFormatBundle"),
158
+ dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels"]),
159
+ ]
160
+ test_pipeline = [
161
+ dict(type="LoadImageFromFile"),
162
+ dict(
163
+ type="MultiScaleFlipAug",
164
+ img_scale=(1333, 800),
165
+ flip=False,
166
+ transforms=[
167
+ dict(type="Resize", keep_ratio=True),
168
+ dict(type="RandomFlip"),
169
+ dict(type="Normalize", **img_norm_cfg),
170
+ dict(type="Pad", size_divisor=32),
171
+ dict(type="DefaultFormatBundle"),
172
+ dict(type="Collect", keys=["img"]),
173
+ ],
174
+ ),
175
+ ]
176
+ data = dict(
177
+ samples_per_gpu=2,
178
+ workers_per_gpu=2,
179
+ train=dict(
180
+ type=dataset_type,
181
+ ann_file=f"{data_root}/annotations/instances_train2017.json",
182
+ img_prefix=f"{data_root}/train2017/",
183
+ pipeline=train_pipeline,
184
+ ),
185
+ val=dict(
186
+ type=dataset_type,
187
+ ann_file=f"{data_root}/annotations/instances_val2017.json",
188
+ img_prefix=f"{data_root}/val2017/",
189
+ pipeline=test_pipeline,
190
+ ),
191
+ test=dict(
192
+ type=dataset_type,
193
+ ann_file=f"{data_root}/annotations/instances_val2017.json",
194
+ img_prefix=f"{data_root}/val2017/",
195
+ pipeline=test_pipeline,
196
+ ),
197
+ )
198
+ evaluation = dict(interval=1, metric="bbox")
extensions-builtin/forge_legacy_preprocessors/annotator/keypose/hrnet_w48_coco_256x192.py ADDED
@@ -0,0 +1,181 @@
1
+ # _base_ = [
2
+ # '../../../../_base_/default_runtime.py',
3
+ # '../../../../_base_/datasets/coco.py'
4
+ # ]
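+ # Note: the {{_base_.dataset_info}} placeholders below use mmcv's base-config substitution syntax and normally rely on the commented-out _base_ configs above.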
5
+ evaluation = dict(interval=10, metric="mAP", save_best="AP")
6
+
7
+ optimizer = dict(
8
+ type="Adam",
9
+ lr=5e-4,
10
+ )
11
+ optimizer_config = dict(grad_clip=None)
12
+ # learning policy
13
+ lr_config = dict(
14
+ policy="step",
15
+ warmup="linear",
16
+ warmup_iters=500,
17
+ warmup_ratio=0.001,
18
+ step=[170, 200],
19
+ )
20
+ total_epochs = 210
21
+ channel_cfg = dict(
22
+ num_output_channels=17,
23
+ dataset_joints=17,
24
+ dataset_channel=[
25
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
26
+ ],
27
+ inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
28
+ )
29
+
30
+ # model settings
31
+ model = dict(
32
+ type="TopDown",
33
+ pretrained="https://download.openmmlab.com/mmpose/"
34
+ "pretrain_models/hrnet_w48-8ef0771d.pth",
35
+ backbone=dict(
36
+ type="HRNet",
37
+ in_channels=3,
38
+ extra=dict(
39
+ stage1=dict(
40
+ num_modules=1,
41
+ num_branches=1,
42
+ block="BOTTLENECK",
43
+ num_blocks=(4,),
44
+ num_channels=(64,),
45
+ ),
46
+ stage2=dict(
47
+ num_modules=1,
48
+ num_branches=2,
49
+ block="BASIC",
50
+ num_blocks=(4, 4),
51
+ num_channels=(48, 96),
52
+ ),
53
+ stage3=dict(
54
+ num_modules=4,
55
+ num_branches=3,
56
+ block="BASIC",
57
+ num_blocks=(4, 4, 4),
58
+ num_channels=(48, 96, 192),
59
+ ),
60
+ stage4=dict(
61
+ num_modules=3,
62
+ num_branches=4,
63
+ block="BASIC",
64
+ num_blocks=(4, 4, 4, 4),
65
+ num_channels=(48, 96, 192, 384),
66
+ ),
67
+ ),
68
+ ),
69
+ keypoint_head=dict(
70
+ type="TopdownHeatmapSimpleHead",
71
+ in_channels=48,
72
+ out_channels=channel_cfg["num_output_channels"],
73
+ num_deconv_layers=0,
74
+ extra=dict(
75
+ final_conv_kernel=1,
76
+ ),
77
+ loss_keypoint=dict(type="JointsMSELoss", use_target_weight=True),
78
+ ),
79
+ train_cfg=dict(),
80
+ test_cfg=dict(
81
+ flip_test=True, post_process="default", shift_heatmap=True, modulate_kernel=11
82
+ ),
83
+ )
84
+
85
+ data_cfg = dict(
86
+ image_size=[192, 256],
87
+ heatmap_size=[48, 64],
88
+ num_output_channels=channel_cfg["num_output_channels"],
89
+ num_joints=channel_cfg["dataset_joints"],
90
+ dataset_channel=channel_cfg["dataset_channel"],
91
+ inference_channel=channel_cfg["inference_channel"],
92
+ soft_nms=False,
93
+ nms_thr=1.0,
94
+ oks_thr=0.9,
95
+ vis_thr=0.2,
96
+ use_gt_bbox=False,
97
+ det_bbox_thr=0.0,
98
+ bbox_file="data/coco/person_detection_results/"
99
+ "COCO_val2017_detections_AP_H_56_person.json",
100
+ )
101
+
102
+ train_pipeline = [
103
+ dict(type="LoadImageFromFile"),
104
+ dict(type="TopDownGetBboxCenterScale", padding=1.25),
105
+ dict(type="TopDownRandomShiftBboxCenter", shift_factor=0.16, prob=0.3),
106
+ dict(type="TopDownRandomFlip", flip_prob=0.5),
107
+ dict(type="TopDownHalfBodyTransform", num_joints_half_body=8, prob_half_body=0.3),
108
+ dict(type="TopDownGetRandomScaleRotation", rot_factor=40, scale_factor=0.5),
109
+ dict(type="TopDownAffine"),
110
+ dict(type="ToTensor"),
111
+ dict(type="NormalizeTensor", mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
112
+ dict(type="TopDownGenerateTarget", sigma=2),
113
+ dict(
114
+ type="Collect",
115
+ keys=["img", "target", "target_weight"],
116
+ meta_keys=[
117
+ "image_file",
118
+ "joints_3d",
119
+ "joints_3d_visible",
120
+ "center",
121
+ "scale",
122
+ "rotation",
123
+ "bbox_score",
124
+ "flip_pairs",
125
+ ],
126
+ ),
127
+ ]
128
+
129
+ val_pipeline = [
130
+ dict(type="LoadImageFromFile"),
131
+ dict(type="TopDownGetBboxCenterScale", padding=1.25),
132
+ dict(type="TopDownAffine"),
133
+ dict(type="ToTensor"),
134
+ dict(type="NormalizeTensor", mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
135
+ dict(
136
+ type="Collect",
137
+ keys=["img"],
138
+ meta_keys=[
139
+ "image_file",
140
+ "center",
141
+ "scale",
142
+ "rotation",
143
+ "bbox_score",
144
+ "flip_pairs",
145
+ ],
146
+ ),
147
+ ]
148
+
149
+ test_pipeline = val_pipeline
150
+
151
+ data_root = "data/coco"
152
+ data = dict(
153
+ samples_per_gpu=32,
154
+ workers_per_gpu=2,
155
+ val_dataloader=dict(samples_per_gpu=32),
156
+ test_dataloader=dict(samples_per_gpu=32),
157
+ train=dict(
158
+ type="TopDownCocoDataset",
159
+ ann_file=f"{data_root}/annotations/person_keypoints_train2017.json",
160
+ img_prefix=f"{data_root}/train2017/",
161
+ data_cfg=data_cfg,
162
+ pipeline=train_pipeline,
163
+ dataset_info={{_base_.dataset_info}},
164
+ ),
165
+ val=dict(
166
+ type="TopDownCocoDataset",
167
+ ann_file=f"{data_root}/annotations/person_keypoints_val2017.json",
168
+ img_prefix=f"{data_root}/val2017/",
169
+ data_cfg=data_cfg,
170
+ pipeline=val_pipeline,
171
+ dataset_info={{_base_.dataset_info}},
172
+ ),
173
+ test=dict(
174
+ type="TopDownCocoDataset",
175
+ ann_file=f"{data_root}/annotations/person_keypoints_val2017.json",
176
+ img_prefix=f"{data_root}/val2017/",
177
+ data_cfg=data_cfg,
178
+ pipeline=test_pipeline,
179
+ dataset_info={{_base_.dataset_info}},
180
+ ),
181
+ )
extensions-builtin/forge_legacy_preprocessors/annotator/leres/__init__.py ADDED
@@ -0,0 +1,124 @@
1
+ import cv2
2
+ import numpy as np
3
+ import torch
4
+ import os
5
+ from modules import devices, shared
6
+ from annotator.annotator_path import models_path
7
+ from torchvision.transforms import transforms
8
+
9
+ # AdelaiDepth/LeReS imports
10
+ from .leres.depthmap import estimateleres, estimateboost
11
+ from .leres.multi_depth_model_woauxi import RelDepthModel
12
+ from .leres.net_tools import strip_prefix_if_present
13
+
14
+ # pix2pix/merge net imports
15
+ from .pix2pix.options.test_options import TestOptions
16
+ from .pix2pix.models.pix2pix4depth_model import Pix2Pix4DepthModel
17
+
18
+ base_model_path = os.path.join(models_path, "leres")
19
+ old_modeldir = os.path.dirname(os.path.realpath(__file__))
20
+
21
+ remote_model_path_leres = (
22
+ "https://huggingface.co/lllyasviel/Annotators/resolve/main/res101.pth"
23
+ )
24
+ remote_model_path_pix2pix = (
25
+ "https://huggingface.co/lllyasviel/Annotators/resolve/main/latest_net_G.pth"
26
+ )
27
+
28
+ model = None
29
+ pix2pixmodel = None
30
+
31
+
32
+ def unload_leres_model():
33
+ global model, pix2pixmodel
34
+ if model is not None:
35
+ model = model.cpu()
36
+ if pix2pixmodel is not None:
37
+ pix2pixmodel = pix2pixmodel.unload_network("G")
38
+
39
+
40
+ def apply_leres(input_image, thr_a, thr_b, boost=False):
41
+ global model, pix2pixmodel
42
+ if model is None:
43
+ model_path = os.path.join(base_model_path, "res101.pth")
44
+ old_model_path = os.path.join(old_modeldir, "res101.pth")
45
+
46
+ if os.path.exists(old_model_path):
47
+ model_path = old_model_path
48
+ elif not os.path.exists(model_path):
49
+ from modules.modelloader import load_file_from_url
50
+
51
+ load_file_from_url(remote_model_path_leres, model_dir=base_model_path)
52
+
53
+ if torch.cuda.is_available():
54
+ checkpoint = torch.load(model_path)
55
+ else:
56
+ checkpoint = torch.load(model_path, map_location=torch.device("cpu"))
57
+
58
+ model = RelDepthModel(backbone="resnext101")
59
+ model.load_state_dict(
60
+ strip_prefix_if_present(checkpoint["depth_model"], "module."), strict=True
61
+ )
62
+ del checkpoint
63
+
64
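+ # "boost" mode additionally loads the pix2pix merge network used for the multi-resolution (boosted) depth estimation.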
+ if boost and pix2pixmodel is None:
65
+ pix2pixmodel_path = os.path.join(base_model_path, "latest_net_G.pth")
66
+ if not os.path.exists(pix2pixmodel_path):
67
+ from modules.modelloader import load_file_from_url
68
+
69
+ load_file_from_url(remote_model_path_pix2pix, model_dir=base_model_path)
70
+
71
+ opt = TestOptions().parse()
72
+ if not torch.cuda.is_available():
73
+ opt.gpu_ids = [] # cpu mode
74
+ pix2pixmodel = Pix2Pix4DepthModel(opt)
75
+ pix2pixmodel.save_dir = base_model_path
76
+ pix2pixmodel.load_networks("latest")
77
+ pix2pixmodel.eval()
78
+
79
+ if devices.get_device_for("controlnet").type != "mps":
80
+ model = model.to(devices.get_device_for("controlnet"))
81
+
82
+ assert input_image.ndim == 3
83
+ height, width, dim = input_image.shape
84
+
85
+ with torch.no_grad():
86
+ if boost:
87
+ pix2pixmodel.netG.to(devices.get_device_for("controlnet"))
88
+ depth = estimateboost(
89
+ input_image, model, 0, pix2pixmodel, max(width, height)
90
+ )
91
+ else:
92
+ depth = estimateleres(input_image, model, width, height)
93
+
94
+ numbytes = 2
95
+ depth_min = depth.min()
96
+ depth_max = depth.max()
97
+ max_val = (2 ** (8 * numbytes)) - 1
98
+
99
+ # check output before normalizing and mapping to 16 bit
100
+ if depth_max - depth_min > np.finfo("float").eps:
101
+ out = max_val * (depth - depth_min) / (depth_max - depth_min)
102
+ else:
103
+ out = np.zeros(depth.shape)
104
+
105
+ # single channel, 16 bit image
106
+ depth_image = out.astype("uint16")
107
+
108
+ # convert to uint8
109
+ depth_image = cv2.convertScaleAbs(depth_image, alpha=(255.0 / 65535.0))
110
+
111
+ # remove near
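+ # thr_a and thr_b are given as percentages (0-100) of the depth range; they clip the near and far ends respectively.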
112
+ if thr_a != 0:
113
+ thr_a = (thr_a / 100) * 255
114
+ depth_image = cv2.threshold(depth_image, thr_a, 255, cv2.THRESH_TOZERO)[1]
115
+
116
+ # invert image
117
+ depth_image = cv2.bitwise_not(depth_image)
118
+
119
+ # remove bg
120
+ if thr_b != 0:
121
+ thr_b = (thr_b / 100) * 255
122
+ depth_image = cv2.threshold(depth_image, thr_b, 255, cv2.THRESH_TOZERO)[1]
123
+
124
+ return depth_image
extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/LICENSE ADDED
@@ -0,0 +1,23 @@
1
+ https://github.com/thygate/stable-diffusion-webui-depthmap-script
2
+
3
+ MIT License
4
+
5
+ Copyright (c) 2023 Bob Thiry
6
+
7
+ Permission is hereby granted, free of charge, to any person obtaining a copy
8
+ of this software and associated documentation files (the "Software"), to deal
9
+ in the Software without restriction, including without limitation the rights
10
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11
+ copies of the Software, and to permit persons to whom the Software is
12
+ furnished to do so, subject to the following conditions:
13
+
14
+ The above copyright notice and this permission notice shall be included in all
15
+ copies or substantial portions of the Software.
16
+
17
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23
+ SOFTWARE.
extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/Resnet.py ADDED
@@ -0,0 +1,205 @@
1
+ import torch.nn as nn
2
+ import torch.nn as NN
3
+
4
+ __all__ = ["ResNet", "resnet18", "resnet34", "resnet50", "resnet101", "resnet152"]
5
+
6
+
7
+ model_urls = {
8
+ "resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth",
9
+ "resnet34": "https://download.pytorch.org/models/resnet34-333f7ec4.pth",
10
+ "resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
11
+ "resnet101": "https://download.pytorch.org/models/resnet101-5d3b4d8f.pth",
12
+ "resnet152": "https://download.pytorch.org/models/resnet152-b121ed2d.pth",
13
+ }
14
+
15
+
16
+ def conv3x3(in_planes, out_planes, stride=1):
17
+ """3x3 convolution with padding"""
18
+ return nn.Conv2d(
19
+ in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
20
+ )
21
+
22
+
23
+ class BasicBlock(nn.Module):
24
+ expansion = 1
25
+
26
+ def __init__(self, inplanes, planes, stride=1, downsample=None):
27
+ super(BasicBlock, self).__init__()
28
+ self.conv1 = conv3x3(inplanes, planes, stride)
29
+ self.bn1 = NN.BatchNorm2d(planes) # NN.BatchNorm2d
30
+ self.relu = nn.ReLU(inplace=True)
31
+ self.conv2 = conv3x3(planes, planes)
32
+ self.bn2 = NN.BatchNorm2d(planes) # NN.BatchNorm2d
33
+ self.downsample = downsample
34
+ self.stride = stride
35
+
36
+ def forward(self, x):
37
+ residual = x
38
+
39
+ out = self.conv1(x)
40
+ out = self.bn1(out)
41
+ out = self.relu(out)
42
+
43
+ out = self.conv2(out)
44
+ out = self.bn2(out)
45
+
46
+ if self.downsample is not None:
47
+ residual = self.downsample(x)
48
+
49
+ out += residual
50
+ out = self.relu(out)
51
+
52
+ return out
53
+
54
+
55
+ class Bottleneck(nn.Module):
56
+ expansion = 4
57
+
58
+ def __init__(self, inplanes, planes, stride=1, downsample=None):
59
+ super(Bottleneck, self).__init__()
60
+ self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
61
+ self.bn1 = NN.BatchNorm2d(planes) # NN.BatchNorm2d
62
+ self.conv2 = nn.Conv2d(
63
+ planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
64
+ )
65
+ self.bn2 = NN.BatchNorm2d(planes) # NN.BatchNorm2d
66
+ self.conv3 = nn.Conv2d(
67
+ planes, planes * self.expansion, kernel_size=1, bias=False
68
+ )
69
+ self.bn3 = NN.BatchNorm2d(planes * self.expansion) # NN.BatchNorm2d
70
+ self.relu = nn.ReLU(inplace=True)
71
+ self.downsample = downsample
72
+ self.stride = stride
73
+
74
+ def forward(self, x):
75
+ residual = x
76
+
77
+ out = self.conv1(x)
78
+ out = self.bn1(out)
79
+ out = self.relu(out)
80
+
81
+ out = self.conv2(out)
82
+ out = self.bn2(out)
83
+ out = self.relu(out)
84
+
85
+ out = self.conv3(out)
86
+ out = self.bn3(out)
87
+
88
+ if self.downsample is not None:
89
+ residual = self.downsample(x)
90
+
91
+ out += residual
92
+ out = self.relu(out)
93
+
94
+ return out
95
+
96
+
97
+ class ResNet(nn.Module):
98
+ def __init__(self, block, layers, num_classes=1000):
99
+ self.inplanes = 64
100
+ super(ResNet, self).__init__()
101
+ self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
102
+ self.bn1 = NN.BatchNorm2d(64) # NN.BatchNorm2d
103
+ self.relu = nn.ReLU(inplace=True)
104
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
105
+ self.layer1 = self._make_layer(block, 64, layers[0])
106
+ self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
107
+ self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
108
+ self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
109
+ # self.avgpool = nn.AvgPool2d(7, stride=1)
110
+ # self.fc = nn.Linear(512 * block.expansion, num_classes)
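+ # The classification head stays disabled: forward() returns the four stage feature maps for use by the LeReS depth decoder.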
111
+
112
+ for m in self.modules():
113
+ if isinstance(m, nn.Conv2d):
114
+ nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
115
+ elif isinstance(m, nn.BatchNorm2d):
116
+ nn.init.constant_(m.weight, 1)
117
+ nn.init.constant_(m.bias, 0)
118
+
119
+ def _make_layer(self, block, planes, blocks, stride=1):
120
+ downsample = None
121
+ if stride != 1 or self.inplanes != planes * block.expansion:
122
+ downsample = nn.Sequential(
123
+ nn.Conv2d(
124
+ self.inplanes,
125
+ planes * block.expansion,
126
+ kernel_size=1,
127
+ stride=stride,
128
+ bias=False,
129
+ ),
130
+ NN.BatchNorm2d(planes * block.expansion), # NN.BatchNorm2d
131
+ )
132
+
133
+ layers = []
134
+ layers.append(block(self.inplanes, planes, stride, downsample))
135
+ self.inplanes = planes * block.expansion
136
+ for i in range(1, blocks):
137
+ layers.append(block(self.inplanes, planes))
138
+
139
+ return nn.Sequential(*layers)
140
+
141
+ def forward(self, x):
142
+ features = []
143
+
144
+ x = self.conv1(x)
145
+ x = self.bn1(x)
146
+ x = self.relu(x)
147
+ x = self.maxpool(x)
148
+
149
+ x = self.layer1(x)
150
+ features.append(x)
151
+ x = self.layer2(x)
152
+ features.append(x)
153
+ x = self.layer3(x)
154
+ features.append(x)
155
+ x = self.layer4(x)
156
+ features.append(x)
157
+
158
+ return features
159
+
160
+
161
+ def resnet18(pretrained=True, **kwargs):
162
+ """Constructs a ResNet-18 model.
163
+ Args:
164
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
165
+ """
166
+ model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
167
+ return model
168
+
169
+
170
+ def resnet34(pretrained=True, **kwargs):
171
+ """Constructs a ResNet-34 model.
172
+ Args:
173
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
174
+ """
175
+ model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
176
+ return model
177
+
178
+
179
+ def resnet50(pretrained=True, **kwargs):
180
+ """Constructs a ResNet-50 model.
181
+ Args:
182
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
183
+ """
184
+ model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
185
+
186
+ return model
187
+
188
+
189
+ def resnet101(pretrained=True, **kwargs):
190
+ """Constructs a ResNet-101 model.
191
+ Args:
192
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
193
+ """
194
+ model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
195
+
196
+ return model
197
+
198
+
199
+ def resnet152(pretrained=True, **kwargs):
200
+ """Constructs a ResNet-152 model.
201
+ Args:
202
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
203
+ """
204
+ model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
205
+ return model
extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/Resnext_torch.py ADDED
@@ -0,0 +1,291 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+ import torch.nn as nn
4
+
5
+ try:
6
+ from urllib import urlretrieve
7
+ except ImportError:
8
+ from urllib.request import urlretrieve
9
+
10
+ __all__ = ["resnext101_32x8d"]
11
+
12
+
13
+ model_urls = {
14
+ "resnext50_32x4d": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
15
+ "resnext101_32x8d": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
16
+ }
17
+
18
+
19
+ def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
20
+ """3x3 convolution with padding"""
21
+ return nn.Conv2d(
22
+ in_planes,
23
+ out_planes,
24
+ kernel_size=3,
25
+ stride=stride,
26
+ padding=dilation,
27
+ groups=groups,
28
+ bias=False,
29
+ dilation=dilation,
30
+ )
31
+
32
+
33
+ def conv1x1(in_planes, out_planes, stride=1):
34
+ """1x1 convolution"""
35
+ return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
36
+
37
+
38
+ class BasicBlock(nn.Module):
39
+ expansion = 1
40
+
41
+ def __init__(
42
+ self,
43
+ inplanes,
44
+ planes,
45
+ stride=1,
46
+ downsample=None,
47
+ groups=1,
48
+ base_width=64,
49
+ dilation=1,
50
+ norm_layer=None,
51
+ ):
52
+ super(BasicBlock, self).__init__()
53
+ if norm_layer is None:
54
+ norm_layer = nn.BatchNorm2d
55
+ if groups != 1 or base_width != 64:
56
+ raise ValueError("BasicBlock only supports groups=1 and base_width=64")
57
+ if dilation > 1:
58
+ raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
59
+ # Both self.conv1 and self.downsample layers downsample the input when stride != 1
60
+ self.conv1 = conv3x3(inplanes, planes, stride)
61
+ self.bn1 = norm_layer(planes)
62
+ self.relu = nn.ReLU(inplace=True)
63
+ self.conv2 = conv3x3(planes, planes)
64
+ self.bn2 = norm_layer(planes)
65
+ self.downsample = downsample
66
+ self.stride = stride
67
+
68
+ def forward(self, x):
69
+ identity = x
70
+
71
+ out = self.conv1(x)
72
+ out = self.bn1(out)
73
+ out = self.relu(out)
74
+
75
+ out = self.conv2(out)
76
+ out = self.bn2(out)
77
+
78
+ if self.downsample is not None:
79
+ identity = self.downsample(x)
80
+
81
+ out += identity
82
+ out = self.relu(out)
83
+
84
+ return out
85
+
86
+
87
+ class Bottleneck(nn.Module):
88
+ # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
89
+ # while original implementation places the stride at the first 1x1 convolution(self.conv1)
90
+ # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
91
+ # This variant is also known as ResNet V1.5 and improves accuracy according to
92
+ # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
93
+
94
+ expansion = 4
95
+
96
+ def __init__(
97
+ self,
98
+ inplanes,
99
+ planes,
100
+ stride=1,
101
+ downsample=None,
102
+ groups=1,
103
+ base_width=64,
104
+ dilation=1,
105
+ norm_layer=None,
106
+ ):
107
+ super(Bottleneck, self).__init__()
108
+ if norm_layer is None:
109
+ norm_layer = nn.BatchNorm2d
110
+ width = int(planes * (base_width / 64.0)) * groups
111
+ # Both self.conv2 and self.downsample layers downsample the input when stride != 1
112
+ self.conv1 = conv1x1(inplanes, width)
113
+ self.bn1 = norm_layer(width)
114
+ self.conv2 = conv3x3(width, width, stride, groups, dilation)
115
+ self.bn2 = norm_layer(width)
116
+ self.conv3 = conv1x1(width, planes * self.expansion)
117
+ self.bn3 = norm_layer(planes * self.expansion)
118
+ self.relu = nn.ReLU(inplace=True)
119
+ self.downsample = downsample
120
+ self.stride = stride
121
+
122
+ def forward(self, x):
123
+ identity = x
124
+
125
+ out = self.conv1(x)
126
+ out = self.bn1(out)
127
+ out = self.relu(out)
128
+
129
+ out = self.conv2(out)
130
+ out = self.bn2(out)
131
+ out = self.relu(out)
132
+
133
+ out = self.conv3(out)
134
+ out = self.bn3(out)
135
+
136
+ if self.downsample is not None:
137
+ identity = self.downsample(x)
138
+
139
+ out += identity
140
+ out = self.relu(out)
141
+
142
+ return out
143
+
144
+
145
+ class ResNet(nn.Module):
146
+ def __init__(
147
+ self,
148
+ block,
149
+ layers,
150
+ num_classes=1000,
151
+ zero_init_residual=False,
152
+ groups=1,
153
+ width_per_group=64,
154
+ replace_stride_with_dilation=None,
155
+ norm_layer=None,
156
+ ):
157
+ super(ResNet, self).__init__()
158
+ if norm_layer is None:
159
+ norm_layer = nn.BatchNorm2d
160
+ self._norm_layer = norm_layer
161
+
162
+ self.inplanes = 64
163
+ self.dilation = 1
164
+ if replace_stride_with_dilation is None:
165
+ # each element in the tuple indicates if we should replace
166
+ # the 2x2 stride with a dilated convolution instead
167
+ replace_stride_with_dilation = [False, False, False]
168
+ if len(replace_stride_with_dilation) != 3:
169
+ raise ValueError(
170
+ "replace_stride_with_dilation should be None "
171
+ "or a 3-element tuple, got {}".format(replace_stride_with_dilation)
172
+ )
173
+ self.groups = groups
174
+ self.base_width = width_per_group
175
+ self.conv1 = nn.Conv2d(
176
+ 3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
177
+ )
178
+ self.bn1 = norm_layer(self.inplanes)
179
+ self.relu = nn.ReLU(inplace=True)
180
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
181
+ self.layer1 = self._make_layer(block, 64, layers[0])
182
+ self.layer2 = self._make_layer(
183
+ block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
184
+ )
185
+ self.layer3 = self._make_layer(
186
+ block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
187
+ )
188
+ self.layer4 = self._make_layer(
189
+ block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
190
+ )
191
+ # self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
192
+ # self.fc = nn.Linear(512 * block.expansion, num_classes)
193
+
194
+ for m in self.modules():
195
+ if isinstance(m, nn.Conv2d):
196
+ nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
197
+ elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
198
+ nn.init.constant_(m.weight, 1)
199
+ nn.init.constant_(m.bias, 0)
200
+
201
+ # Zero-initialize the last BN in each residual branch,
202
+ # so that the residual branch starts with zeros, and each residual block behaves like an identity.
203
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
204
+ if zero_init_residual:
205
+ for m in self.modules():
206
+ if isinstance(m, Bottleneck):
207
+ nn.init.constant_(m.bn3.weight, 0)
208
+ elif isinstance(m, BasicBlock):
209
+ nn.init.constant_(m.bn2.weight, 0)
210
+
211
+ def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
212
+ norm_layer = self._norm_layer
213
+ downsample = None
214
+ previous_dilation = self.dilation
215
+ if dilate:
216
+ self.dilation *= stride
217
+ stride = 1
218
+ if stride != 1 or self.inplanes != planes * block.expansion:
219
+ downsample = nn.Sequential(
220
+ conv1x1(self.inplanes, planes * block.expansion, stride),
221
+ norm_layer(planes * block.expansion),
222
+ )
223
+
224
+ layers = []
225
+ layers.append(
226
+ block(
227
+ self.inplanes,
228
+ planes,
229
+ stride,
230
+ downsample,
231
+ self.groups,
232
+ self.base_width,
233
+ previous_dilation,
234
+ norm_layer,
235
+ )
236
+ )
237
+ self.inplanes = planes * block.expansion
238
+ for _ in range(1, blocks):
239
+ layers.append(
240
+ block(
241
+ self.inplanes,
242
+ planes,
243
+ groups=self.groups,
244
+ base_width=self.base_width,
245
+ dilation=self.dilation,
246
+ norm_layer=norm_layer,
247
+ )
248
+ )
249
+
250
+ return nn.Sequential(*layers)
251
+
252
+ def _forward_impl(self, x):
253
+ # See note [TorchScript super()]
254
+ features = []
255
+ x = self.conv1(x)
256
+ x = self.bn1(x)
257
+ x = self.relu(x)
258
+ x = self.maxpool(x)
259
+
260
+ x = self.layer1(x)
261
+ features.append(x)
262
+
263
+ x = self.layer2(x)
264
+ features.append(x)
265
+
266
+ x = self.layer3(x)
267
+ features.append(x)
268
+
269
+ x = self.layer4(x)
270
+ features.append(x)
271
+
272
+ # x = self.avgpool(x)
273
+ # x = torch.flatten(x, 1)
274
+ # x = self.fc(x)
275
+
276
+ return features
277
+
278
+ def forward(self, x):
279
+ return self._forward_impl(x)
280
+
281
+
282
+ def resnext101_32x8d(pretrained=True, **kwargs):
283
+ """Constructs a ResNet-152 model.
284
+ """Constructs a ResNeXt-101 32x8d model.
285
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
286
+ """
287
+ kwargs["groups"] = 32
288
+ kwargs["width_per_group"] = 8
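+ # 32 groups with 8 channels per group give the "32x8d" ResNeXt configuration.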
289
+
290
+ model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
291
+ return model
extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/depthmap.py ADDED
@@ -0,0 +1,666 @@
1
+ # Author: thygate
2
+ # https://github.com/thygate/stable-diffusion-webui-depthmap-script
3
+
4
+ from modules import devices
5
+ from modules.shared import opts
6
+ from torchvision.transforms import transforms
7
+ from operator import getitem
8
+
9
+ import torch, gc
10
+ import cv2
11
+ import numpy as np
12
+ import skimage.measure
13
+
14
+ whole_size_threshold = 1600 # R_max from the paper
15
+ pix2pixsize = 1024
16
+
17
+
18
+ def scale_torch(img):
19
+ """
20
+ Scale the image and output it in torch.tensor.
21
+ :param img: input rgb is in shape [H, W, C], input depth/disp is in shape [H, W]
22
23
+ :return: img. [C, H, W]
24
+ """
25
+ if len(img.shape) == 2:
26
+ img = img[np.newaxis, :, :]
27
+ if img.shape[2] == 3:
28
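+ # RGB inputs are normalized with the standard ImageNet mean/std before being fed to the network.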
+ transform = transforms.Compose(
29
+ [
30
+ transforms.ToTensor(),
31
+ transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
32
+ ]
33
+ )
34
+ img = transform(img.astype(np.float32))
35
+ else:
36
+ img = img.astype(np.float32)
37
+ img = torch.from_numpy(img)
38
+ return img
39
+
40
+
41
+ def estimateleres(img, model, w, h):
42
+ # leres transform input
43
+ rgb_c = img[:, :, ::-1].copy()
44
+ A_resize = cv2.resize(rgb_c, (w, h))
45
+ img_torch = scale_torch(A_resize)[None, :, :, :]
46
+
47
+ # compute
48
+ with torch.no_grad():
49
+ img_torch = img_torch.to(devices.get_device_for("controlnet"))
50
+ prediction = model.depth_model(img_torch)
51
+
52
+ prediction = prediction.squeeze().cpu().numpy()
53
+ prediction = cv2.resize(
54
+ prediction, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC
55
+ )
56
+
57
+ return prediction
58
+
59
+
60
+ def generatemask(size):
61
+ # Generates a Gaussian mask
62
+ mask = np.zeros(size, dtype=np.float32)
63
+ sigma = int(size[0] / 16)
64
+ k_size = int(2 * np.ceil(2 * int(size[0] / 16)) + 1)
65
+ mask[
66
+ int(0.15 * size[0]) : size[0] - int(0.15 * size[0]),
67
+ int(0.15 * size[1]) : size[1] - int(0.15 * size[1]),
68
+ ] = 1
69
+ mask = cv2.GaussianBlur(mask, (int(k_size), int(k_size)), sigma)
70
+ mask = (mask - mask.min()) / (mask.max() - mask.min())
71
+ mask = mask.astype(np.float32)
72
+ return mask
73
+
74
+
75
+ def resizewithpool(img, size):
76
+ i_size = img.shape[0]
77
+ n = int(np.floor(i_size / size))
78
+
79
+ out = skimage.measure.block_reduce(img, (n, n), np.max)
80
+ return out
81
+
82
+
83
+ def rgb2gray(rgb):
84
+ # Converts rgb to gray
85
+ return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])
86
+
87
+
88
+ def calculateprocessingres(
89
+ img, basesize, confidence=0.1, scale_threshold=3, whole_size_threshold=3000
90
+ ):
91
+ # Returns the R_x resolution described in section 5 of the main paper.
92
+
93
+ # Parameters:
94
+ # img :input rgb image
95
+ # basesize : size the dilation kernel which is equal to receptive field of the network.
96
+ # confidence: value of x in R_x; allowed percentage of pixels that are not getting any contextual cue.
97
+ # scale_threshold: maximum allowed upscaling on the input image ; it has been set to 3.
98
+ # whole_size_threshold: maximum allowed resolution. (R_max from section 6 of the main paper)
99
+
100
+ # Returns:
101
+ # outputsize_scale*speed_scale :The computed R_x resolution
102
+ # patch_scale: K parameter from section 6 of the paper
103
+
104
+ # speed scale parameter is to process every image in a smaller size to accelerate the R_x resolution search
105
+ speed_scale = 32
106
+ image_dim = int(min(img.shape[0:2]))
107
+
108
+ gray = rgb2gray(img)
109
+ grad = np.abs(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)) + np.abs(
110
+ cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
111
+ )
112
+ grad = cv2.resize(grad, (image_dim, image_dim), interpolation=cv2.INTER_AREA)
113
+
114
+ # thresholding the gradient map to generate the edge-map as a proxy of the contextual cues
115
+ m = grad.min()
116
+ M = grad.max()
117
+ middle = m + (0.4 * (M - m))
118
+ grad[grad < middle] = 0
119
+ grad[grad >= middle] = 1
120
+
121
+ # dilation kernel with size of the receptive field
122
+ kernel = np.ones((int(basesize / speed_scale), int(basesize / speed_scale)), float)
123
+ # dilation kernel with size of the a quarter of receptive field used to compute k
124
+ # as described in section 6 of main paper
125
+ kernel2 = np.ones(
126
+ (int(basesize / (4 * speed_scale)), int(basesize / (4 * speed_scale))), float
127
+ )
128
+
129
+ # Output resolution limit set by the whole_size_threshold and scale_threshold.
130
+ threshold = min(whole_size_threshold, scale_threshold * max(img.shape[:2]))
131
+
132
+ outputsize_scale = basesize / speed_scale
133
+ for p_size in range(
134
+ int(basesize / speed_scale),
135
+ int(threshold / speed_scale),
136
+ int(basesize / (2 * speed_scale)),
137
+ ):
138
+ grad_resized = resizewithpool(grad, p_size)
139
+ grad_resized = cv2.resize(grad_resized, (p_size, p_size), interpolation=cv2.INTER_NEAREST)
140
+ grad_resized[grad_resized >= 0.5] = 1
141
+ grad_resized[grad_resized < 0.5] = 0
142
+
143
+ dilated = cv2.dilate(grad_resized, kernel, iterations=1)
144
+ meanvalue = (1 - dilated).mean()
145
+ if meanvalue > confidence:
146
+ break
147
+ else:
148
+ outputsize_scale = p_size
149
+
150
+ grad_region = cv2.dilate(grad_resized, kernel2, iterations=1)
151
+ patch_scale = grad_region.mean()
152
+
153
+ return int(outputsize_scale * speed_scale), patch_scale
154
+
155
+
156
+ # Generate a double-input depth estimation
157
+ def doubleestimate(img, size1, size2, pix2pixsize, model, net_type, pix2pixmodel):
158
+ # Generate the low resolution estimation
159
+ estimate1 = singleestimate(img, size1, model, net_type)
160
+ # Resize to the inference size of merge network.
161
+ estimate1 = cv2.resize(
162
+ estimate1, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC
163
+ )
164
+
165
+ # Generate the high resolution estimation
166
+ estimate2 = singleestimate(img, size2, model, net_type)
167
+ # Resize to the inference size of merge network.
168
+ estimate2 = cv2.resize(
169
+ estimate2, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC
170
+ )
171
+
172
+ # Inference on the merge model
173
+ pix2pixmodel.set_input(estimate1, estimate2)
174
+ pix2pixmodel.test()
175
+ visuals = pix2pixmodel.get_current_visuals()
176
+ prediction_mapped = visuals["fake_B"]
177
+ prediction_mapped = (prediction_mapped + 1) / 2
178
+ prediction_mapped = (prediction_mapped - torch.min(prediction_mapped)) / (
179
+ torch.max(prediction_mapped) - torch.min(prediction_mapped)
180
+ )
181
+ prediction_mapped = prediction_mapped.squeeze().cpu().numpy()
182
+
183
+ return prediction_mapped
184
+
185
+
186
+ # Generate a single-input depth estimation
187
+ def singleestimate(img, msize, model, net_type):
188
+ # if net_type == 0:
189
+ return estimateleres(img, model, msize, msize)
190
+ # else:
191
+ # return estimatemidasBoost(img, model, msize, msize)
192
+
193
+
194
+ def applyGridpatch(blsize, stride, img, box):
195
+ # Extract a simple grid patch.
196
+ counter1 = 0
197
+ patch_bound_list = {}
198
+ for k in range(blsize, img.shape[1] - blsize, stride):
199
+ for j in range(blsize, img.shape[0] - blsize, stride):
200
+ patch_bound_list[str(counter1)] = {}
201
+ patchbounds = [
202
+ j - blsize,
203
+ k - blsize,
204
+ j - blsize + 2 * blsize,
205
+ k - blsize + 2 * blsize,
206
+ ]
207
+ patch_bound = [
208
+ box[0] + patchbounds[1],
209
+ box[1] + patchbounds[0],
210
+ patchbounds[3] - patchbounds[1],
211
+ patchbounds[2] - patchbounds[0],
212
+ ]
213
+ patch_bound_list[str(counter1)]["rect"] = patch_bound
214
+ patch_bound_list[str(counter1)]["size"] = patch_bound[2]
215
+ counter1 = counter1 + 1
216
+ return patch_bound_list
217
+
218
+
219
+ # Generating local patches to perform the local refinement described in section 6 of the main paper.
220
+ def generatepatchs(img, base_size):
221
+ # Compute the gradients as a proxy of the contextual cues.
222
+ img_gray = rgb2gray(img)
223
+ whole_grad = np.abs(cv2.Sobel(img_gray, cv2.CV_64F, 0, 1, ksize=3)) + np.abs(
224
+ cv2.Sobel(img_gray, cv2.CV_64F, 1, 0, ksize=3)
225
+ )
226
+
227
+ threshold = whole_grad[whole_grad > 0].mean()
228
+ whole_grad[whole_grad < threshold] = 0
229
+
230
+ # We use the integral image to speed-up the evaluation of the amount of gradients for each patch.
231
+ gf = whole_grad.sum() / len(whole_grad.reshape(-1))
232
+ grad_integral_image = cv2.integral(whole_grad)
233
+
234
+ # Variables are selected such that the initial patch size would be the receptive field size
235
+ # and the stride is set to 1/3 of the receptive field size.
236
+ blsize = int(round(base_size / 2))
237
+ stride = int(round(blsize * 0.75))
238
+
239
+ # Get initial Grid
240
+ patch_bound_list = applyGridpatch(blsize, stride, img, [0, 0, 0, 0])
241
+
242
+ # Refine initial Grid of patches by discarding the flat (in terms of gradients of the rgb image) ones. Refine
243
+ # each patch size to ensure that there will be enough depth cues for the network to generate a consistent depth map.
244
+ print("Selecting patches ...")
245
+ patch_bound_list = adaptiveselection(grad_integral_image, patch_bound_list, gf)
246
+
247
+ # Sort the patch list to make sure the merging operation will be done with the correct order: starting from biggest
248
+ # patch
249
+ patchset = sorted(
250
+ patch_bound_list.items(), key=lambda x: getitem(x[1], "size"), reverse=True
251
+ )
252
+ return patchset
253
+
254
+
255
+ def getGF_fromintegral(integralimage, rect):
256
+ # Computes the gradient density of a given patch from the gradient integral image.
257
+ x1 = rect[1]
258
+ x2 = rect[1] + rect[3]
259
+ y1 = rect[0]
260
+ y2 = rect[0] + rect[2]
261
+ value = (
262
+ integralimage[x2, y2]
263
+ - integralimage[x1, y2]
264
+ - integralimage[x2, y1]
265
+ + integralimage[x1, y1]
266
+ )
267
+ return value
268
+
269
+
270
+ # Adaptively select patches
271
+ def adaptiveselection(integral_grad, patch_bound_list, gf):
272
+ patchlist = {}
273
+ count = 0
274
+ height, width = integral_grad.shape
275
+
276
+ search_step = int(32 / factor)
277
+
278
+ # Go through all patches
279
+ for c in range(len(patch_bound_list)):
280
+ # Get patch
281
+ bbox = patch_bound_list[str(c)]["rect"]
282
+
283
+ # Compute the amount of gradients present in the patch from the integral image.
284
+ cgf = getGF_fromintegral(integral_grad, bbox) / (bbox[2] * bbox[3])
285
+
286
+ # Check if patching is beneficial by comparing the gradient density of the patch to
287
+ # the gradient density of the whole image
288
+ if cgf >= gf:
289
+ bbox_test = bbox.copy()
290
+ patchlist[str(count)] = {}
291
+
292
+ # Enlarge each patch until the gradient density of the patch is equal
293
+ # to the whole image gradient density
294
+ while True:
295
+ bbox_test[0] = bbox_test[0] - int(search_step / 2)
296
+ bbox_test[1] = bbox_test[1] - int(search_step / 2)
297
+
298
+ bbox_test[2] = bbox_test[2] + search_step
299
+ bbox_test[3] = bbox_test[3] + search_step
300
+
301
+ # Check if we are still within the image
302
+ if (
303
+ bbox_test[0] < 0
304
+ or bbox_test[1] < 0
305
+ or bbox_test[1] + bbox_test[3] >= height
306
+ or bbox_test[0] + bbox_test[2] >= width
307
+ ):
308
+ break
309
+
310
+ # Compare gradient density
311
+ cgf = getGF_fromintegral(integral_grad, bbox_test) / (
312
+ bbox_test[2] * bbox_test[3]
313
+ )
314
+ if cgf < gf:
315
+ break
316
+ bbox = bbox_test.copy()
317
+
318
+ # Add patch to selected patches
319
+ patchlist[str(count)]["rect"] = bbox
320
+ patchlist[str(count)]["size"] = bbox[2]
321
+ count = count + 1
322
+
323
+ # Return selected patches
324
+ return patchlist
325
+
326
+
327
+ def impatch(image, rect):
328
+ # Extract the given patch pixels from a given image.
329
+ w1 = rect[0]
330
+ h1 = rect[1]
331
+ w2 = w1 + rect[2]
332
+ h2 = h1 + rect[3]
333
+ image_patch = image[h1:h2, w1:w2]
334
+ return image_patch
335
+
336
+
337
+ class ImageandPatchs:
338
+ def __init__(self, root_dir, name, patchsinfo, rgb_image, scale=1):
339
+ self.root_dir = root_dir
340
+ self.patchsinfo = patchsinfo
341
+ self.name = name
342
+ self.patchs = patchsinfo
343
+ self.scale = scale
344
+
345
+ self.rgb_image = cv2.resize(
346
+ rgb_image,
347
+ (round(rgb_image.shape[1] * scale), round(rgb_image.shape[0] * scale)),
348
+ interpolation=cv2.INTER_CUBIC,
349
+ )
350
+
351
+ self.do_have_estimate = False
352
+ self.estimation_updated_image = None
353
+ self.estimation_base_image = None
354
+
355
+ def __len__(self):
356
+ return len(self.patchs)
357
+
358
+ def set_base_estimate(self, est):
359
+ self.estimation_base_image = est
360
+ if self.estimation_updated_image is not None:
361
+ self.do_have_estimate = True
362
+
363
+ def set_updated_estimate(self, est):
364
+ self.estimation_updated_image = est
365
+ if self.estimation_base_image is not None:
366
+ self.do_have_estimate = True
367
+
368
+ def __getitem__(self, index):
369
+ patch_id = int(self.patchs[index][0])
370
+ rect = np.array(self.patchs[index][1]["rect"])
371
+ msize = self.patchs[index][1]["size"]
372
+
373
+ ## applying scale to rect:
374
+ rect = np.round(rect * self.scale)
375
+ rect = rect.astype("int")
376
+ msize = round(msize * self.scale)
377
+
378
+ patch_rgb = impatch(self.rgb_image, rect)
379
+ if self.do_have_estimate:
380
+ patch_whole_estimate_base = impatch(self.estimation_base_image, rect)
381
+ patch_whole_estimate_updated = impatch(self.estimation_updated_image, rect)
382
+ return {
383
+ "patch_rgb": patch_rgb,
384
+ "patch_whole_estimate_base": patch_whole_estimate_base,
385
+ "patch_whole_estimate_updated": patch_whole_estimate_updated,
386
+ "rect": rect,
387
+ "size": msize,
388
+ "id": patch_id,
389
+ }
390
+ else:
391
+ return {"patch_rgb": patch_rgb, "rect": rect, "size": msize, "id": patch_id}
392
+
393
+ def print_options(self, opt):
394
+ """Print and save options
395
+
396
+ It will print both current options and default values(if different).
397
+ It will save options into a text file / [checkpoints_dir] / opt.txt
398
+ """
399
+ message = ""
400
+ message += "----------------- Options ---------------\n"
401
+ for k, v in sorted(vars(opt).items()):
402
+ comment = ""
403
+ default = self.parser.get_default(k)
404
+ if v != default:
405
+ comment = "\t[default: %s]" % str(default)
406
+ message += "{:>25}: {:<30}{}\n".format(str(k), str(v), comment)
407
+ message += "----------------- End -------------------"
408
+ print(message)
409
+
410
+ # save to the disk
411
+ """
412
+ expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
413
+ util.mkdirs(expr_dir)
414
+ file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
415
+ with open(file_name, 'wt') as opt_file:
416
+ opt_file.write(message)
417
+ opt_file.write('\n')
418
+ """
419
+
420
+ def parse(self):
421
+ """Parse our options, create checkpoints directory suffix, and set up gpu device."""
422
+ opt = self.gather_options()
423
+ opt.isTrain = self.isTrain # train or test
424
+
425
+ # process opt.suffix
426
+ if opt.suffix:
427
+ suffix = ("_" + opt.suffix.format(**vars(opt))) if opt.suffix != "" else ""
428
+ opt.name = opt.name + suffix
429
+
430
+ # self.print_options(opt)
431
+
432
+ # set gpu ids
433
+ str_ids = opt.gpu_ids.split(",")
434
+ opt.gpu_ids = []
435
+ for str_id in str_ids:
436
+ id = int(str_id)
437
+ if id >= 0:
438
+ opt.gpu_ids.append(id)
439
+ # if len(opt.gpu_ids) > 0:
440
+ # torch.cuda.set_device(opt.gpu_ids[0])
441
+
442
+ self.opt = opt
443
+ return self.opt
444
+
445
+
446
+ def estimateboost(img, model, model_type, pix2pixmodel, max_res=512):
447
+ global whole_size_threshold
448
+
449
+ # get settings
450
+ if hasattr(opts, "depthmap_script_boost_rmax"):
451
+ whole_size_threshold = opts.depthmap_script_boost_rmax
452
+
453
+ if model_type == 0: # leres
454
+ net_receptive_field_size = 448
455
+ patch_netsize = 2 * net_receptive_field_size
456
+ elif model_type == 1: # dpt_beit_large_512
457
+ net_receptive_field_size = 512
458
+ patch_netsize = 2 * net_receptive_field_size
459
+ else: # other midas
460
+ net_receptive_field_size = 384
461
+ patch_netsize = 2 * net_receptive_field_size
462
+
463
+ gc.collect()
464
+ devices.torch_gc()
465
+
466
+ # Generate mask used to smoothly blend the local patch estimations into the base estimate.
467
+ # It is arbitrarily large to avoid artifacts during rescaling for each crop.
468
+ mask_org = generatemask((3000, 3000))
469
+ mask = mask_org.copy()
470
+
471
+ # Value x of R_x defined in the section 5 of the main paper.
472
+ r_threshold_value = 0.2
473
+ # if R0:
474
+ # r_threshold_value = 0
475
+
476
+ input_resolution = img.shape
477
+ scale_threshold = 3 # Allows up-scaling with a scale up to 3
478
+
479
+ # Find the best input resolution R-x. The resolution search described in section 5-double estimation of the main paper and section B of the
480
+ # supplementary material.
481
+ whole_image_optimal_size, patch_scale = calculateprocessingres(
482
+ img,
483
+ net_receptive_field_size,
484
+ r_threshold_value,
485
+ scale_threshold,
486
+ whole_size_threshold,
487
+ )
488
+
489
+ # print('wholeImage being processed in :', whole_image_optimal_size)
490
+
491
+ # Generate the base estimate using the double estimation.
492
+ whole_estimate = doubleestimate(
493
+ img,
494
+ net_receptive_field_size,
495
+ whole_image_optimal_size,
496
+ pix2pixsize,
497
+ model,
498
+ model_type,
499
+ pix2pixmodel,
500
+ )
501
+
502
+ # Compute the multiplier described in section 6 of the main paper to make sure our initial patch can select
503
+ # small high-density regions of the image.
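+ # Note: `factor` is shared as a module-level global; adaptiveselection() reads it when growing candidate patches.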
504
+ global factor
505
+ factor = max(
506
+ min(1, 4 * patch_scale * whole_image_optimal_size / whole_size_threshold), 0.2
507
+ )
508
+ # print('Adjust factor is:', 1/factor)
509
+
510
+ # Check if Local boosting is beneficial.
511
+ if max_res < whole_image_optimal_size:
512
+ # print("No Local boosting. Specified Max Res is smaller than R20, Returning doubleestimate result")
513
+ return cv2.resize(
514
+ whole_estimate,
515
+ (input_resolution[1], input_resolution[0]),
516
+ interpolation=cv2.INTER_CUBIC,
517
+ )
518
+
519
+ # Compute the default target resolution.
520
+ if img.shape[0] > img.shape[1]:
521
+ a = 2 * whole_image_optimal_size
522
+ b = round(2 * whole_image_optimal_size * img.shape[1] / img.shape[0])
523
+ else:
524
+ a = round(2 * whole_image_optimal_size * img.shape[0] / img.shape[1])
525
+ b = 2 * whole_image_optimal_size
526
+ b = int(round(b / factor))
527
+ a = int(round(a / factor))
528
+
529
+ """
530
+ # recompute a, b and saturate to max res.
531
+ if max(a,b) > max_res:
532
+ print('Default Res is higher than max-res: Reducing final resolution')
533
+ if img.shape[0] > img.shape[1]:
534
+ a = max_res
535
+ b = round(max_res * img.shape[1] / img.shape[0])
536
+ else:
537
+ a = round(max_res * img.shape[0] / img.shape[1])
538
+ b = max_res
539
+ b = int(b)
540
+ a = int(a)
541
+ """
542
+
543
+ img = cv2.resize(img, (b, a), interpolation=cv2.INTER_CUBIC)
544
+
545
+ # Extract selected patches for local refinement
546
+ base_size = net_receptive_field_size * 2
547
+ patchset = generatepatchs(img, base_size)
548
+
549
+ # print('Target resolution: ', img.shape)
550
+
551
+ # Compute a scale in case the user asked for results at the same resolution as the input.
553
+ # Note that the method's output resolution is independent of the input resolution; this parameter only
554
+ # enables a scaling operation during the local patch merge step so that the results match the input
555
+ # resolution.
555
+ """
556
+ if output_resolution == 1:
557
+ mergein_scale = input_resolution[0] / img.shape[0]
558
+ print('Dynamically change merged-in resolution; scale:', mergein_scale)
559
+ else:
560
+ mergein_scale = 1
561
+ """
562
+ # always rescale to input res for now
563
+ mergein_scale = input_resolution[0] / img.shape[0]
564
+
565
+ imageandpatchs = ImageandPatchs("", "", patchset, img, mergein_scale)
566
+ whole_estimate_resized = cv2.resize(
567
+ whole_estimate,
568
+ (round(img.shape[1] * mergein_scale), round(img.shape[0] * mergein_scale)),
569
+ interpolation=cv2.INTER_CUBIC,
570
+ )
571
+ imageandpatchs.set_base_estimate(whole_estimate_resized.copy())
572
+ imageandpatchs.set_updated_estimate(whole_estimate_resized.copy())
573
+
574
+ print("Resulting depthmap resolution will be :", whole_estimate_resized.shape[:2])
575
+ print("Patches to process: " + str(len(imageandpatchs)))
576
+
577
+ # Enumerate through all patches, generate their estimations, and refine the base estimate.
578
+ for patch_ind in range(len(imageandpatchs)):
579
+ # Get patch information
580
+ patch = imageandpatchs[patch_ind] # patch object
581
+ patch_rgb = patch["patch_rgb"] # rgb patch
582
+ patch_whole_estimate_base = patch[
583
+ "patch_whole_estimate_base"
584
+ ] # corresponding patch from base
585
+ rect = patch["rect"] # patch size and location
586
+ patch_id = patch["id"] # patch ID
587
+ org_size = (
588
+ patch_whole_estimate_base.shape
589
+ ) # the original size from the unscaled input
590
+ print("\t Processing patch", patch_ind, "/", len(imageandpatchs) - 1, "|", rect)
591
+
592
+ # We apply double estimation for patches. The high resolution value is fixed to twice the receptive
593
+ # field size of the network for patches to accelerate the process.
594
+ patch_estimation = doubleestimate(
595
+ patch_rgb,
596
+ net_receptive_field_size,
597
+ patch_netsize,
598
+ pix2pixsize,
599
+ model,
600
+ model_type,
601
+ pix2pixmodel,
602
+ )
603
+ patch_estimation = cv2.resize(
604
+ patch_estimation, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC
605
+ )
606
+ patch_whole_estimate_base = cv2.resize(
607
+ patch_whole_estimate_base,
608
+ (pix2pixsize, pix2pixsize),
609
+ interpolation=cv2.INTER_CUBIC,
610
+ )
611
+
612
+ # Merging the patch estimation into the base estimate using our merge network:
613
+ # We feed the patch estimation and the same region from the updated base estimate to the merge network
614
+ # to generate the target estimate for the corresponding region.
615
+ pix2pixmodel.set_input(patch_whole_estimate_base, patch_estimation)
616
+
617
+ # Run merging network
618
+ pix2pixmodel.test()
619
+ visuals = pix2pixmodel.get_current_visuals()
620
+
621
+ prediction_mapped = visuals["fake_B"]
622
+ prediction_mapped = (prediction_mapped + 1) / 2
623
+ prediction_mapped = prediction_mapped.squeeze().cpu().numpy()
624
+
625
+ mapped = prediction_mapped
626
+
627
+ # We use a simple linear polynomial to make sure the result of the merge network would match the values of
628
+ # base estimate
629
+ p_coef = np.polyfit(
630
+ mapped.reshape(-1), patch_whole_estimate_base.reshape(-1), deg=1
631
+ )
632
+ merged = np.polyval(p_coef, mapped.reshape(-1)).reshape(mapped.shape)
633
+
634
+ merged = cv2.resize(
635
+ merged, (org_size[1], org_size[0]), interpolation=cv2.INTER_CUBIC
636
+ )
637
+
638
+ # Get patch size and location
639
+ w1 = rect[0]
640
+ h1 = rect[1]
641
+ w2 = w1 + rect[2]
642
+ h2 = h1 + rect[3]
643
+
644
+ # To speed up the implementation, we only generate the Gaussian mask once with a sufficiently large size
645
+ # and resize it to our needed size while merging the patches.
646
+ if mask.shape != org_size:
647
+ mask = cv2.resize(
648
+ mask_org, (org_size[1], org_size[0]), interpolation=cv2.INTER_LINEAR
649
+ )
650
+
651
+ tobemergedto = imageandpatchs.estimation_updated_image
652
+
653
+ # Update the whole estimation:
654
+ # We use a simple Gaussian mask to blend the merged patch region with the base estimate to ensure seamless
655
+ # blending at the boundaries of the patch region.
656
+ tobemergedto[h1:h2, w1:w2] = np.multiply(
657
+ tobemergedto[h1:h2, w1:w2], 1 - mask
658
+ ) + np.multiply(merged, mask)
659
+ imageandpatchs.set_updated_estimate(tobemergedto)
660
+
661
+ # output
662
+ return cv2.resize(
663
+ imageandpatchs.estimation_updated_image,
664
+ (input_resolution[1], input_resolution[0]),
665
+ interpolation=cv2.INTER_CUBIC,
666
+ )
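The patch loop above boils down to two small numpy operations per patch: a first-degree polynomial fit that rescales the merge-network output to the value range of the base estimate, and a Gaussian-mask blend at the patch boundary. A self-contained sketch of just that arithmetic, using random stand-in arrays rather than real depth maps:

import numpy as np

rng = np.random.default_rng(0)
mapped = rng.random((64, 64))                              # merge-network output for one patch
base = 2.0 * mapped + 0.5 + 0.01 * rng.random((64, 64))    # corresponding region of the base estimate

# Fit base ~= a * mapped + b and apply it, as in the loop above.
p_coef = np.polyfit(mapped.reshape(-1), base.reshape(-1), deg=1)
merged = np.polyval(p_coef, mapped.reshape(-1)).reshape(mapped.shape)

# Blend the rescaled patch back into the running estimate with a smooth mask.
yy, xx = np.mgrid[0:64, 0:64]
mask = np.exp(-(((yy - 31.5) ** 2 + (xx - 31.5) ** 2) / (2 * 16.0 ** 2)))  # stand-in for generatemask()
blended = base * (1 - mask) + merged * mask
print(blended.shape)  # (64, 64)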
extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/multi_depth_model_woauxi.py ADDED
@@ -0,0 +1,35 @@
1
+ from . import network_auxi as network
2
+ from .net_tools import get_func
3
+ import torch
4
+ import torch.nn as nn
5
+ from modules import devices
6
+
7
+
8
+ class RelDepthModel(nn.Module):
9
+ def __init__(self, backbone="resnet50"):
10
+ super(RelDepthModel, self).__init__()
11
+ if backbone == "resnet50":
12
+ encoder = "resnet50_stride32"
13
+ elif backbone == "resnext101":
14
+ encoder = "resnext101_stride32x8d"
15
+ self.depth_model = DepthModel(encoder)
16
+
17
+ def inference(self, rgb):
18
+ with torch.no_grad():
19
+ input = rgb.to(self.depth_model.device)
20
+ depth = self.depth_model(input)
21
+ # pred_depth_out = depth - depth.min() + 0.01
22
+ return depth # pred_depth_out
23
+
24
+
25
+ class DepthModel(nn.Module):
26
+ def __init__(self, encoder):
27
+ super(DepthModel, self).__init__()
28
+ backbone = network.__name__.split(".")[-1] + "." + encoder
29
+ self.encoder_modules = get_func(backbone)()
30
+ self.decoder_modules = network.Decoder()
31
+
32
+ def forward(self, x):
33
+ lateral_out = self.encoder_modules(x)
34
+ out_logit = self.decoder_modules(lateral_out)
35
+ return out_logit
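A minimal sketch of driving this wrapper directly (checkpoint loading via load_ckpt is omitted, and note that inference() reads depth_model.device, which is not assigned in this file, so the sketch sets it explicitly):

import torch

model = RelDepthModel(backbone="resnet50").eval()
model.depth_model.device = torch.device("cpu")  # expected to be assigned by the calling code

rgb = torch.zeros(1, 3, 448, 448)               # NCHW input, preprocessed as done by the caller
with torch.no_grad():
    depth = model.inference(rgb)                # single-channel relative depth map
print(depth.shape)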
extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/net_tools.py ADDED
@@ -0,0 +1,60 @@
1
+ import importlib
2
+ import torch
3
+ import os
4
+ from collections import OrderedDict
5
+
6
+
7
+ def get_func(func_name):
8
+ """Helper to return a function object by name. func_name must identify a
9
+ function in this module or the path to a function relative to the base
10
+ 'annotator.leres.leres' package.
11
+ """
12
+ if func_name == "":
13
+ return None
14
+ try:
15
+ parts = func_name.split(".")
16
+ # Refers to a function in this module
17
+ if len(parts) == 1:
18
+ return globals()[parts[0]]
19
+ # Otherwise, assume we're referencing a module under modeling
20
+ module_name = "annotator.leres.leres." + ".".join(parts[:-1])
21
+ module = importlib.import_module(module_name)
22
+ return getattr(module, parts[-1])
23
+ except Exception:
24
+ print("Failed to find function: %s" % func_name)
25
+ raise
26
+
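For example, DepthModel above builds the backbone constructor name as "network_auxi.<encoder>" and resolves it through this helper; a short sketch:

# Resolves annotator.leres.leres.network_auxi.resnet50_stride32 and returns the function object.
encoder_ctor = get_func("network_auxi.resnet50_stride32")
encoder = encoder_ctor()  # a DepthNet instance (defined in network_auxi.py below)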
27
+
28
+ def load_ckpt(args, depth_model, shift_model, focal_model):
29
+ """
30
+ Load checkpoint.
31
+ """
32
+ if os.path.isfile(args.load_ckpt):
33
+ print("loading checkpoint %s" % args.load_ckpt)
34
+ checkpoint = torch.load(args.load_ckpt)
35
+ if shift_model is not None:
36
+ shift_model.load_state_dict(
37
+ strip_prefix_if_present(checkpoint["shift_model"], "module."),
38
+ strict=True,
39
+ )
40
+ if focal_model is not None:
41
+ focal_model.load_state_dict(
42
+ strip_prefix_if_present(checkpoint["focal_model"], "module."),
43
+ strict=True,
44
+ )
45
+ depth_model.load_state_dict(
46
+ strip_prefix_if_present(checkpoint["depth_model"], "module."), strict=True
47
+ )
48
+ del checkpoint
49
+ if torch.cuda.is_available():
50
+ torch.cuda.empty_cache()
51
+
52
+
53
+ def strip_prefix_if_present(state_dict, prefix):
54
+ keys = sorted(state_dict.keys())
55
+ if not all(key.startswith(prefix) for key in keys):
56
+ return state_dict
57
+ stripped_state_dict = OrderedDict()
58
+ for key, value in state_dict.items():
59
+ stripped_state_dict[key.replace(prefix, "")] = value
60
+ return stripped_state_dict
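A quick illustration of the prefix stripping applied to nn.DataParallel-style checkpoints (the keys here are made up for the example):

from collections import OrderedDict

sd = OrderedDict([("module.conv1.weight", 0), ("module.conv1.bias", 1)])
print(list(strip_prefix_if_present(sd, "module.").keys()))
# ['conv1.weight', 'conv1.bias']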
extensions-builtin/forge_legacy_preprocessors/annotator/leres/leres/network_auxi.py ADDED
@@ -0,0 +1,509 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.init as init
4
+
5
+ from . import Resnet, Resnext_torch
6
+
7
+
8
+ def resnet50_stride32():
9
+ return DepthNet(backbone="resnet", depth=50, upfactors=[2, 2, 2, 2])
10
+
11
+
12
+ def resnext101_stride32x8d():
13
+ return DepthNet(backbone="resnext101_32x8d", depth=101, upfactors=[2, 2, 2, 2])
14
+
15
+
16
+ class Decoder(nn.Module):
17
+ def __init__(self):
18
+ super(Decoder, self).__init__()
19
+ self.inchannels = [256, 512, 1024, 2048]
20
+ self.midchannels = [256, 256, 256, 512]
21
+ self.upfactors = [2, 2, 2, 2]
22
+ self.outchannels = 1
23
+
24
+ self.conv = FTB(inchannels=self.inchannels[3], midchannels=self.midchannels[3])
25
+ self.conv1 = nn.Conv2d(
26
+ in_channels=self.midchannels[3],
27
+ out_channels=self.midchannels[2],
28
+ kernel_size=3,
29
+ padding=1,
30
+ stride=1,
31
+ bias=True,
32
+ )
33
+ self.upsample = nn.Upsample(
34
+ scale_factor=self.upfactors[3], mode="bilinear", align_corners=True
35
+ )
36
+
37
+ self.ffm2 = FFM(
38
+ inchannels=self.inchannels[2],
39
+ midchannels=self.midchannels[2],
40
+ outchannels=self.midchannels[2],
41
+ upfactor=self.upfactors[2],
42
+ )
43
+ self.ffm1 = FFM(
44
+ inchannels=self.inchannels[1],
45
+ midchannels=self.midchannels[1],
46
+ outchannels=self.midchannels[1],
47
+ upfactor=self.upfactors[1],
48
+ )
49
+ self.ffm0 = FFM(
50
+ inchannels=self.inchannels[0],
51
+ midchannels=self.midchannels[0],
52
+ outchannels=self.midchannels[0],
53
+ upfactor=self.upfactors[0],
54
+ )
55
+
56
+ self.outconv = AO(
57
+ inchannels=self.midchannels[0], outchannels=self.outchannels, upfactor=2
58
+ )
59
+ self._init_params()
60
+
61
+ def _init_params(self):
62
+ for m in self.modules():
63
+ if isinstance(m, nn.Conv2d):
64
+ init.normal_(m.weight, std=0.01)
65
+ if m.bias is not None:
66
+ init.constant_(m.bias, 0)
67
+ elif isinstance(m, nn.ConvTranspose2d):
68
+ init.normal_(m.weight, std=0.01)
69
+ if m.bias is not None:
70
+ init.constant_(m.bias, 0)
71
+ elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
72
+ init.constant_(m.weight, 1)
73
+ init.constant_(m.bias, 0)
74
+ elif isinstance(m, nn.Linear):
75
+ init.normal_(m.weight, std=0.01)
76
+ if m.bias is not None:
77
+ init.constant_(m.bias, 0)
78
+
79
+ def forward(self, features):
80
+ x_32x = self.conv(features[3]) # 1/32
81
+ x_32 = self.conv1(x_32x)
82
+ x_16 = self.upsample(x_32) # 1/16
83
+
84
+ x_8 = self.ffm2(features[2], x_16) # 1/8
85
+ x_4 = self.ffm1(features[1], x_8) # 1/4
86
+ x_2 = self.ffm0(features[0], x_4) # 1/2
87
+ # -----------------------------------------
88
+ x = self.outconv(x_2) # original size
89
+ return x
90
+
91
+
92
+ class DepthNet(nn.Module):
93
+ __factory = {
94
+ 18: Resnet.resnet18,
95
+ 34: Resnet.resnet34,
96
+ 50: Resnet.resnet50,
97
+ 101: Resnet.resnet101,
98
+ 152: Resnet.resnet152,
99
+ }
100
+
101
+ def __init__(self, backbone="resnet", depth=50, upfactors=[2, 2, 2, 2]):
102
+ super(DepthNet, self).__init__()
103
+ self.backbone = backbone
104
+ self.depth = depth
105
+ self.pretrained = False
106
+ self.inchannels = [256, 512, 1024, 2048]
107
+ self.midchannels = [256, 256, 256, 512]
108
+ self.upfactors = upfactors
109
+ self.outchannels = 1
110
+
111
+ # Build model
112
+ if self.backbone == "resnet":
113
+ if self.depth not in DepthNet.__factory:
114
+ raise KeyError("Unsupported depth:", self.depth)
115
+ self.encoder = DepthNet.__factory[depth](pretrained=self.pretrained)
116
+ elif self.backbone == "resnext101_32x8d":
117
+ self.encoder = Resnext_torch.resnext101_32x8d(pretrained=self.pretrained)
118
+ else:
119
+ self.encoder = Resnext_torch.resnext101(pretrained=self.pretrained)
120
+
121
+ def forward(self, x):
122
+ x = self.encoder(x) # 1/32, 1/16, 1/8, 1/4
123
+ return x
124
+
125
+
126
+ class FTB(nn.Module):
127
+ def __init__(self, inchannels, midchannels=512):
128
+ super(FTB, self).__init__()
129
+ self.in1 = inchannels
130
+ self.mid = midchannels
131
+ self.conv1 = nn.Conv2d(
132
+ in_channels=self.in1,
133
+ out_channels=self.mid,
134
+ kernel_size=3,
135
+ padding=1,
136
+ stride=1,
137
+ bias=True,
138
+ )
139
+ # NN.BatchNorm2d
140
+ self.conv_branch = nn.Sequential(
141
+ nn.ReLU(inplace=True),
142
+ nn.Conv2d(
143
+ in_channels=self.mid,
144
+ out_channels=self.mid,
145
+ kernel_size=3,
146
+ padding=1,
147
+ stride=1,
148
+ bias=True,
149
+ ),
150
+ nn.BatchNorm2d(num_features=self.mid),
151
+ nn.ReLU(inplace=True),
152
+ nn.Conv2d(
153
+ in_channels=self.mid,
154
+ out_channels=self.mid,
155
+ kernel_size=3,
156
+ padding=1,
157
+ stride=1,
158
+ bias=True,
159
+ ),
160
+ )
161
+ self.relu = nn.ReLU(inplace=True)
162
+
163
+ self.init_params()
164
+
165
+ def forward(self, x):
166
+ x = self.conv1(x)
167
+ x = x + self.conv_branch(x)
168
+ x = self.relu(x)
169
+
170
+ return x
171
+
172
+ def init_params(self):
173
+ for m in self.modules():
174
+ if isinstance(m, nn.Conv2d):
175
+ init.normal_(m.weight, std=0.01)
176
+ if m.bias is not None:
177
+ init.constant_(m.bias, 0)
178
+ elif isinstance(m, nn.ConvTranspose2d):
179
+ # init.kaiming_normal_(m.weight, mode='fan_out')
180
+ init.normal_(m.weight, std=0.01)
181
+ # init.xavier_normal_(m.weight)
182
+ if m.bias is not None:
183
+ init.constant_(m.bias, 0)
184
+ elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
185
+ init.constant_(m.weight, 1)
186
+ init.constant_(m.bias, 0)
187
+ elif isinstance(m, nn.Linear):
188
+ init.normal_(m.weight, std=0.01)
189
+ if m.bias is not None:
190
+ init.constant_(m.bias, 0)
191
+
192
+
193
+ class ATA(nn.Module):
194
+ def __init__(self, inchannels, reduction=8):
195
+ super(ATA, self).__init__()
196
+ self.inchannels = inchannels
197
+ self.avg_pool = nn.AdaptiveAvgPool2d(1)
198
+ self.fc = nn.Sequential(
199
+ nn.Linear(self.inchannels * 2, self.inchannels // reduction),
200
+ nn.ReLU(inplace=True),
201
+ nn.Linear(self.inchannels // reduction, self.inchannels),
202
+ nn.Sigmoid(),
203
+ )
204
+ self.init_params()
205
+
206
+ def forward(self, low_x, high_x):
207
+ n, c, _, _ = low_x.size()
208
+ x = torch.cat([low_x, high_x], 1)
209
+ x = self.avg_pool(x)
210
+ x = x.view(n, -1)
211
+ x = self.fc(x).view(n, c, 1, 1)
212
+ x = low_x * x + high_x
213
+
214
+ return x
215
+
216
+ def init_params(self):
217
+ for m in self.modules():
218
+ if isinstance(m, nn.Conv2d):
219
+ # init.kaiming_normal_(m.weight, mode='fan_out')
220
+ # init.normal(m.weight, std=0.01)
221
+ init.xavier_normal_(m.weight)
222
+ if m.bias is not None:
223
+ init.constant_(m.bias, 0)
224
+ elif isinstance(m, nn.ConvTranspose2d):
225
+ # init.kaiming_normal_(m.weight, mode='fan_out')
226
+ # init.normal_(m.weight, std=0.01)
227
+ init.xavier_normal_(m.weight)
228
+ if m.bias is not None:
229
+ init.constant_(m.bias, 0)
230
+ elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
231
+ init.constant_(m.weight, 1)
232
+ init.constant_(m.bias, 0)
233
+ elif isinstance(m, nn.Linear):
234
+ init.normal_(m.weight, std=0.01)
235
+ if m.bias is not None:
236
+ init.constant_(m.bias, 0)
237
+
238
+
239
+ class FFM(nn.Module):
240
+ def __init__(self, inchannels, midchannels, outchannels, upfactor=2):
241
+ super(FFM, self).__init__()
242
+ self.inchannels = inchannels
243
+ self.midchannels = midchannels
244
+ self.outchannels = outchannels
245
+ self.upfactor = upfactor
246
+
247
+ self.ftb1 = FTB(inchannels=self.inchannels, midchannels=self.midchannels)
248
+ # self.ata = ATA(inchannels = self.midchannels)
249
+ self.ftb2 = FTB(inchannels=self.midchannels, midchannels=self.outchannels)
250
+
251
+ self.upsample = nn.Upsample(
252
+ scale_factor=self.upfactor, mode="bilinear", align_corners=True
253
+ )
254
+
255
+ self.init_params()
256
+
257
+ def forward(self, low_x, high_x):
258
+ x = self.ftb1(low_x)
259
+ x = x + high_x
260
+ x = self.ftb2(x)
261
+ x = self.upsample(x)
262
+
263
+ return x
264
+
265
+ def init_params(self):
266
+ for m in self.modules():
267
+ if isinstance(m, nn.Conv2d):
268
+ # init.kaiming_normal_(m.weight, mode='fan_out')
269
+ init.normal_(m.weight, std=0.01)
270
+ # init.xavier_normal_(m.weight)
271
+ if m.bias is not None:
272
+ init.constant_(m.bias, 0)
273
+ elif isinstance(m, nn.ConvTranspose2d):
274
+ # init.kaiming_normal_(m.weight, mode='fan_out')
275
+ init.normal_(m.weight, std=0.01)
276
+ # init.xavier_normal_(m.weight)
277
+ if m.bias is not None:
278
+ init.constant_(m.bias, 0)
279
+ elif isinstance(m, nn.BatchNorm2d): # NN.Batchnorm2d
280
+ init.constant_(m.weight, 1)
281
+ init.constant_(m.bias, 0)
282
+ elif isinstance(m, nn.Linear):
283
+ init.normal_(m.weight, std=0.01)
284
+ if m.bias is not None:
285
+ init.constant_(m.bias, 0)
286
+
287
+
288
+ class AO(nn.Module):
289
+ # Adaptive output module
290
+ def __init__(self, inchannels, outchannels, upfactor=2):
291
+ super(AO, self).__init__()
292
+ self.inchannels = inchannels
293
+ self.outchannels = outchannels
294
+ self.upfactor = upfactor
295
+
296
+ self.adapt_conv = nn.Sequential(
297
+ nn.Conv2d(
298
+ in_channels=self.inchannels,
299
+ out_channels=self.inchannels // 2,
300
+ kernel_size=3,
301
+ padding=1,
302
+ stride=1,
303
+ bias=True,
304
+ ),
305
+ nn.BatchNorm2d(num_features=self.inchannels // 2),
306
+ nn.ReLU(inplace=True),
307
+ nn.Conv2d(
308
+ in_channels=self.inchannels // 2,
309
+ out_channels=self.outchannels,
310
+ kernel_size=3,
311
+ padding=1,
312
+ stride=1,
313
+ bias=True,
314
+ ),
315
+ nn.Upsample(
316
+ scale_factor=self.upfactor, mode="bilinear", align_corners=True
317
+ ),
318
+ )
319
+
320
+ self.init_params()
321
+
322
+ def forward(self, x):
323
+ x = self.adapt_conv(x)
324
+ return x
325
+
326
+ def init_params(self):
327
+ for m in self.modules():
328
+ if isinstance(m, nn.Conv2d):
329
+ # init.kaiming_normal_(m.weight, mode='fan_out')
330
+ init.normal_(m.weight, std=0.01)
331
+ # init.xavier_normal_(m.weight)
332
+ if m.bias is not None:
333
+ init.constant_(m.bias, 0)
334
+ elif isinstance(m, nn.ConvTranspose2d):
335
+ # init.kaiming_normal_(m.weight, mode='fan_out')
336
+ init.normal_(m.weight, std=0.01)
337
+ # init.xavier_normal_(m.weight)
338
+ if m.bias is not None:
339
+ init.constant_(m.bias, 0)
340
+ elif isinstance(m, nn.BatchNorm2d): # NN.Batchnorm2d
341
+ init.constant_(m.weight, 1)
342
+ init.constant_(m.bias, 0)
343
+ elif isinstance(m, nn.Linear):
344
+ init.normal_(m.weight, std=0.01)
345
+ if m.bias is not None:
346
+ init.constant_(m.bias, 0)
347
+
348
+
349
+ # ==============================================================================================================
350
+
351
+
352
+ class ResidualConv(nn.Module):
353
+ def __init__(self, inchannels):
354
+ super(ResidualConv, self).__init__()
355
+ # NN.BatchNorm2d
356
+ self.conv = nn.Sequential(
357
+ # nn.BatchNorm2d(num_features=inchannels),
358
+ nn.ReLU(inplace=False),
359
+ # nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=3, padding=1, stride=1, groups=inchannels,bias=True),
360
+ # nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=1, padding=0, stride=1, groups=1,bias=True)
361
+ nn.Conv2d(
362
+ in_channels=inchannels,
363
+ out_channels=inchannels // 2,
364
+ kernel_size=3,
365
+ padding=1,
366
+ stride=1,
367
+ bias=False,
368
+ ),
369
+ nn.BatchNorm2d(num_features=inchannels // 2),
370
+ nn.ReLU(inplace=False),
371
+ nn.Conv2d(
372
+ in_channels=inchannels // 2,
373
+ out_channels=inchannels,
374
+ kernel_size=3,
375
+ padding=1,
376
+ stride=1,
377
+ bias=False,
378
+ ),
379
+ )
380
+ self.init_params()
381
+
382
+ def forward(self, x):
383
+ x = self.conv(x) + x
384
+ return x
385
+
386
+ def init_params(self):
387
+ for m in self.modules():
388
+ if isinstance(m, nn.Conv2d):
389
+ # init.kaiming_normal_(m.weight, mode='fan_out')
390
+ init.normal_(m.weight, std=0.01)
391
+ # init.xavier_normal_(m.weight)
392
+ if m.bias is not None:
393
+ init.constant_(m.bias, 0)
394
+ elif isinstance(m, nn.ConvTranspose2d):
395
+ # init.kaiming_normal_(m.weight, mode='fan_out')
396
+ init.normal_(m.weight, std=0.01)
397
+ # init.xavier_normal_(m.weight)
398
+ if m.bias is not None:
399
+ init.constant_(m.bias, 0)
400
+ elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
401
+ init.constant_(m.weight, 1)
402
+ init.constant_(m.bias, 0)
403
+ elif isinstance(m, nn.Linear):
404
+ init.normal_(m.weight, std=0.01)
405
+ if m.bias is not None:
406
+ init.constant_(m.bias, 0)
407
+
408
+
409
+ class FeatureFusion(nn.Module):
410
+ def __init__(self, inchannels, outchannels):
411
+ super(FeatureFusion, self).__init__()
412
+ self.conv = ResidualConv(inchannels=inchannels)
413
+ # NN.BatchNorm2d
414
+ self.up = nn.Sequential(
415
+ ResidualConv(inchannels=inchannels),
416
+ nn.ConvTranspose2d(
417
+ in_channels=inchannels,
418
+ out_channels=outchannels,
419
+ kernel_size=3,
420
+ stride=2,
421
+ padding=1,
422
+ output_padding=1,
423
+ ),
424
+ nn.BatchNorm2d(num_features=outchannels),
425
+ nn.ReLU(inplace=True),
426
+ )
427
+
428
+ def forward(self, lowfeat, highfeat):
429
+ return self.up(highfeat + self.conv(lowfeat))
430
+
431
+ def init_params(self):
432
+ for m in self.modules():
433
+ if isinstance(m, nn.Conv2d):
434
+ # init.kaiming_normal_(m.weight, mode='fan_out')
435
+ init.normal_(m.weight, std=0.01)
436
+ # init.xavier_normal_(m.weight)
437
+ if m.bias is not None:
438
+ init.constant_(m.bias, 0)
439
+ elif isinstance(m, nn.ConvTranspose2d):
440
+ # init.kaiming_normal_(m.weight, mode='fan_out')
441
+ init.normal_(m.weight, std=0.01)
442
+ # init.xavier_normal_(m.weight)
443
+ if m.bias is not None:
444
+ init.constant_(m.bias, 0)
445
+ elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
446
+ init.constant_(m.weight, 1)
447
+ init.constant_(m.bias, 0)
448
+ elif isinstance(m, nn.Linear):
449
+ init.normal_(m.weight, std=0.01)
450
+ if m.bias is not None:
451
+ init.constant_(m.bias, 0)
452
+
453
+
454
+ class SenceUnderstand(nn.Module):
455
+ def __init__(self, channels):
456
+ super(SenceUnderstand, self).__init__()
457
+ self.channels = channels
458
+ self.conv1 = nn.Sequential(
459
+ nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
460
+ nn.ReLU(inplace=True),
461
+ )
462
+ self.pool = nn.AdaptiveAvgPool2d(8)
463
+ self.fc = nn.Sequential(
464
+ nn.Linear(512 * 8 * 8, self.channels), nn.ReLU(inplace=True)
465
+ )
466
+ self.conv2 = nn.Sequential(
467
+ nn.Conv2d(
468
+ in_channels=self.channels,
469
+ out_channels=self.channels,
470
+ kernel_size=1,
471
+ padding=0,
472
+ ),
473
+ nn.ReLU(inplace=True),
474
+ )
475
+ self.initial_params()
476
+
477
+ def forward(self, x):
478
+ n, c, h, w = x.size()
479
+ x = self.conv1(x)
480
+ x = self.pool(x)
481
+ x = x.view(n, -1)
482
+ x = self.fc(x)
483
+ x = x.view(n, self.channels, 1, 1)
484
+ x = self.conv2(x)
485
+ x = x.repeat(1, 1, h, w)
486
+ return x
487
+
488
+ def initial_params(self, dev=0.01):
489
+ for m in self.modules():
490
+ if isinstance(m, nn.Conv2d):
491
+ # print torch.sum(m.weight)
492
+ m.weight.data.normal_(0, dev)
493
+ if m.bias is not None:
494
+ m.bias.data.fill_(0)
495
+ elif isinstance(m, nn.ConvTranspose2d):
496
+ # print torch.sum(m.weight)
497
+ m.weight.data.normal_(0, dev)
498
+ if m.bias is not None:
499
+ m.bias.data.fill_(0)
500
+ elif isinstance(m, nn.Linear):
501
+ m.weight.data.normal_(0, dev)
502
+
503
+
504
+ if __name__ == "__main__":
505
+ net = DepthNet(depth=50)
506
+ print(net)
507
+ inputs = torch.ones(4, 3, 128, 128)
508
+ out = net(inputs)
509
+ print([tuple(o.size()) for o in out])
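Taken together, the encoder (DepthNet) emits a four-level feature pyramid and the Decoder fuses it back to input resolution through the FTB/FFM blocks and the AO head. A small shape-check sketch with untrained weights (the expected sizes follow from the strides noted above):

import torch

encoder = resnet50_stride32()   # DepthNet with a ResNet-50 backbone
decoder = Decoder()
x = torch.zeros(1, 3, 224, 224)
with torch.no_grad():
    feats = encoder(x)          # feature maps at 1/4, 1/8, 1/16 and 1/32 of the input size
    depth = decoder(feats)
print([tuple(f.shape) for f in feats])
print(tuple(depth.shape))       # expected (1, 1, 224, 224)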
extensions-builtin/forge_legacy_preprocessors/annotator/leres/pix2pix/LICENSE ADDED
@@ -0,0 +1,19 @@
1
+ https://github.com/compphoto/BoostingMonocularDepth
2
+
3
+ Copyright 2021, Seyed Mahdi Hosseini Miangoleh, Sebastian Dille, Computational Photography Laboratory. All rights reserved.
4
+
5
+ This software is for academic use only. A redistribution of this
6
+ software, with or without modifications, has to be for academic
7
+ use only, while giving the appropriate credit to the original
8
+ authors of the software. The methods implemented as a part of
9
+ this software may be covered under patents or patent applications.
10
+
11
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR IMPLIED
12
+ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
13
+ FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
14
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
15
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
16
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
17
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
18
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
19
+ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
extensions-builtin/forge_legacy_preprocessors/annotator/leres/pix2pix/models/__init__.py ADDED
@@ -0,0 +1,69 @@
1
+ """This package contains modules related to objective functions, optimizations, and network architectures.
2
+
3
+ To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
4
+ You need to implement the following five functions:
5
+ -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
6
+ -- <set_input>: unpack data from dataset and apply preprocessing.
7
+ -- <forward>: produce intermediate results.
8
+ -- <optimize_parameters>: calculate loss, gradients, and update network weights.
9
+ -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
10
+
11
+ In the function <__init__>, you need to define four lists:
12
+ -- self.loss_names (str list): specify the training losses that you want to plot and save.
13
+ -- self.model_names (str list): define networks used in our training.
14
+ -- self.visual_names (str list): specify the images that you want to display and save.
15
+ -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for a usage example.
16
+
17
+ Now you can use the model class by specifying flag '--model dummy'.
18
+ See our template model class 'template_model.py' for more details.
19
+ """
20
+
21
+ import importlib
22
+ from .base_model import BaseModel
23
+
24
+
25
+ def find_model_using_name(model_name):
26
+ """Import the module "models/[model_name]_model.py".
27
+
28
+ In the file, the class called DatasetNameModel() will
29
+ be instantiated. It has to be a subclass of BaseModel,
30
+ and it is case-insensitive.
31
+ """
32
+ model_filename = "annotator.leres.pix2pix.models." + model_name + "_model"
33
+ modellib = importlib.import_module(model_filename)
34
+ model = None
35
+ target_model_name = model_name.replace("_", "") + "model"
36
+ for name, cls in modellib.__dict__.items():
37
+ if name.lower() == target_model_name.lower() and issubclass(cls, BaseModel):
38
+ model = cls
39
+
40
+ if model is None:
41
+ print(
42
+ "In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase."
43
+ % (model_filename, target_model_name)
44
+ )
45
+ exit(0)
46
+
47
+ return model
48
+
49
+
50
+ def get_option_setter(model_name):
51
+ """Return the static method <modify_commandline_options> of the model class."""
52
+ model_class = find_model_using_name(model_name)
53
+ return model_class.modify_commandline_options
54
+
55
+
56
+ def create_model(opt):
57
+ """Create a model given the option.
58
+
59
+ This function wraps the class CustomDatasetDataLoader.
60
+ This is the main interface between this package and 'train.py'/'test.py'
61
+
62
+ Example:
63
+ >>> from models import create_model
64
+ >>> model = create_model(opt)
65
+ """
66
+ model = find_model_using_name(opt.model)
67
+ instance = model(opt)
68
+ print("model [%s] was created" % type(instance).__name__)
69
+ return instance
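The lookup is purely a naming convention: --model foo_bar maps to module annotator.leres.pix2pix.models.foo_bar_model and to a class whose lowercased name equals "foobarmodel". A sketch of just the string rule (the model name below is hypothetical):

model_name = "pix2pix4depth"
model_filename = "annotator.leres.pix2pix.models." + model_name + "_model"
target_model_name = model_name.replace("_", "") + "model"
print(model_filename)      # annotator.leres.pix2pix.models.pix2pix4depth_model
print(target_model_name)   # pix2pix4depthmodel (compared case-insensitively against class names)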
extensions-builtin/forge_legacy_preprocessors/annotator/leres/pix2pix/models/base_model.py ADDED
@@ -0,0 +1,263 @@
1
+ import os
2
+ import torch, gc
3
+ from modules import devices
4
+ from collections import OrderedDict
5
+ from abc import ABC, abstractmethod
6
+ from . import networks
7
+
8
+
9
+ class BaseModel(ABC):
10
+ """This class is an abstract base class (ABC) for models.
11
+ To create a subclass, you need to implement the following five functions:
12
+ -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
13
+ -- <set_input>: unpack data from dataset and apply preprocessing.
14
+ -- <forward>: produce intermediate results.
15
+ -- <optimize_parameters>: calculate losses, gradients, and update network weights.
16
+ -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
17
+ """
18
+
19
+ def __init__(self, opt):
20
+ """Initialize the BaseModel class.
21
+
22
+ Parameters:
23
+ opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
24
+
25
+ When creating your custom class, you need to implement your own initialization.
26
+ In this function, you should first call <BaseModel.__init__(self, opt)>
27
+ Then, you need to define four lists:
28
+ -- self.loss_names (str list): specify the training losses that you want to plot and save.
29
+ -- self.model_names (str list): define networks used in our training.
30
+ -- self.visual_names (str list): specify the images that you want to display and save.
31
+ -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
32
+ """
33
+ self.opt = opt
34
+ self.gpu_ids = opt.gpu_ids
35
+ self.isTrain = opt.isTrain
36
+ self.device = (
37
+ torch.device("cuda:{}".format(self.gpu_ids[0]))
38
+ if self.gpu_ids
39
+ else torch.device("cpu")
40
+ ) # get device name: CPU or GPU
41
+ self.save_dir = os.path.join(
42
+ opt.checkpoints_dir, opt.name
43
+ ) # save all the checkpoints to save_dir
44
+ if (
45
+ opt.preprocess != "scale_width"
46
+ ): # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
47
+ torch.backends.cudnn.benchmark = True
48
+ self.loss_names = []
49
+ self.model_names = []
50
+ self.visual_names = []
51
+ self.optimizers = []
52
+ self.image_paths = []
53
+ self.metric = 0 # used for learning rate policy 'plateau'
54
+
55
+ @staticmethod
56
+ def modify_commandline_options(parser, is_train):
57
+ """Add new model-specific options, and rewrite default values for existing options.
58
+
59
+ Parameters:
60
+ parser -- original option parser
61
+ is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
62
+
63
+ Returns:
64
+ the modified parser.
65
+ """
66
+ return parser
67
+
68
+ @abstractmethod
69
+ def set_input(self, input):
70
+ """Unpack input data from the dataloader and perform necessary pre-processing steps.
71
+
72
+ Parameters:
73
+ input (dict): includes the data itself and its metadata information.
74
+ """
75
+ pass
76
+
77
+ @abstractmethod
78
+ def forward(self):
79
+ """Run forward pass; called by both functions <optimize_parameters> and <test>."""
80
+ pass
81
+
82
+ @abstractmethod
83
+ def optimize_parameters(self):
84
+ """Calculate losses, gradients, and update network weights; called in every training iteration"""
85
+ pass
86
+
87
+ def setup(self, opt):
88
+ """Load and print networks; create schedulers
89
+
90
+ Parameters:
91
+ opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
92
+ """
93
+ if self.isTrain:
94
+ self.schedulers = [
95
+ networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers
96
+ ]
97
+ if not self.isTrain or opt.continue_train:
98
+ load_suffix = "iter_%d" % opt.load_iter if opt.load_iter > 0 else opt.epoch
99
+ self.load_networks(load_suffix)
100
+ self.print_networks(opt.verbose)
101
+
102
+ def eval(self):
103
+ """Make models eval mode during test time"""
104
+ for name in self.model_names:
105
+ if isinstance(name, str):
106
+ net = getattr(self, "net" + name)
107
+ net.eval()
108
+
109
+ def test(self):
110
+ """Forward function used in test time.
111
+
112
+ This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
113
+ It also calls <compute_visuals> to produce additional visualization results
114
+ """
115
+ with torch.no_grad():
116
+ self.forward()
117
+ self.compute_visuals()
118
+
119
+ def compute_visuals(self):
120
+ """Calculate additional output images for visdom and HTML visualization"""
121
+ pass
122
+
123
+ def get_image_paths(self):
124
+ """Return image paths that are used to load current data"""
125
+ return self.image_paths
126
+
127
+ def update_learning_rate(self):
128
+ """Update learning rates for all the networks; called at the end of every epoch"""
129
+ old_lr = self.optimizers[0].param_groups[0]["lr"]
130
+ for scheduler in self.schedulers:
131
+ if self.opt.lr_policy == "plateau":
132
+ scheduler.step(self.metric)
133
+ else:
134
+ scheduler.step()
135
+
136
+ lr = self.optimizers[0].param_groups[0]["lr"]
137
+ print("learning rate %.7f -> %.7f" % (old_lr, lr))
138
+
139
+ def get_current_visuals(self):
140
+ """Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
141
+ visual_ret = OrderedDict()
142
+ for name in self.visual_names:
143
+ if isinstance(name, str):
144
+ visual_ret[name] = getattr(self, name)
145
+ return visual_ret
146
+
147
+ def get_current_losses(self):
148
+ """Return training losses / errors. train.py will print out these errors on console, and save them to a file"""
149
+ errors_ret = OrderedDict()
150
+ for name in self.loss_names:
151
+ if isinstance(name, str):
152
+ errors_ret[name] = float(
153
+ getattr(self, "loss_" + name)
154
+ ) # float(...) works for both scalar tensor and float number
155
+ return errors_ret
156
+
157
+ def save_networks(self, epoch):
158
+ """Save all the networks to the disk.
159
+
160
+ Parameters:
161
+ epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
162
+ """
163
+ for name in self.model_names:
164
+ if isinstance(name, str):
165
+ save_filename = "%s_net_%s.pth" % (epoch, name)
166
+ save_path = os.path.join(self.save_dir, save_filename)
167
+ net = getattr(self, "net" + name)
168
+
169
+ if len(self.gpu_ids) > 0 and torch.cuda.is_available():
170
+ torch.save(net.module.cpu().state_dict(), save_path)
171
+ net.cuda(self.gpu_ids[0])
172
+ else:
173
+ torch.save(net.cpu().state_dict(), save_path)
174
+
175
+ def unload_network(self, name):
176
+ """Unload network and gc."""
177
+ if isinstance(name, str):
178
+ net = getattr(self, "net" + name)
179
+ del net
180
+ gc.collect()
181
+ devices.torch_gc()
182
+ return None
183
+
184
+ def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
185
+ """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
186
+ key = keys[i]
187
+ if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
188
+ if module.__class__.__name__.startswith("InstanceNorm") and (
189
+ key == "running_mean" or key == "running_var"
190
+ ):
191
+ if getattr(module, key) is None:
192
+ state_dict.pop(".".join(keys))
193
+ if module.__class__.__name__.startswith("InstanceNorm") and (
194
+ key == "num_batches_tracked"
195
+ ):
196
+ state_dict.pop(".".join(keys))
197
+ else:
198
+ self.__patch_instance_norm_state_dict(
199
+ state_dict, getattr(module, key), keys, i + 1
200
+ )
201
+
202
+ def load_networks(self, epoch):
203
+ """Load all the networks from the disk.
204
+
205
+ Parameters:
206
+ epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
207
+ """
208
+ for name in self.model_names:
209
+ if isinstance(name, str):
210
+ load_filename = "%s_net_%s.pth" % (epoch, name)
211
+ load_path = os.path.join(self.save_dir, load_filename)
212
+ net = getattr(self, "net" + name)
213
+ if isinstance(net, torch.nn.DataParallel):
214
+ net = net.module
215
+ # print('Loading depth boost model from %s' % load_path)
216
+ # if you are using PyTorch newer than 0.4 (e.g., built from
217
+ # GitHub source), you can remove str() on self.device
218
+ state_dict = torch.load(load_path, map_location=str(self.device))
219
+ if hasattr(state_dict, "_metadata"):
220
+ del state_dict._metadata
221
+
222
+ # patch InstanceNorm checkpoints prior to 0.4
223
+ for key in list(
224
+ state_dict.keys()
225
+ ): # need to copy keys here because we mutate in loop
226
+ self.__patch_instance_norm_state_dict(
227
+ state_dict, net, key.split(".")
228
+ )
229
+ net.load_state_dict(state_dict)
230
+
231
+ def print_networks(self, verbose):
232
+ """Print the total number of parameters in the network and (if verbose) network architecture
233
+
234
+ Parameters:
235
+ verbose (bool) -- if verbose: print the network architecture
236
+ """
237
+ print("---------- Networks initialized -------------")
238
+ for name in self.model_names:
239
+ if isinstance(name, str):
240
+ net = getattr(self, "net" + name)
241
+ num_params = 0
242
+ for param in net.parameters():
243
+ num_params += param.numel()
244
+ if verbose:
245
+ print(net)
246
+ print(
247
+ "[Network %s] Total number of parameters : %.3f M"
248
+ % (name, num_params / 1e6)
249
+ )
250
+ print("-----------------------------------------------")
251
+
252
+ def set_requires_grad(self, nets, requires_grad=False):
253
+ """Set requires_grad=False for all the networks to avoid unnecessary computations
254
+ Parameters:
255
+ nets (network list) -- a list of networks
256
+ requires_grad (bool) -- whether the networks require gradients or not
257
+ """
258
+ if not isinstance(nets, list):
259
+ nets = [nets]
260
+ for net in nets:
261
+ if net is not None:
262
+ for param in net.parameters():
263
+ param.requires_grad = requires_grad
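This is the interface that estimateboost in depthmap.py above drives for the merge network. A sketch of the call sequence, assuming pix2pixmodel is an already constructed and loaded BaseModel subclass (e.g. obtained via create_model(opt) and load_networks(...)) whose set_input accepts the two single-channel patches as used above:

pix2pixmodel.set_input(patch_whole_estimate_base, patch_estimation)
pix2pixmodel.test()                                   # forward pass under torch.no_grad()
fake_b = pix2pixmodel.get_current_visuals()["fake_B"]
merged = ((fake_b + 1) / 2).squeeze().cpu().numpy()   # map the [-1, 1] output to [0, 1]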
extensions-builtin/forge_legacy_preprocessors/annotator/leres/pix2pix/models/base_model_hg.py ADDED
@@ -0,0 +1,59 @@
1
+ import os
2
+ import torch
3
+
4
+
5
+ class BaseModelHG:
6
+ def name(self):
7
+ return "BaseModel"
8
+
9
+ def initialize(self, opt):
10
+ self.opt = opt
11
+ self.gpu_ids = opt.gpu_ids
12
+ self.isTrain = opt.isTrain
13
+ self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
14
+ self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
15
+
16
+ def set_input(self, input):
17
+ self.input = input
18
+
19
+ def forward(self):
20
+ pass
21
+
22
+ # used in test time, no backprop
23
+ def test(self):
24
+ pass
25
+
26
+ def get_image_paths(self):
27
+ pass
28
+
29
+ def optimize_parameters(self):
30
+ pass
31
+
32
+ def get_current_visuals(self):
33
+ return self.input
34
+
35
+ def get_current_errors(self):
36
+ return {}
37
+
38
+ def save(self, label):
39
+ pass
40
+
41
+ # helper saving function that can be used by subclasses
42
+ def save_network(self, network, network_label, epoch_label, gpu_ids):
43
+ save_filename = "_%s_net_%s.pth" % (epoch_label, network_label)
44
+ save_path = os.path.join(self.save_dir, save_filename)
45
+ torch.save(network.cpu().state_dict(), save_path)
46
+ if len(gpu_ids) and torch.cuda.is_available():
47
+ network.cuda(device_id=gpu_ids[0])
48
+
49
+ # helper loading function that can be used by subclasses
50
+ def load_network(self, network, network_label, epoch_label):
51
+ save_filename = "%s_net_%s.pth" % (epoch_label, network_label)
52
+ save_path = os.path.join(self.save_dir, save_filename)
53
+ print(save_path)
54
+ model = torch.load(save_path)
55
+ return model
56
+ # network.load_state_dict(torch.load(save_path))
57
+
58
+ def update_learning_rate():
59
+ pass