UAI-Software committed on
Commit 5e2d8cd · verified · 1 parent: b68bc7a

Upload folder using huggingface_hub

__pycache__/imageRequest.cpython-39.pyc ADDED
Binary file (10 kB).
 
__pycache__/my_handler.cpython-39.pyc ADDED
Binary file (1.46 kB).
 
handler.py ADDED
@@ -0,0 +1,75 @@
+ import os
+ import sys
+ from typing import Any, Dict
+ 
+ rootDir = os.path.abspath(os.path.dirname(__file__))
+ sys.path.append(rootDir)
+ 
+ from imageRequest import ImageRequest
+ 
+ 
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # The pipeline is loaded lazily on the first request so the model
+         # selection can come from the request payload.
+         self.pipe = None
+         self.modelName = ""
+         os.listdir(path)  # fail fast if the repository path is not readable
+ 
+     def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         data args:
+             input (:obj:`str` | `PIL.Image` | `np.array`)
+             seed (:obj:`int`)
+             prompt (:obj:`str`)
+             negative_prompt (:obj:`str`)
+             steps (:obj:`int`)
+             guidance_scale (:obj:`float`)
+             width (:obj:`int`)
+             height (:obj:`int`)
+             kwargs
+         Return:
+             A :obj:`dict` that will be serialized and returned.
+         """
+         inputs = data.pop("inputs", data)
+         request = ImageRequest.FromDict(inputs)
+         response = self.__runProcess__(request)
+         return response
+ 
+     def ImageToBase64(self, image):
+         import io
+         import base64
+         buffered = io.BytesIO()
+         image.save(buffered, format="PNG")
+         return base64.b64encode(buffered.getvalue()).decode()
+ 
+     def __runProcess__(self, request: ImageRequest) -> Dict[str, Any]:
+         """
+         Run the SDXL Lightning pipeline.
+         """
+         import torch
+         from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
+         from huggingface_hub import hf_hub_download
+         from safetensors.torch import load_file
+ 
+         base = "stabilityai/stable-diffusion-xl-base-1.0"
+         repo = "ByteDance/SDXL-Lightning"
+         ckpt = "sdxl_lightning_4step_unet.safetensors"  # Use the correct ckpt for your step setting!
+         if request.model == "default":
+             request.model = base
+         else:
+             base = request.model
+ 
+         if self.pipe is None or self.modelName != request.model:
+             # Load the Lightning UNet into the SDXL base pipeline.
+             unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
+             unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cuda"))
+             pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")
+ 
+             # Ensure the sampler uses "trailing" timesteps.
+             pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
+             self.pipe = pipe
+             self.modelName = request.model  # remember which model is cached
+ 
+         # Use the same number of inference steps as the loaded checkpoint and keep CFG at 0.
+         images = self.pipe(request.prompt, negative_prompt=request.negative_prompt, num_inference_steps=request.steps, guidance_scale=0).images
+ 
+         return {"media": [self.ImageToBase64(img) for img in images]}
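For quick local verification, a smoke test along these lines should work. This is a sketch, not part of the commit: it assumes a CUDA GPU and that torch, diffusers, safetensors, huggingface_hub, and Pillow are installed, and the prompt values are placeholders.

# Minimal local smoke test for handler.py (run from the repository root).
from handler import EndpointHandler

handler = EndpointHandler(path=".")  # path is only listed; no weights are read here

payload = {
    "inputs": {
        "prompt": "a lighthouse at sunset, cinematic lighting",   # placeholder
        "negative_prompt": "blurry, low quality",                  # placeholder
        "steps": 4,            # must match the 4-step Lightning UNet checkpoint
        "model": "default",    # "default" resolves to stabilityai/stable-diffusion-xl-base-1.0
    }
}

result = handler(payload)   # {"media": [<base64-encoded PNG>, ...]}
print(len(result["media"]), "image(s) returned")

Because the 4-step Lightning UNet is baked into the pipeline, steps should stay at 4 and guidance_scale is forced to 0 inside the handler regardless of what the payload sends.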
imageRequest.py ADDED
@@ -0,0 +1,638 @@
+ import os
+ import json
+ from typing import Any
+ 
+ 
+ class ImageRequest:
+     def __init__(self):
+         self.ratio = 1
+         self.configScale = 0
+         self.guidance_scale = 7.5
+         self.seed = 52125
+         self.prompt = ""
+         self.negative_prompt = ""
+         self.neg_prompt = ""
+         self.num_images_per_prompt = 1
+         self.imagesToGenerate = 1
+         self.num_frames = 1
+         self.size = [1920, 1080]
+         self.encodeSize = [1024, 576]
+         self.steps = 6
+         self.faceFix = 1
+         self.width = 1920
+         self.height = 1080
+         self.maxSize = 1080
+         self.resolution = 256
+         self.topOffset = 0
+         self.altitude = 0
+         self.distance = 0
+         self.fov = 0
+         self.fovx = 0
+         self.fovy = 0
+         self.range = [0, 1]
+         self.range2 = [0, 1]
+         self.version = "1.3"
+         self.upscale = 2
+         self.bg_upsampler = "realesrgan"
+         self.chunkSize = 8192
+         self.bg_tile = 400
+         self.suffix = "None"
+         self.only_center_face = False
+         self.aligned = False
+         self.ext = "auto"
+         self.foregroundRatio = 1.0
+         self.weight = 0.75
+         self.s_scale = 0.75
+         self.scale = 0.75
+         self.encodedFps = 8
+         self.fps = 24
+         self.loops = 0
+         self.maskIndex = 0
+         self.removeBg = False
+         self.isXL = False
+         self.isXLLightning = False
+         self.watermarked = False
+         self.exportGif = False
+         self.exportTextures = False
+         self.exportMeshes = False
+         self.exportFiles = False
+         self.exportOther = False
+         self.export = False
+         self.saveFaceEmbeddings = False
+         self.overrideForm = "False"
+ 
+         self.submode = "default"
+         self.mode = "default"
+         self.model = "default"
+         self.modelType = "default"
+         self.ipAdapterModel = ""
+         self.onnxModel = ""
+         self.inpaintModel = ""
+         self.controlnetModel = ""
+         self.vaeRepo = ""
+         self.schedueler = ""
+         self.device = ""
+         self.tempPath = ""
+         self.paramsData = ""
+         self.exportSize = ""
+         self.faceEmbeddings = ""
+         self.input = ""
+         self.mask = ""
+         self.inputs = []
+         self.masks = []
+         self.mediaPaths = []
+         self.urls = []
+         self.styleImages = []
+         self.vertecies = []
+         self.points = []
+         self.primitives = []
+         self.meshes = []
+         self.objects = []
+         self.output = ""
+         self.customSDBinWeights = ""
+         self.textualInversionWeights = ""
+         self.refinerRepo = ""
+         self.metadata = ""
+         self.url = ""
+         self.shareuser = ""
+         self.user = ""
+         self.ipAdapters = []
+         self.loras = []
+ 
+     def DetectSizeFromSize(self):
+         if isinstance(self.size, str):
+             split = self.size.split(",")
+             width = int(split[0])
+             height = int(split[1])
+             self.ratio = float(width) / float(height)
+             self.size = [width, height]
+             self.width = width
+             self.height = height
+ 
+     def DetectEncodeSize(self):
+         if isinstance(self.encodeSize, str):
+             split = self.encodeSize.split(" ")
+             sizeSplit = split[1].lower().split("x")
+             width = int(sizeSplit[0])
+             height = int(sizeSplit[1])
+             self.encodeSize = [width, height]
+ 
+     def DetectSizeFromExportSize(self):
+         split = self.exportSize.split(" ")
+         ratio = split[0]
+         sizeSplit = split[1].lower().split("x")
+         width = int(sizeSplit[0])
+         height = int(sizeSplit[1])
+         self.ratio = float(width) / float(height)
+         self.size = [width, height]
+         self.width = width
+         self.height = height
+ 
+     def DetectXL(self):
+         self.isXL = "XL" in self.model or "xl" in self.model
+         if self.isXL:
+             self.isXLLightning = "lightning" in self.model or "Lightning" in self.model
+ 
+     def JSON(self):
+         return json.dumps(self.__dict__)
+ 
+     def SetOutputExtension(self, extension="png"):
+         root, ext = os.path.splitext(self.output)
+         if not ext:
+             ext = f'.{extension}'
+         self.output = root + ext
+ 
+     def CreateResponse(self, outputFiles=[], objectType="image", isBase64=False):
+         outputDict = {"media": []}
+         for file in outputFiles:
+             outputDict["media"].append({"media": file, "prompt": self.prompt,
+                 "seed": self.seed, "objectType": objectType, "isBase64": isBase64})
+ 
+         outputDict["media"].append({"media": self.input, "prompt": self.prompt,
+             "seed": self.seed, "objectType": objectType, "isBase64": True})
+         return outputDict
+ 
+     @staticmethod
+     def FromDict(obj: Any) -> 'ImageRequest':
+         # Every field is optional: missing or malformed keys silently keep the
+         # defaults set in __init__.
+         imgRequest = ImageRequest()
+         try:
+             imgRequest.chunkSize = int(obj["chunkSize"])
+         except:
+             pass
+         try:
+             imgRequest.steps = int(obj["steps"])
+         except:
+             pass
+         try:
+             imgRequest.weight = float(obj["weight"])
+         except:
+             pass
+         try:
+             imgRequest.ratio = float(obj["ratio"])
+         except:
+             pass
+         try:
+             imgRequest.s_scale = float(obj["s_scale"])
+         except:
+             pass
+         try:
+             imgRequest.scale = float(obj["scale"])
+         except:
+             pass
+         try:
+             imgRequest.configScale = float(obj["configScale"])
+             imgRequest.guidance_scale = float(obj["configScale"])
+         except:
+             pass
+         try:
+             imgRequest.guidance_scale = float(obj["guidance_scale"])
+             imgRequest.configScale = float(obj["guidance_scale"])
+         except:
+             pass
+         try:
+             imgRequest.seed = int(obj["seed"])
+         except:
+             pass
+         try:
+             imgRequest.exportTextures = bool(obj["exportTextures"])
+         except:
+             pass
+         try:
+             imgRequest.exportOther = bool(obj["exportOther"])
+         except:
+             pass
+         try:
+             imgRequest.export = bool(obj["export"])
+         except:
+             pass
+         try:
+             imgRequest.exportFiles = bool(obj["exportFiles"])
+         except:
+             pass
+         try:
+             imgRequest.vertecies = obj["vertecies"]
+         except:
+             pass
+         try:
+             imgRequest.points = obj["points"]
+         except:
+             pass
+         try:
+             imgRequest.objects = obj["objects"]
+         except:
+             pass
+         try:
+             imgRequest.meshes = obj["meshes"]
+         except:
+             pass
+         try:
+             imgRequest.primitives = obj["primitives"]
+         except:
+             pass
+         try:
+             imgRequest.exportMeshes = bool(obj["exportMeshes"])
+         except:
+             pass
+         try:
+             imgRequest.faceEmbeddings = obj["faceEmbeddings"]
+         except:
+             pass
+         try:
+             imgRequest.encodeSize = obj["encodeSize"]
+             imgRequest.DetectEncodeSize()
+         except:
+             pass
+         try:
+             imgRequest.removeBg = int(obj["removeBg"])
+         except:
+             pass
+         try:
+             imgRequest.ipAdapterModel = obj["ipAdapterModel"]
+         except:
+             pass
+         try:
+             imgRequest.topOffset = int(obj["topOffset"])
+         except:
+             pass
+         try:
+             imgRequest.foregroundRatio = float(obj["foregroundRatio"])
+         except:
+             pass
+         try:
+             imgRequest.resolution = int(obj["resolution"])
+         except:
+             pass
+         try:
+             imgRequest.maxSize = int(obj["maxSize"])
+         except:
+             pass
+         try:
+             imgRequest.prompt = obj["prompt"]
+         except:
+             pass
+         try:
+             imgRequest.negative_prompt = obj["negative_prompt"]
+         except:
+             pass
+         try:
+             imgRequest.negative_prompt = obj["neg_prompt"]
+             imgRequest.neg_prompt = obj["neg_prompt"]
+         except:
+             pass
+         try:
+             imgRequest.negative_prompt = obj["negative_prompt"]
+             imgRequest.neg_prompt = obj["negative_prompt"]
+         except:
+             pass
+         try:
+             imgRequest.num_images_per_prompt = int(obj["num_images_per_prompt"])
+             imgRequest.imagesToGenerate = int(obj["num_images_per_prompt"])
+         except:
+             pass
+         try:
+             imgRequest.num_images_per_prompt = int(obj["imagesToGenerate"])
+             imgRequest.imagesToGenerate = int(obj["imagesToGenerate"])
+             imgRequest.num_frames = int(obj["imagesToGenerate"])
+         except:
+             pass
+         try:
+             imgRequest.num_images_per_prompt = int(obj["num_frames"])
+             imgRequest.imagesToGenerate = int(obj["num_frames"])
+             imgRequest.num_frames = int(obj["num_frames"])
+         except:
+             pass
+         try:
+             imgRequest.size = obj["size"]
+         except:
+             pass
+         try:
+             imgRequest.faceFix = int(obj["faceFix"])
+         except:
+             pass
+         try:
+             imgRequest.version = str(obj["version"])
+         except:
+             pass
+         try:
+             imgRequest.weight = float(obj["facefixweight"])
+         except:
+             pass
+         try:
+             imgRequest.upscale = int(obj["facefixupscale"])
+         except:
+             pass
+         try:
+             imgRequest.upscale = int(obj["upscale"])
+         except:
+             pass
+         try:
+             imgRequest.bg_upsampler = str(obj["bg_upsampler"])
+         except:
+             pass
+         try:
+             imgRequest.bg_tile = int(obj["bg_tile"])
+         except:
+             pass
+         try:
+             imgRequest.loops = int(obj["loops"])
+         except:
+             pass
+         try:
+             imgRequest.suffix = str(obj["suffix"])
+         except:
+             pass
+         try:
+             imgRequest.only_center_face = bool(obj["only_center_face"])
+         except:
+             pass
+         try:
+             imgRequest.exportGif = bool(obj["exportGif"])
+         except:
+             pass
+         try:
+             imgRequest.saveFaceEmbeddings = bool(obj["saveFaceEmbeddings"])
+         except:
+             pass
+         try:
+             imgRequest.aligned = bool(obj["aligned"])
+         except:
+             pass
+         try:
+             imgRequest.ext = str(obj["ext"])
+         except:
+             pass
+         try:
+             imgRequest.submode = obj["submode"]
+         except:
+             pass
+         try:
+             imgRequest.mode = obj["mode"]
+         except:
+             pass
+         try:
+             imgRequest.model = obj["model"]
+         except:
+             pass
+         try:
+             imgRequest.modelType = obj["modelType"]
+         except:
+             pass
+         try:
+             imgRequest.onnxModel = obj["onnxModel"]
+         except:
+             pass
+         try:
+             imgRequest.inpaintModel = obj["inpaintModel"]
+         except:
+             pass
+         try:
+             imgRequest.controlnetModel = obj["controlnetModel"]
+         except:
+             pass
+         try:
+             imgRequest.vaeRepo = obj["vaeRepo"]
+         except:
+             pass
+         try:
+             imgRequest.altitude = float(obj["altitude"])
+         except:
+             pass
+         try:
+             imgRequest.distance = float(obj["distance"])
+         except:
+             pass
+         try:
+             imgRequest.fov = float(obj["fov"])
+         except:
+             pass
+         try:
+             imgRequest.fovx = float(obj["fovx"])
+         except:
+             pass
+         try:
+             imgRequest.fovy = float(obj["fovy"])
+         except:
+             pass
+         try:
+             imgRequest.range = obj["range"]
+         except:
+             pass
+         try:
+             imgRequest.range2 = obj["range2"]
+         except:
+             pass
+         try:
+             imgRequest.schedueler = obj["schedueler"]
+         except:
+             pass
+         try:
+             imgRequest.device = obj["device"]
+         except:
+             pass
+         try:
+             imgRequest.tempPath = obj["tempPath"]
+         except:
+             pass
+         try:
+             imgRequest.paramsData = obj["paramsData"]
+         except:
+             pass
+         try:
+             imgRequest.exportSize = obj["exportSize"]
+         except:
+             pass
+         try:
+             imgRequest.input = obj["input"]
+         except:
+             pass
+         # Fallbacks for multipart uploads when running inside a Flask request context.
+         try:
+             import flask
+             import base64
+             inputImage = flask.request.files['inputImage']
+             image_string = base64.b64encode(inputImage.read())
+             imgRequest.input = image_string
+         except:
+             pass
+         try:
+             import flask
+             import base64
+             inputImage = flask.request.files['input']
+             image_string = base64.b64encode(inputImage.read())
+             imgRequest.input = image_string
+         except:
+             pass
+         try:
+             inputs = obj["inputs"]
+             for inputImage in inputs:
+                 imgRequest.inputs.append(inputImage)
+         except:
+             pass
+         try:
+             import flask
+             import base64
+             inputs = flask.request.files['inputs']
+             for inputImage in inputs:
+                 image_string = base64.b64encode(inputImage.read())
+                 imgRequest.inputs.append(image_string)
+         except:
+             pass
+         try:
+             import flask
+             import base64
+             inputs = flask.request.files['masks']
+             for inputImage in inputs:
+                 image_string = base64.b64encode(inputImage.read())
+                 imgRequest.masks.append(image_string)
+         except:
+             pass
+         try:
+             import flask
+             import base64
+             inputs = flask.request.files['styleImages']
+             for inputImage in inputs:
+                 image_string = base64.b64encode(inputImage.read())
+                 imgRequest.styleImages.append(image_string)
+         except:
+             pass
+         try:
+             import flask
+             import pickle
+             inputs = flask.request.files['faceEmbeddings']
+             for inputImage in inputs:
+                 loaded = pickle.loads(inputImage.read())
+                 imgRequest.faceEmbeddings.append(loaded)
+         except:
+             pass
+         try:
+             imgRequest.mask = obj["mask"]
+         except:
+             pass
+         try:
+             imgRequest.masks = obj["masks"]
+         except:
+             pass
+         try:
+             imgRequest.mediaPaths = obj["mediaPaths"]
+         except:
+             pass
+         try:
+             imgRequest.urls = obj["urls"]
+         except:
+             pass
+         try:
+             imgRequest.styleImages = obj["styleImages"]
+         except:
+             pass
+         try:
+             imgRequest.output = obj["output"]
+         except:
+             pass
+         try:
+             imgRequest.customSDBinWeights = obj["customSDBinWeights"]
+         except:
+             pass
+         try:
+             imgRequest.textualInversionWeights = obj["textualInversionWeights"]
+         except:
+             pass
+         try:
+             imgRequest.refinerRepo = obj["refinerRepo"]
+         except:
+             pass
+         try:
+             imgRequest.metadata = obj["metadata"]
+         except:
+             pass
+         try:
+             imgRequest.url = obj["url"]
+         except:
+             pass
+         try:
+             imgRequest.shareuser = obj["shareuser"]
+         except:
+             pass
+         try:
+             imgRequest.user = obj["user"]
+         except:
+             pass
+         try:
+             imgRequest.ipAdapters = obj["ipAdapters"]
+         except:
+             pass
+         try:
+             imgRequest.loras = obj["loras"]
+         except:
+             pass
+         try:
+             imgRequest.maskIndex = int(obj["maskIndex"])
+         except:
+             pass
+         try:
+             imgRequest.watermarked = bool(obj["watermarked"])
+         except:
+             pass
+         try:
+             imgRequest.overrideForm = obj["overrideForm"]
+         except:
+             pass
+         try:
+             imgRequest.isXL = bool(obj["isXL"])
+         except:
+             pass
+         try:
+             imgRequest.isXLLightning = bool(obj["isXLLightning"])
+         except:
+             pass
+         try:
+             imgRequest.width = int(obj["width"])
+         except:
+             pass
+         try:
+             imgRequest.height = int(obj["height"])
+         except:
+             pass
+         try:
+             imgRequest.fps = float(obj["fps"])
+         except:
+             pass
+         try:
+             imgRequest.encodedFps = float(obj["encodedFps"])
+         except:
+             pass
+         if not imgRequest.isXL:
+             imgRequest.DetectXL()
+         return imgRequest
+ 
+ # Example usage:
+ # data = json.loads(json_string)
+ # request = ImageRequest.FromDict(data)
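A rough illustration of how FromDict behaves (the values below are hypothetical; any missing or malformed key simply keeps its default from __init__, and numeric fields are coerced from strings):

# Run from the repository root.
from imageRequest import ImageRequest

req = ImageRequest.FromDict({
    "prompt": "studio portrait",      # placeholder prompt
    "steps": "8",                     # string is coerced to int(8)
    "guidance_scale": "1.5",          # coerced to float; also mirrored into configScale
    "model": "SDXL-Lightning",        # DetectXL() flags isXL / isXLLightning from the name
})

print(req.steps, req.guidance_scale, req.isXL, req.isXLLightning)
print(req.JSON()[:120], "...")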
requirements.txt ADDED
@@ -0,0 +1 @@
+ diffusers
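Note that handler.py also imports torch, safetensors, huggingface_hub, and Pillow at call time (and imageRequest.py optionally uses flask), so this single-entry requirements file presumably relies on the endpoint's base image to provide the rest. A quick sanity check, if needed:

# Optional check that the runtime image provides everything handler.py imports lazily.
import importlib

for module in ("torch", "diffusers", "safetensors", "huggingface_hub", "PIL"):
    try:
        importlib.import_module(module)
        print(f"{module}: ok")
    except ImportError as err:
        print(f"{module}: MISSING ({err})")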
test.py ADDED
@@ -0,0 +1,4 @@
+ from hf_endpoints_emulator.emulator import emulate
+ 
+ 
+ emulate(handler_path="examples/my_handler.py", port=5000)
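The handler_path above points at examples/my_handler.py, which is not included in this upload (the handler added here lives at handler.py), so the path will likely need adjusting before the emulator runs against this repo. Once it is serving on port 5000, a request could look roughly like the sketch below; the route and payload shape are assumptions mirroring what EndpointHandler expects, and the requests package is an extra dependency.

# Hypothetical client for the emulator above; adjust the URL/route to however
# hf_endpoints_emulator actually exposes the handler.
import requests

resp = requests.post(
    "http://localhost:5000",
    json={"inputs": {"prompt": "an astronaut riding a horse", "steps": 4}},
    timeout=300,
)
resp.raise_for_status()
print(len(resp.json()["media"]), "image(s) in response")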