xbarusui committed
Commit 08cdb29 · verified · 1 Parent(s): 135b063

Update app.py

Files changed (1):
  app.py  +17 -4
app.py CHANGED
@@ -7,16 +7,22 @@ import gradio as gr
 import numpy as np
 import random
 #from diffusers import DiffusionPipeline
-from diffusers import StableDiffusionXLPipeline
+from diffusers import StableDiffusionXLPipeline, DDIMScheduler
+from huggingface_hub import hf_hub_download


 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1216

+base_model_id = "Laxhar/noobai-XL-1.0"
+repo_name = "ByteDance/Hyper-SD"
+# Take 2-steps lora as an example
+ckpt_name = "Hyper-SDXL-8steps-lora.safetensors"
+
+# Load model.
 #pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
 pipe = StableDiffusionXLPipeline.from_pretrained(
-    #"yodayo-ai/kivotos-xl-2.0",
-    "Laxhar/noobai-XL-1.0",
+    base_model_id,
     torch_dtype=torch.float16,
     use_safetensors=True,
     custom_pipeline="lpw_stable_diffusion_xl",
@@ -24,8 +30,15 @@ pipe = StableDiffusionXLPipeline.from_pretrained(
 )
 pipe.to('cuda')

+#pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16, variant="fp16").to("cuda")
+pipe.load_lora_weights(hf_hub_download(repo_name, ckpt_name))
+pipe.fuse_lora()
+# Ensure ddim scheduler timestep spacing set as trailing !!!
+pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
+# lower eta results in more detail
+
 prompt = "1girl, solo, upper body, v, smile, looking at viewer, outdoors, night, masterpiece, best quality, very aesthetic, absurdres"
-negative_prompt = "nsfw, (low quality, worst quality:1.2), very displeasing, 3d, watermark, signature, ugly, poorly drawn"
+negative_prompt = "(worst quality),(low quality),lowres,(bad anatomy),(deformed anatomy),(deformed fingers),(blurry),(extra finger),(extra arms), (extra legs),(monochrome:1.4),(grayscale:1.4),((watermark)),(overweight female:1.6),((pointy ears)),mascot,stuffed human, stuffed animal,chibi,english text, chinese text, korean text"

 @spaces.GPU
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
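
The diff is truncated at the infer signature, so the call into the fused pipeline is not shown. The following is a minimal sketch, not the commit's actual body, of how the Hyper-SD 8-step LoRA plus trailing-spaced DDIM scheduler set up above would presumably be driven from infer. It assumes the module-level names defined earlier in app.py (pipe, MAX_SEED, random, spaces, torch); the generator variable and the return values are assumptions for illustration.

# Hypothetical sketch of the infer body; relies on the module-level names from
# app.py above (pipe, MAX_SEED, random, spaces, torch). Not the commit's code.
@spaces.GPU
def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cuda").manual_seed(seed)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        # Hyper-SD step-distilled LoRAs target few-step sampling; 8 matches the 8-step checkpoint
        num_inference_steps=num_inference_steps,
        # the distilled LoRA is meant for a low CFG scale; eta could also be passed here
        # for DDIM (the "lower eta results in more detail" note above), left at its default
        guidance_scale=guidance_scale,
        generator=generator,
    ).images[0]
    return image, seed

With the fused 8-step LoRA, the Gradio controls for num_inference_steps and guidance_scale would typically be set to 8 and a low value, following the usage shown on the ByteDance/Hyper-SD model card.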