Spaces: Running on T4
Update app.py
app.py CHANGED
@@ -3,7 +3,7 @@ import torch
 import numpy as np
 import modin.pandas as pd
 from PIL import Image
-from diffusers import DiffusionPipeline, CogView4Pipeline #, StableDiffusion3Pipeline from diffusers import CogView4Pipeline
+from diffusers import DiffusionPipeline, FluxPipeline #CogView4Pipeline #, StableDiffusion3Pipeline from diffusers import CogView4Pipeline
 from huggingface_hub import hf_hub_download

 device = 'cuda' if torch.cuda.is_available() else 'cpu'
@@ -36,11 +36,10 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed):
     if Model == "Club":
         torch.cuda.max_memory_allocated(device=device)
         torch.cuda.empty_cache()
-        pipe =
-        # Open it for reduce GPU memory usage
+        pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
         pipe.enable_model_cpu_offload()
-        pipe.vae.enable_slicing()
-        pipe.vae.enable_tiling()
+        #pipe.vae.enable_slicing()
+        #pipe.vae.enable_tiling()
         image = pipe(prompt=Prompt, guidance_scale=scale, num_images_per_prompt=1, num_inference_steps=steps, width=1024, height=1024,).images[0]
         return image

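For readers who want to try the changed path outside the Space, the following is a minimal, self-contained sketch of what the updated "Club" branch of genie() amounts to. It is a reconstruction under stated assumptions, not the Space's actual app.py: the prompt, guidance scale, and step count below are illustrative placeholders for the Gradio inputs the real function receives, and black-forest-labs/FLUX.1-dev is a gated checkpoint that requires an accepted license and an authenticated huggingface_hub login.

import torch
from diffusers import FluxPipeline

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Load FLUX.1-dev in bfloat16, matching the new pipe = ... line in the diff.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16,
)

# Keep submodules on the CPU and move each to the GPU only while it runs;
# this is the memory-saving measure the commit keeps enabled for the T4.
pipe.enable_model_cpu_offload()

# The commit comments these out; re-enabling them would trade decode speed
# for lower peak VRAM in the VAE pass:
# pipe.vae.enable_slicing()
# pipe.vae.enable_tiling()

# Placeholder values stand in for the app's Prompt / scale / steps inputs.
image = pipe(
    prompt="a watercolor fox in a snowy forest",
    guidance_scale=3.5,
    num_images_per_prompt=1,
    num_inference_steps=28,
    width=1024,
    height=1024,
).images[0]
image.save("club.png")

Two details carried over from the diff are worth flagging: torch.cuda.max_memory_allocated(device=device) only returns a statistic, and its value is discarded here, so the actual cleanup comes from the torch.cuda.empty_cache() call after it; and because the pipe(...) call hard-codes width=1024, height=1024, the height and width parameters of genie() have no effect in this branch.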