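# Gradio text-to-image demo (Hugging Face Space): the user picks one of two
# diffusion models ("PhotoReal" or "Animagine XL 4") and sets the prompt,
# negative prompt, resolution, guidance scale, step count, and seed.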
import gradio as gr
import torch
from diffusers import DiffusionPipeline

# Use the GPU when available; clear any cached allocations before loading a model.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.cuda.is_available():
    torch.cuda.empty_cache()

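# genie() runs once per request: it loads only the selected pipeline, generates
# a single image, and clears the CUDA cache afterwards, so at most one model is
# resident at a time.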
def genie(Model, Prompt, negative_prompt, height, width, scale, steps, seed):
    # A seed of 0 means "random" (no generator); otherwise seed a generator for reproducibility.
    generator = None if seed == 0 else torch.manual_seed(seed)
       
    if Model == "PhotoReal":
        # Load in fp16 without the safety checker when a GPU is present; otherwise use the default dtype.
        pipe = DiffusionPipeline.from_pretrained("circulus/canvers-real-v3.9.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-real-v3.9.1")
        if torch.cuda.is_available():
            pipe.enable_xformers_memory_efficient_attention()
        pipe = pipe.to(device)
        torch.cuda.empty_cache()

        image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, generator=generator).images[0]
        torch.cuda.empty_cache()
        return image
    
    if Model == "Animagine XL 4":
        animagine = DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-4.0", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-4.0")
        if torch.cuda.is_available():
            animagine.enable_xformers_memory_efficient_attention()
        animagine = animagine.to(device)
        torch.cuda.empty_cache()

        image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, generator=generator).images[0]
        torch.cuda.empty_cache()
        return image

    # The Radio input limits Model to the two choices above, so this fallback is not normally reached.
    return None
    
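# Build the UI. The input widgets map positionally to genie()'s parameters.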
gr.Interface(fn=genie, inputs=[gr.Radio(['PhotoReal', 'Animagine XL 4'], value='PhotoReal', label='Choose Model'),
                               gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'), 
                               gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
                               gr.Slider(512, 1024, 768, step=128, label='Height'),
                               gr.Slider(512, 1024, 768, step=128, label='Width'),
                               gr.Slider(3, maximum=12, value=5, step=.25, label='Guidance Scale', info="5-7 for PhotoReal and 7-10 for Animagine"), 
                               gr.Slider(25, maximum=50, value=25, step=25, label='Number of Iterations'), 
                               gr.Slider(minimum=0, step=1, maximum=9999999999999999, randomize=True, label='Seed: 0 is Random'), 
                               ],
             outputs=gr.Image(label='Generated Image'), 
             title="Manju Dream Booth V2.5 - GPU", 
             description="<br><br><b>Warning:</b> This Demo is capable of producing NSFW content.", 
             article = "If You Enjoyed this Demo and would like to Donate, you can send any amount to any of these Wallets. <br><br>SHIB (BEP20): 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>PayPal: https://www.paypal.me/ManjushriBodhisattva <br>ETH: 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>DOGE: DL5qRkGCzB2ENBKfEhHarvKm1qas3wyHx7<br><br>Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>").launch(debug=True, max_threads=80)