import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from diffusers import DiffusionPipeline


# Load the SquanchNastyAI model and its tokenizer from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained("or4cl3ai/SquanchNastyAI")
model = AutoModelForCausalLM.from_pretrained("or4cl3ai/SquanchNastyAI")

# Initialize a text-to-image pipeline. transformers has no "image-generation"
# pipeline and ViT is an image classifier, so a diffusers Stable Diffusion
# checkpoint is assumed here instead.
image_pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# Generate a text response to a prompt
def generate_response(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(**inputs, max_length=1024)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

# Generate an image from a prompt
def generate_image(prompt):
    return image_pipeline(prompt).images[0]

# Produce both outputs so the interface can show the text and the image together
def respond(prompt):
    return generate_response(prompt), generate_image(prompt)

# Create a Gradio interface for the SquanchNastyAI model
interface = gr.Interface(
    fn=respond,
    inputs=gr.Textbox(label="Prompt"),
    outputs=[gr.Text(label="Response"), gr.Image(label="Generated image")],
)

# Launch the Gradio interface
interface.launch()