import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from diffusers import StableDiffusionPipeline

# Load the SquanchNastyAI model and its tokenizer from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained("or4cl3ai/SquanchNastyAI")
model = AutoModelForCausalLM.from_pretrained("or4cl3ai/SquanchNastyAI")

# Initialize a text-to-image pipeline for image generation
# (image generation requires a text-to-image diffusion model; Stable Diffusion v1.5 is assumed here)
image_pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# Generate a text response to a prompt
def generate_response(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(**inputs, max_length=1024)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

# Generate an image from a prompt
def generate_image(prompt):
    return image_pipeline(prompt).images[0]
# Run both generators so the interface can return a text and an image output
def generate(prompt):
    return generate_response(prompt), generate_image(prompt)

# Create a Gradio interface for the SquanchNastyAI model
interface = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(label="Prompt"),
    outputs=[gr.Text(label="Response"), gr.Image(label="Generated image")],
)

# Launch the Gradio interface
interface.launch()
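
# Example client usage (a sketch, assuming this app is deployed as the Space
# "or4cl3ai/SquanchNastyAI" and exposes the default /predict endpoint):
#
#     from gradio_client import Client
#     client = Client("or4cl3ai/SquanchNastyAI")
#     text, image_path = client.predict("Write a short greeting", api_name="/predict")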