# Codette Terminal — Hugging Face Spaces app: text generation (GPT-2/Falcon/Mistral)
# with optional image generation, served through a Gradio Blocks UI.
import gradio as gr
from transformers import pipeline, set_seed
# Human-readable model labels (shown in the dropdown) -> Hugging Face model IDs.
AVAILABLE_MODELS = {
    "GPT-2 (small, fast)": "gpt2",
    "Falcon (TII UAE)": "tiiuae/falcon-7b-instruct",
    "Mistral (OpenAccess)": "mistralai/Mistral-7B-v0.1"
}
# Fix the RNG seed so transformers' sampling is reproducible across runs.
set_seed(42)
# Cache of already-constructed text pipelines, keyed by label (filled lazily
# by load_text_model so only models the user actually picks are downloaded).
text_model_cache = {}
# NOTE(review): "text-to-image" is not a task supported by transformers.pipeline;
# Stable Diffusion is normally loaded via diffusers.StableDiffusionPipeline.
# Confirm this line actually succeeds in the deployed Space — it likely raises
# at import time, which would explain the Space showing as "Paused".
image_generator = pipeline("text-to-image", model="CompVis/stable-diffusion-v1-4")
# Per-session chat transcripts: session_id -> list of rendered transcript lines.
chat_memory = {}
def load_text_model(model_name):
    """Return the text-generation pipeline for *model_name*, building and
    caching it on first use so repeated requests reuse the loaded weights."""
    cached = text_model_cache.get(model_name)
    if cached is None:
        cached = pipeline("text-generation", model=AVAILABLE_MODELS[model_name])
        text_model_cache[model_name] = cached
    return cached
def codette_terminal(prompt, model_name, generate_image, session_id):
    """Handle one terminal turn.

    Generates a text response for *prompt* with the selected model, appends
    the exchange to the per-session transcript, and optionally generates an
    image for the same prompt.

    Args:
        prompt: raw user input from the textbox.
        model_name: key into AVAILABLE_MODELS chosen in the dropdown.
        generate_image: whether to also run the image pipeline.
        session_id: key into chat_memory identifying this conversation.

    Returns:
        (chat_log, image) — the last 10 transcript lines joined with newlines,
        and the generated image or None.
    """
    if session_id not in chat_memory:
        chat_memory[session_id] = []

    # Strip whitespace so "exit " / " quit" are recognized and so blank
    # input is not fed to the model.
    cleaned = prompt.strip()
    if not cleaned:
        # Empty prompt: return the existing log unchanged, no model call.
        return "\n".join(chat_memory[session_id][-10:]), None

    if cleaned.lower() in ("exit", "quit"):
        chat_memory[session_id] = []
        return "🧠 Codette signing off... Session reset.", None

    # Text generation (sampled, single sequence, capped at 100 tokens total).
    generator = load_text_model(model_name)
    response = generator(
        cleaned, max_length=100, num_return_sequences=1, do_sample=True
    )[0]['generated_text'].strip()
    chat_memory[session_id].append(f"🖋️ You > {cleaned}")
    chat_memory[session_id].append(f"🧠 Codette > {response}")
    # Only the last 10 lines are shown to keep the terminal readable.
    chat_log = "\n".join(chat_memory[session_id][-10:])

    # Optional image generation for the same prompt.
    img = None
    if generate_image:
        img = image_generator(cleaned)[0]['image']
    return chat_log, img
# Assemble the Gradio interface. Component creation order determines layout,
# so it mirrors the original: headers, hidden session id, controls, outputs.
with gr.Blocks(title="Codette Terminal: Text + Image AI") as demo:
    gr.Markdown("## 🧬 Codette Terminal (Text + Image, Hugging Face Edition)")
    gr.Markdown("Choose your model, enter a prompt. Enable image generation if desired. Type `'exit'` to reset.")

    # Hidden per-browser session key; every submit reuses the same transcript.
    session_box = gr.Textbox(value="session_default", visible=False)

    # User controls.
    model_picker = gr.Dropdown(
        choices=list(AVAILABLE_MODELS.keys()),
        value="GPT-2 (small, fast)",
        label="Choose Language Model",
    )
    image_toggle = gr.Checkbox(label="Also generate image?", value=False)
    prompt_box = gr.Textbox(
        label="Your Prompt",
        placeholder="e.g. a castle on Mars, explained by an AI philosopher",
        lines=1,
    )

    # Outputs: scrolling transcript plus the optional generated image.
    transcript_view = gr.Textbox(label="Terminal Output", lines=15, interactive=False)
    image_view = gr.Image(label="AI-Generated Image")

    # Pressing Enter in the prompt box drives one terminal turn.
    prompt_box.submit(
        fn=codette_terminal,
        inputs=[prompt_box, model_picker, image_toggle, session_box],
        outputs=[transcript_view, image_view],
    )

# Launch on Spaces (and when run locally as a script).
if __name__ == "__main__":
    demo.launch()