Raiff1982 committed (verified)
Commit bbeece2 · 1 Parent(s): 0462aae

Delete app.py

Files changed (1)
  1. app.py +0 -98
app.py DELETED
@@ -1,98 +0,0 @@
- import gradio as gr
- import torch
- from transformers import pipeline, set_seed
- from diffusers import DiffusionPipeline
- import tempfile
- import imageio
-
- # ---------- Setup ----------
- AVAILABLE_MODELS = {
-     "GPT-2 (small, fast)": "gpt2",
-     "Falcon (TII UAE)": "tiiuae/falcon-7b-instruct",
-     "Mistral (OpenAccess)": "mistralai/Mistral-7B-v0.1"
- }
-
- set_seed(42)
- text_model_cache = {}
-
- # Load text-to-image model using diffusers (correct API)
- try:
-     image_generator = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
-     image_generator.to("cpu")
-     image_enabled = True
- except Exception as e:
-     image_generator = None
-     image_enabled = False
-     print(f"[Image model error]: {e}")
-
- # Load text-to-video model
- try:
-     video_pipeline = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
-     video_pipeline.to("cpu")
-     video_enabled = True
- except Exception as e:
-     video_pipeline = None
-     video_enabled = False
-     print(f"[Video model error]: {e}")
-
- chat_memory = {}
-
- # ---------- Core Function ----------
- def codette_terminal(prompt, model_name, generate_image, generate_video, session_id):
-     if session_id not in chat_memory:
-         chat_memory[session_id] = []
-
-     if prompt.lower() in ["exit", "quit"]:
-         chat_memory[session_id] = []
-         return "🧠 Codette signing off... Session reset.", None, None
-
-     if model_name not in text_model_cache:
-         text_model_cache[model_name] = pipeline("text-generation", model=AVAILABLE_MODELS[model_name])
-     generator = text_model_cache[model_name]
-     response = generator(prompt, max_length=100, num_return_sequences=1, do_sample=True)[0]['generated_text'].strip()
-
-     chat_memory[session_id].append(f"🖋️ You > {prompt}")
-     chat_memory[session_id].append(f"🧠 Codette > {response}")
-     chat_log = "\n".join(chat_memory[session_id][-10:])
-
-     img = None
-     if generate_image and image_enabled:
-         try:
-             img = image_generator(prompt).images[0]
-         except Exception as e:
-             chat_log += f"\n[Image error]: {e}"
-
-     vid = None
-     if generate_video and video_enabled:
-         try:
-             video_frames = video_pipeline(prompt, num_inference_steps=50).frames
-             temp_video_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
-             imageio.mimsave(temp_video_path, video_frames, fps=8)
-             vid = temp_video_path
-         except Exception as e:
-             chat_log += f"\n[Video error]: {e}"
-
-     return chat_log, img, vid
-
- # ---------- Gradio UI ----------
- with gr.Blocks(title="Codette Terminal – Text + Image + Video") as demo:
-     gr.Markdown("## 🧬 Codette Terminal (Text + Image + Video, CPU-Friendly)")
-     gr.Markdown("Type a prompt and select your model. Optionally generate images or videos. Type `'exit'` to reset session.")
-
-     session_id = gr.Textbox(value="session_default", visible=False)
-     model_dropdown = gr.Dropdown(choices=list(AVAILABLE_MODELS.keys()), value="GPT-2 (small, fast)", label="Choose Language Model")
-     generate_image_toggle = gr.Checkbox(label="Also generate image?", value=False, interactive=image_enabled)
-     generate_video_toggle = gr.Checkbox(label="Also generate video?", value=False, interactive=video_enabled)
-     user_input = gr.Textbox(label="Your Prompt", placeholder="e.g. A robot dreaming on Mars", lines=1)
-     output_text = gr.Textbox(label="Codette Output", lines=15, interactive=False)
-     output_image = gr.Image(label="Generated Image")
-     output_video = gr.Video(label="Generated Video")
-
-     user_input.submit(
-         fn=codette_terminal,
-         inputs=[user_input, model_dropdown, generate_image_toggle, generate_video_toggle, session_id],
-         outputs=[output_text, output_image, output_video]
-     )
-
- if __name__ == "__main__":
-     demo.launch()
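If the file is ever restored (e.g. git checkout 0462aae -- app.py), note one pitfall in the deleted video path: it passed video_pipeline(prompt).frames directly to imageio.mimsave, but recent diffusers releases nest the returned frames per prompt, so the sequence has to be unwrapped first. Below is a minimal standalone sketch of that step, assuming a recent diffusers with the export_to_video helper (which needs opencv-python) and the same damo-vilab checkpoint; it is an illustration, not code from this repository.

    # Hedged sketch: standalone version of the deleted app's text-to-video
    # step, adjusted for newer diffusers where output .frames is a list of
    # per-prompt frame sequences (hence the [0] below).
    from diffusers import DiffusionPipeline
    from diffusers.utils import export_to_video

    pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
    pipe.to("cpu")  # CPU-only, as in the deleted app; expect slow inference

    # Fewer steps than the app's 50, just to keep CPU runtime manageable
    result = pipe("A robot dreaming on Mars", num_inference_steps=25)
    video_frames = result.frames[0]  # frame sequence for the single prompt

    # export_to_video handles dtype conversion and MP4 writing internally
    path = export_to_video(video_frames, "codette_demo.mp4")
    print(path)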