Update app.py
app.py
CHANGED
@@ -20,6 +20,7 @@ chat_memory = {}
 try:
     image_generator = DiffusionPipeline.from_pretrained(
         "runwayml/stable-diffusion-v1-5",
+        safety_checker=None,
         torch_dtype=torch.float16 if device == "cuda" else torch.float32
     )
     image_generator.to(device)
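For reference, a minimal standalone sketch of what the added `safety_checker=None` argument does when loading this model (assumes `torch` and `diffusers` are installed; `pipe` is a hypothetical name, not from the commit):

```python
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# safety_checker=None skips loading the NSFW checker component, so
# generated images come back unfiltered and startup uses less memory.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    safety_checker=None,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe.to(device)
```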
@@ -33,6 +34,7 @@ except Exception as e:
 try:
     video_pipeline = DiffusionPipeline.from_pretrained(
         "damo-vilab/text-to-video-ms-1.7b",
+        safety_checker=None,
         torch_dtype=torch.float16 if device == "cuda" else torch.float32
     )
     video_pipeline.to(device)
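Worth noting: the text-to-video pipeline ships without a safety-checker component, so recent `diffusers` releases typically just warn that the unexpected `safety_checker` keyword is ignored. The added line should be harmless here, but likely has no effect.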
@@ -43,7 +45,7 @@ except Exception as e:
     video_enabled = False

 # ---------- Streamed Response Generator ----------
-def codette_terminal(prompt, model_name, generate_image, generate_video, session_id):
+def codette_terminal(prompt, model_name, generate_image, generate_video, session_id, batch_size, video_steps, fps):
     if session_id not in chat_memory:
         chat_memory[session_id] = []

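The handler stays a generator after this change: Gradio treats a `yield`ing function as a streaming callback and refreshes every bound output on each yield. A minimal sketch of that pattern (hypothetical `echo` handler, not from this commit):

```python
import gradio as gr

def echo(prompt):
    # Each yield must supply one value per bound output: (text, gallery, video).
    yield f"thinking about: {prompt}", None, None
    yield f"done: {prompt}", None, None

with gr.Blocks() as demo:
    box = gr.Textbox(label="Prompt")
    text_out = gr.Textbox(label="Output")
    img_out = gr.Gallery(label="Images")
    vid_out = gr.Video(label="Video")
    box.submit(echo, inputs=[box], outputs=[text_out, img_out, vid_out])

demo.launch()
```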
@@ -89,10 +91,11 @@ def codette_terminal(prompt, model_name, generate_image, generate_video, session
     chat_memory[session_id].append(f"🧠 Codette > {output}")

     # Image Generation
-    …
+    imgs = None
     if generate_image and image_enabled:
         try:
-            …
+            result = image_generator(prompt, num_images_per_prompt=batch_size)
+            imgs = result.images
         except Exception as e:
             response_so_far += f"\n[Image error]: {e}"

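`num_images_per_prompt` is a standard argument of the Stable Diffusion call, and `result.images` holds the whole batch as PIL images, which is what lets the UI swap a single image widget for a gallery below. A sketch, reusing the hypothetical `pipe` from the earlier snippet:

```python
# One pipeline call produces the whole batch; .images is a list of PIL images.
result = pipe("a robot dreaming on Mars", num_images_per_prompt=4)
for i, img in enumerate(result.images):
    img.save(f"robot_{i}.png")  # hypothetical output filenames
```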
@@ -100,32 +103,36 @@ def codette_terminal(prompt, model_name, generate_image, generate_video, session
     vid = None
     if generate_video and video_enabled:
         try:
-            …
+            result = video_pipeline(prompt, num_inference_steps=video_steps)
+            frames = result.frames
             temp_video_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
-            imageio.mimsave(temp_video_path, …
+            imageio.mimsave(temp_video_path, frames, fps=fps)
             vid = temp_video_path
         except Exception as e:
             response_so_far += f"\n[Video error]: {e}"

-    yield "\n".join(chat_memory[session_id][-10:]), …
+    yield "\n".join(chat_memory[session_id][-10:]), imgs, vid

 # ---------- Gradio UI ----------
 with gr.Blocks(title="🧬 Codette Terminal – Streamed AI Chat") as demo:
-    gr.Markdown("## 🧬 Codette Terminal (Chat + Image + Video)")
-    gr.Markdown("Type a prompt…
+    gr.Markdown("## 🧬 Codette Terminal (Chat + Image + Video + Batch + NSFW OK)")
+    gr.Markdown("Type a prompt, select your model, and configure generation options. Type `'exit'` to reset.")

     session_id = gr.Textbox(value="session_default", visible=False)
-    model_dropdown = gr.Dropdown(choices=list(AVAILABLE_MODELS.keys()), value="GPT-2 (small, fast)", label="…
-    generate_image_toggle = gr.Checkbox(label="…
-    generate_video_toggle = gr.Checkbox(label="…
+    model_dropdown = gr.Dropdown(choices=list(AVAILABLE_MODELS.keys()), value="GPT-2 (small, fast)", label="Language Model")
+    generate_image_toggle = gr.Checkbox(label="Generate Image(s)?", value=False, interactive=image_enabled)
+    generate_video_toggle = gr.Checkbox(label="Generate Video?", value=False, interactive=video_enabled)
+    batch_size_slider = gr.Slider(label="Number of Images", minimum=1, maximum=4, step=1, value=1)
+    video_steps_slider = gr.Slider(label="Video Inference Steps", minimum=10, maximum=100, step=10, value=50)
+    fps_slider = gr.Slider(label="Video FPS", minimum=4, maximum=24, step=2, value=8)
     user_input = gr.Textbox(label="Your Prompt", placeholder="e.g. A robot dreaming on Mars", lines=1)
     output_text = gr.Textbox(label="Codette Output", lines=15, interactive=False)
-    output_image = gr.…
+    output_image = gr.Gallery(label="Generated Image(s)").style(grid=2)
     output_video = gr.Video(label="Generated Video")

     user_input.submit(
         codette_terminal,
-        inputs=[user_input, model_dropdown, generate_image_toggle, generate_video_toggle, session_id],
+        inputs=[user_input, model_dropdown, generate_image_toggle, generate_video_toggle, session_id, batch_size_slider, video_steps_slider, fps_slider],
         outputs=[output_text, output_image, output_video],
         concurrency_limit=1,
         queue=True,
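Two caveats on the video branch, sketched below. Depending on the `diffusers` version, `result.frames` can be a nested per-prompt list, so `result.frames[0]` may be needed before encoding; and writing `.mp4` through `imageio.mimsave` requires the `imageio-ffmpeg` backend. Separately, `gr.Gallery(...).style(grid=2)` is Gradio 3.x API; Gradio 4 removed `.style()` in favor of constructor arguments such as `columns=2`.

```python
import tempfile
import imageio

# Hypothetical standalone version of the new video path (assumes the
# video_pipeline loaded earlier in app.py).
result = video_pipeline("a robot dreaming on Mars", num_inference_steps=50)
frames = result.frames  # on newer diffusers this may need result.frames[0]
path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
imageio.mimsave(path, frames, fps=8)  # encode the frame stack as an mp4
print(path)
```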