# CodetteVision / app.py
# Author: Raiff1982 — revision d90bae9 (verified)
from diffusers import DiffusionPipeline
import tempfile

import torch  # required for torch.float16 below; was missing from the original imports

# Load the text-to-video generation model (swap the checkpoint id to try others).
# NOTE(review): fp16 weights + .to("cuda") assume a CUDA GPU is available;
# this will raise at startup on a CPU-only machine — confirm the deployment target.
video_pipeline = DiffusionPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
)
video_pipeline = video_pipeline.to("cuda")
def codette_terminal(prompt, model_name, generate_image, generate_video, session_id):
    """Handle one chat turn for the Codette terminal UI.

    Args:
        prompt: The user's input text. "exit"/"quit" (any case) resets the session.
        model_name: Key passed to ``load_text_model`` to pick the text generator.
        generate_image: When truthy, also produce an image for the prompt.
        generate_video: When truthy, also produce a short video for the prompt.
        session_id: Key into the module-level ``chat_memory`` dict of chat histories.

    Returns:
        A 3-tuple ``(chat_log, image, video)`` where ``chat_log`` is the last
        10 chat lines joined with newlines, ``image`` is a generated image or
        None, and ``video`` is a filesystem path to an .mp4 or None.
    """
    # Lazily create this session's history.
    if session_id not in chat_memory:
        chat_memory[session_id] = []

    # Exit command: wipe the session and return a sign-off with no media.
    if prompt.lower() in ["exit", "quit"]:
        chat_memory[session_id] = []
        return "🧠 Codette signing off... Session reset.", None, None

    # Text response via the selected model.
    generator = load_text_model(model_name)
    response = generator(prompt, max_length=100, num_return_sequences=1, do_sample=True)[0]['generated_text'].strip()
    chat_memory[session_id].append(f"🖋️ You > {prompt}")
    chat_memory[session_id].append(f"🧠 Codette > {response}")
    chat_log = "\n".join(chat_memory[session_id][-10:])  # show only the last 10 lines

    image = None
    video = None
    if generate_image:
        image = image_generator(prompt)[0]['image']
    if generate_video:
        # Generate frames from the prompt with the module-level pipeline.
        video_frames = video_pipeline(prompt, num_inference_steps=50).frames
        # Reserve a persistent temp path for the .mp4. Close the handle
        # immediately — the original leaked an open file descriptor by
        # taking .name without ever closing the NamedTemporaryFile.
        tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
        temp_video_path = tmp.name
        tmp.close()
        import imageio
        imageio.mimsave(temp_video_path, video_frames, fps=8)
        video = temp_video_path
    return chat_log, image, video
# --- Gradio UI wiring ---
# NOTE(review): gr, user_input, model_dropdown, generate_image_toggle,
# session_id, output_text and output_image are presumably defined earlier
# in the file (not visible in this chunk) — verify before moving this code.

# Checkbox controlling whether codette_terminal also renders a video.
generate_video_toggle = gr.Checkbox(label="Also generate video?", value=False)
# Component that displays the generated .mp4 returned by codette_terminal.
output_video = gr.Video(label="AI-Generated Video")

# On submit, run codette_terminal and route its three return values
# (chat_log, image, video) to the three output components, in order.
user_input.submit(
fn=codette_terminal,
inputs=[user_input, model_dropdown, generate_image_toggle, generate_video_toggle, session_id],
outputs=[output_text, output_image, output_video]
)