Raiff1982 committed on
Commit
f625c4a
·
verified ·
1 Parent(s): f371323

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -20
app.py CHANGED
@@ -5,7 +5,7 @@ from diffusers import DiffusionPipeline
5
  import tempfile
6
  import imageio
7
 
8
- # ---------- Models ----------
9
  AVAILABLE_MODELS = {
10
  "GPT-2 (small, fast)": "gpt2",
11
  "Falcon (TII UAE)": "tiiuae/falcon-7b-instruct",
@@ -14,16 +14,26 @@ AVAILABLE_MODELS = {
14
 
15
  set_seed(42)
16
  text_model_cache = {}
17
- image_generator = pipeline("text-to-image", model="CompVis/stable-diffusion-v1-4")
18
 
19
- # Try loading video model safely
 
 
 
 
 
 
 
 
 
 
20
  try:
21
  video_pipeline = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
 
22
  video_enabled = True
23
  except Exception as e:
24
  video_pipeline = None
25
  video_enabled = False
26
- print(f"[Video model not loaded]: {e}")
27
 
28
  chat_memory = {}
29
 
@@ -36,7 +46,6 @@ def codette_terminal(prompt, model_name, generate_image, generate_video, session
36
  chat_memory[session_id] = []
37
  return "🧠 Codette signing off... Session reset.", None, None
38
 
39
- # Load and run text model
40
  if model_name not in text_model_cache:
41
  text_model_cache[model_name] = pipeline("text-generation", model=AVAILABLE_MODELS[model_name])
42
  generator = text_model_cache[model_name]
@@ -46,33 +55,35 @@ def codette_terminal(prompt, model_name, generate_image, generate_video, session
46
  chat_memory[session_id].append(f"🧠 Codette > {response}")
47
  chat_log = "\n".join(chat_memory[session_id][-10:])
48
 
49
- # Image generation
50
- image = image_generator(prompt)[0]['image'] if generate_image else None
 
 
 
 
51
 
52
- # Video generation (if enabled and requested)
53
- video_path = None
54
  if generate_video and video_enabled:
55
  try:
56
  video_frames = video_pipeline(prompt, num_inference_steps=50).frames
57
  temp_video_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
58
  imageio.mimsave(temp_video_path, video_frames, fps=8)
59
- video_path = temp_video_path
60
- except Exception as ve:
61
- chat_log += f"\n[Video generation error]: {ve}"
62
 
63
- return chat_log, image, video_path
64
 
65
- # ---------- UI ----------
66
- with gr.Blocks(title="Codette Terminal – AI Text + Image + Video") as demo:
67
- gr.Markdown("## 🧬 Codette Terminal (Text + Image + Video, Hugging Face Edition)")
68
- gr.Markdown("Choose a model, type your prompt, and optionally generate an image or video.\nType `'exit'` to reset session.")
69
 
70
  session_id = gr.Textbox(value="session_default", visible=False)
71
  model_dropdown = gr.Dropdown(choices=list(AVAILABLE_MODELS.keys()), value="GPT-2 (small, fast)", label="Choose Language Model")
72
- generate_image_toggle = gr.Checkbox(label="Also generate image?", value=False)
73
  generate_video_toggle = gr.Checkbox(label="Also generate video?", value=False, interactive=video_enabled)
74
-
75
- user_input = gr.Textbox(label="Your Prompt", placeholder="e.g. A dragon flying over Tokyo", lines=1)
76
  output_text = gr.Textbox(label="Codette Output", lines=15, interactive=False)
77
  output_image = gr.Image(label="Generated Image")
78
  output_video = gr.Video(label="Generated Video")
 
5
  import tempfile
6
  import imageio
7
 
8
+ # ---------- Setup ----------
9
  AVAILABLE_MODELS = {
10
  "GPT-2 (small, fast)": "gpt2",
11
  "Falcon (TII UAE)": "tiiuae/falcon-7b-instruct",
 
14
 
15
  set_seed(42)
16
  text_model_cache = {}
 
17
 
18
+ # Load text-to-image model using diffusers (correct API)
19
+ try:
20
+ image_generator = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
21
+ image_generator.to("cpu")
22
+ image_enabled = True
23
+ except Exception as e:
24
+ image_generator = None
25
+ image_enabled = False
26
+ print(f"[Image model error]: {e}")
27
+
28
+ # Load text-to-video model
29
  try:
30
  video_pipeline = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
31
+ video_pipeline.to("cpu")
32
  video_enabled = True
33
  except Exception as e:
34
  video_pipeline = None
35
  video_enabled = False
36
+ print(f"[Video model error]: {e}")
37
 
38
  chat_memory = {}
39
 
 
46
  chat_memory[session_id] = []
47
  return "🧠 Codette signing off... Session reset.", None, None
48
 
 
49
  if model_name not in text_model_cache:
50
  text_model_cache[model_name] = pipeline("text-generation", model=AVAILABLE_MODELS[model_name])
51
  generator = text_model_cache[model_name]
 
55
  chat_memory[session_id].append(f"🧠 Codette > {response}")
56
  chat_log = "\n".join(chat_memory[session_id][-10:])
57
 
58
+ img = None
59
+ if generate_image and image_enabled:
60
+ try:
61
+ img = image_generator(prompt).images[0]
62
+ except Exception as e:
63
+ chat_log += f"\n[Image error]: {e}"
64
 
65
+ vid = None
 
66
  if generate_video and video_enabled:
67
  try:
68
  video_frames = video_pipeline(prompt, num_inference_steps=50).frames
69
  temp_video_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
70
  imageio.mimsave(temp_video_path, video_frames, fps=8)
71
+ vid = temp_video_path
72
+ except Exception as e:
73
+ chat_log += f"\n[Video error]: {e}"
74
 
75
+ return chat_log, img, vid
76
 
77
+ # ---------- Gradio UI ----------
78
+ with gr.Blocks(title="Codette Terminal – Text + Image + Video") as demo:
79
+ gr.Markdown("## 🧬 Codette Terminal (Text + Image + Video, CPU-Friendly)")
80
+ gr.Markdown("Type a prompt and select your model. Optionally generate images or videos. Type `'exit'` to reset session.")
81
 
82
  session_id = gr.Textbox(value="session_default", visible=False)
83
  model_dropdown = gr.Dropdown(choices=list(AVAILABLE_MODELS.keys()), value="GPT-2 (small, fast)", label="Choose Language Model")
84
+ generate_image_toggle = gr.Checkbox(label="Also generate image?", value=False, interactive=image_enabled)
85
  generate_video_toggle = gr.Checkbox(label="Also generate video?", value=False, interactive=video_enabled)
86
+ user_input = gr.Textbox(label="Your Prompt", placeholder="e.g. A robot dreaming on Mars", lines=1)
 
87
  output_text = gr.Textbox(label="Codette Output", lines=15, interactive=False)
88
  output_image = gr.Image(label="Generated Image")
89
  output_video = gr.Video(label="Generated Video")