Raiff1982 committed on
Commit
919ef54
·
verified ·
1 Parent(s): d81d3c2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -32
app.py CHANGED
@@ -1,42 +1,42 @@
1
- # This is a Gradio app that provides a text-based chat interface with optional image and video generation.
2
  import gradio as gr
3
  import numpy as np
4
  import tempfile
5
  import imageio
 
 
 
6
 
7
- # ---------- Setup ----------
8
  AVAILABLE_MODELS = {
9
  "GPT-2 (small, fast)": "gpt2",
10
  "Falcon (TII UAE)": "tiiuae/falcon-7b-instruct",
11
  "Mistral (OpenAccess)": "mistralai/Mistral-7B-v0.1"
12
  }
13
 
14
- # Load text-to-image model using diffusers (correct API)
 
 
 
 
15
  try:
16
- from diffusers import DiffusionPipeline
17
- image_generator = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
18
- image_generator.to("cpu")
19
  image_enabled = True
20
- except ImportError:
 
21
  image_generator = None
22
  image_enabled = False
23
- print("[Image model error]: diffusers library not found")
24
 
25
- # Load text-to-video model
26
  try:
27
- from diffusers import DiffusionPipeline
28
- video_pipeline = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
29
- video_pipeline.to("cpu")
30
  video_enabled = True
31
- except ImportError:
 
32
  video_pipeline = None
33
  video_enabled = False
34
- print("[Video model error]: diffusers library not found")
35
-
36
- text_model_cache = {}
37
- chat_memory = {}
38
 
39
- # ---------- Core Function ----------
40
  def codette_terminal(prompt, model_name, generate_image, generate_video, session_id):
41
  if session_id not in chat_memory:
42
  chat_memory[session_id] = []
@@ -45,18 +45,24 @@ def codette_terminal(prompt, model_name, generate_image, generate_video, session
45
  chat_memory[session_id] = []
46
  return "🧠 Codette signing off... Session reset.", None, None
47
 
48
- # Load the text generation model if it's not already in the cache
49
  if model_name not in text_model_cache:
50
- text_model_cache[model_name] = gr.pipelines.TextGeneration(model=AVAILABLE_MODELS[model_name])
 
 
 
 
51
  generator = text_model_cache[model_name]
52
- response = generator(prompt, max_length=100, num_return_sequences=1, do_sample=True)[0]['generated_text'].strip()
 
 
 
 
53
 
54
- # Update the chat log
55
  chat_memory[session_id].append(f"πŸ–‹οΈ You > {prompt}")
56
  chat_memory[session_id].append(f"🧠 Codette > {response}")
57
  chat_log = "\n".join(chat_memory[session_id][-10:])
58
 
59
- # Generate image if requested and image generation is enabled
60
  img = None
61
  if generate_image and image_enabled:
62
  try:
@@ -64,7 +70,6 @@ def codette_terminal(prompt, model_name, generate_image, generate_video, session
64
  except Exception as e:
65
  chat_log += f"\n[Image error]: {e}"
66
 
67
- # Generate video if requested and video generation is enabled
68
  vid = None
69
  if generate_video and video_enabled:
70
  try:
@@ -77,13 +82,11 @@ def codette_terminal(prompt, model_name, generate_image, generate_video, session
77
 
78
  return chat_log, img, vid
79
 
80
- # ---------- Gradio UI ----------
81
- with gr.Blocks(title="Codette Terminal – Text + Image + Video") as demo:
82
- gr.Markdown("## 🧬 Codette Terminal (Text + Image + Video, CPU-Friendly)")
83
- gr.Markdown("Type a prompt and select your model. Optionally generate images or videos. Type `'exit'` to reset session.")
84
-
85
  session_id = gr.Textbox(value="session_default", visible=False)
86
- model_dropdown = gr.Dropdown(choices=list(AVAILABLE_MODELS.keys()), value="GPT-2 (small, fast)", label="Choose Language Model")
87
  generate_image_toggle = gr.Checkbox(label="Also generate image?", value=False, interactive=image_enabled)
88
  generate_video_toggle = gr.Checkbox(label="Also generate video?", value=False, interactive=video_enabled)
89
  user_input = gr.Textbox(label="Your Prompt", placeholder="e.g. A robot dreaming on Mars", lines=1)
@@ -92,10 +95,10 @@ with gr.Blocks(title="Codette Terminal – Text + Image + Video") as demo:
92
  output_video = gr.Video(label="Generated Video")
93
 
94
  user_input.submit(
95
- fn=codette_terminal,
96
  inputs=[user_input, model_dropdown, generate_image_toggle, generate_video_toggle, session_id],
97
  outputs=[output_text, output_image, output_video]
98
  )
99
 
100
  if __name__ == "__main__":
101
- demo.launch(show_error=True)
 
 
1
  import gradio as gr
2
  import numpy as np
3
  import tempfile
4
  import imageio
5
+ import torch
6
+ from transformers import pipeline
7
+ from diffusers import DiffusionPipeline
8
 
9
+ # ---------- Settings ----------
10
  AVAILABLE_MODELS = {
11
  "GPT-2 (small, fast)": "gpt2",
12
  "Falcon (TII UAE)": "tiiuae/falcon-7b-instruct",
13
  "Mistral (OpenAccess)": "mistralai/Mistral-7B-v0.1"
14
  }
15
 
16
+ device = "cuda" if torch.cuda.is_available() else "cpu"
17
+ text_model_cache = {}
18
+ chat_memory = {}
19
+
20
+ # ---------- Load Optional Models ----------
21
  try:
22
+ image_generator = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 if device == "cuda" else torch.float32)
23
+ image_generator.to(device)
 
24
  image_enabled = True
25
+ except Exception as e:
26
+ print(f"[Image Load Error]: {e}")
27
  image_generator = None
28
  image_enabled = False
 
29
 
 
30
  try:
31
+ video_pipeline = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16 if device == "cuda" else torch.float32)
32
+ video_pipeline.to(device)
 
33
  video_enabled = True
34
+ except Exception as e:
35
+ print(f"[Video Load Error]: {e}")
36
  video_pipeline = None
37
  video_enabled = False
 
 
 
 
38
 
39
+ # ---------- Core Logic ----------
40
  def codette_terminal(prompt, model_name, generate_image, generate_video, session_id):
41
  if session_id not in chat_memory:
42
  chat_memory[session_id] = []
 
45
  chat_memory[session_id] = []
46
  return "🧠 Codette signing off... Session reset.", None, None
47
 
48
+ # Load text model if not cached
49
  if model_name not in text_model_cache:
50
+ try:
51
+ text_model_cache[model_name] = pipeline("text-generation", model=AVAILABLE_MODELS[model_name], device=0 if device == "cuda" else -1)
52
+ except Exception as e:
53
+ return f"[Text model error]: {e}", None, None
54
+
55
  generator = text_model_cache[model_name]
56
+ try:
57
+ output = generator(prompt, max_length=100, do_sample=True, num_return_sequences=1)
58
+ response = output[0]['generated_text'].strip()
59
+ except Exception as e:
60
+ response = f"[Text generation error]: {e}"
61
 
 
62
  chat_memory[session_id].append(f"πŸ–‹οΈ You > {prompt}")
63
  chat_memory[session_id].append(f"🧠 Codette > {response}")
64
  chat_log = "\n".join(chat_memory[session_id][-10:])
65
 
 
66
  img = None
67
  if generate_image and image_enabled:
68
  try:
 
70
  except Exception as e:
71
  chat_log += f"\n[Image error]: {e}"
72
 
 
73
  vid = None
74
  if generate_video and video_enabled:
75
  try:
 
82
 
83
  return chat_log, img, vid
84
 
85
+ # ---------- Gradio App ----------
86
+ with gr.Blocks(title="Codette Terminal (Hugging Face Edition)") as demo:
87
+ gr.Markdown("## 🧬 Codette Terminal\nA text + image + video AI powered by Hugging Face + Gradio. Type `'exit'` to reset the session.")
 
 
88
  session_id = gr.Textbox(value="session_default", visible=False)
89
+ model_dropdown = gr.Dropdown(choices=list(AVAILABLE_MODELS.keys()), value="GPT-2 (small, fast)", label="Choose a Language Model")
90
  generate_image_toggle = gr.Checkbox(label="Also generate image?", value=False, interactive=image_enabled)
91
  generate_video_toggle = gr.Checkbox(label="Also generate video?", value=False, interactive=video_enabled)
92
  user_input = gr.Textbox(label="Your Prompt", placeholder="e.g. A robot dreaming on Mars", lines=1)
 
95
  output_video = gr.Video(label="Generated Video")
96
 
97
  user_input.submit(
98
+ codette_terminal,
99
  inputs=[user_input, model_dropdown, generate_image_toggle, generate_video_toggle, session_id],
100
  outputs=[output_text, output_image, output_video]
101
  )
102
 
103
  if __name__ == "__main__":
104
+ demo.launch()