Raiff1982 committed on
Commit
0c5a382
·
verified ·
1 Parent(s): 15c06cc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +93 -48
app.py CHANGED
@@ -1,53 +1,98 @@
1
# This Gradio app allows users to interact with a chatbot that can generate text and images based on user prompts.
import gradio as gr
import numpy as np
from transformers_js import pipeline  # Corrected import to use transformers_js instead of transformers_js_py
# NOTE(review): 'transformers_js' targets JS/Pyodide environments; a server-side
# Python app normally imports `pipeline` from 'transformers' — confirm this import resolves.

# Define the available models
# Maps the display name shown in the UI to the Hugging Face model id.
AVAILABLE_MODELS = {
    "GPT-2": "gpt2",
    "DALL-E": "dalle-mini/dalle-mini-1.3B"
}

# Initialize the text generation pipeline
text_generator = pipeline("text-generation", model=AVAILABLE_MODELS["GPT-2"])

# Initialize the image generation pipeline
# NOTE(review): "image-generation" is not a standard transformers pipeline task,
# and dalle-mini is not loadable through `pipeline()` — this line likely raises
# at import time; verify before relying on the image tab.
image_generator = pipeline("image-generation", model=AVAILABLE_MODELS["DALL-E"])
17
-
18
# Function to generate text
def generate_text(prompt, model):
    """Generate a short text completion for *prompt* with the selected model.

    Args:
        prompt: User-supplied text prompt.
        model: Display name chosen in the UI; only "GPT-2" is supported
            for text generation.

    Returns:
        The generated text, or an explanatory message when *model* is not
        a supported text-generation model.
    """
    # Fix: the former np.random.seed(42) call was removed — numpy's global RNG
    # does not seed torch/transformers sampling, so it provided no
    # reproducibility. Use transformers.set_seed() if determinism is needed.
    if model == "GPT-2":
        return text_generator(prompt, max_length=50, num_return_sequences=1)[0]['generated_text']
    return "Model not supported for text generation"
25
-
26
# Function to generate images
def generate_image(prompt, model):
    """Run the image pipeline for *prompt* when the DALL-E model is selected.

    Returns the first generated image, or an explanatory string for any
    other model choice.
    """
    # Guard clause: only the DALL-E entry is wired to the image pipeline.
    if model != "DALL-E":
        return "Model not supported for image generation"
    # Delegate to the module-level pipeline with fixed sampling settings.
    result = image_generator(prompt, num_inference_steps=50, guidance_scale=7.5)
    return result.images[0]
33
-
34
# Create the Gradio interface
# Two tabs share the same model choices; each tab wires its button to the
# matching generate_* function defined above.
with gr.Blocks() as demo:
    gr.Markdown("# Chatbot with Text and Image Generation")

    with gr.Tab("Text Generation"):
        text_prompt = gr.Textbox(label="Enter your text prompt")
        # The radio lists every model, but generate_text only accepts "GPT-2".
        text_model = gr.Radio(choices=list(AVAILABLE_MODELS.keys()), label="Choose a model", value="GPT-2")
        text_output = gr.Textbox(label="Generated Text")
        text_button = gr.Button("Generate Text")
        text_button.click(generate_text, inputs=[text_prompt, text_model], outputs=text_output)

    with gr.Tab("Image Generation"):
        image_prompt = gr.Textbox(label="Enter your image prompt")
        # Likewise, generate_image only accepts "DALL-E".
        image_model = gr.Radio(choices=list(AVAILABLE_MODELS.keys()), label="Choose a model", value="DALL-E")
        image_output = gr.Image(label="Generated Image")
        image_button = gr.Button("Generate Image")
        image_button.click(generate_image, inputs=[image_prompt, image_model], outputs=image_output)

# Launch the interface
# show_error=True surfaces server-side exceptions in the browser UI.
demo.launch(show_error=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch  # NOTE(review): not referenced directly below — confirm it is needed before removing.
from transformers import pipeline, set_seed
from diffusers import DiffusionPipeline
import tempfile
import imageio

# ---------- Setup ----------
# Display name (shown in the dropdown) -> Hugging Face model id.
AVAILABLE_MODELS = {
    "GPT-2 (small, fast)": "gpt2",
    "Falcon (TII UAE)": "tiiuae/falcon-7b-instruct",
    "Mistral (OpenAccess)": "mistralai/Mistral-7B-v0.1"
}

set_seed(42)  # Seed transformers sampling for reproducible text output.
text_model_cache = {}  # model display name -> loaded text-generation pipeline (filled lazily).

# Load text-to-image model using diffusers (correct API)
# Best-effort: any failure disables the image feature instead of crashing the app.
# NOTE(review): loading Stable Diffusion on CPU at import time is very slow — confirm acceptable.
try:
    image_generator = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    image_generator.to("cpu")
    image_enabled = True
except Exception as e:
    image_generator = None
    image_enabled = False
    print(f"[Image model error]: {e}")

# Load text-to-video model
# Same best-effort pattern as the image model above.
try:
    video_pipeline = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
    video_pipeline.to("cpu")
    video_enabled = True
except Exception as e:
    video_pipeline = None
    video_enabled = False
    print(f"[Video model error]: {e}")

# session_id -> list of rendered chat lines (user + bot), newest last.
chat_memory = {}
39
+
40
# ---------- Core Function ----------
def codette_terminal(prompt, model_name, generate_image, generate_video, session_id):
    """Handle one terminal turn: generate a reply and, optionally, media.

    Args:
        prompt: User input; "exit"/"quit" (any case, padded or not) resets the session.
        model_name: Key into AVAILABLE_MODELS selecting the text model.
        generate_image: When True (and the image model loaded), also render an image.
        generate_video: When True (and the video model loaded), also render a video.
        session_id: Opaque key identifying the chat session in chat_memory.

    Returns:
        Tuple of (chat_log_text, PIL image or None, video file path or None).
    """
    if session_id not in chat_memory:
        chat_memory[session_id] = []

    # Fix: normalize once so "Exit " / "QUIT" also trigger the reset
    # (the old `prompt.lower() in [...]` missed padded input).
    cleaned = prompt.strip()
    if cleaned.lower() in ("exit", "quit"):
        chat_memory[session_id] = []
        return "🧠 Codette signing off... Session reset.", None, None

    # Fix: ignore empty submissions instead of sending "" to the model.
    if not cleaned:
        return "\n".join(chat_memory[session_id][-10:]), None, None

    # Lazily load and cache one pipeline per model so switching models is cheap.
    if model_name not in text_model_cache:
        text_model_cache[model_name] = pipeline("text-generation", model=AVAILABLE_MODELS[model_name])
    generator = text_model_cache[model_name]
    response = generator(cleaned, max_length=100, num_return_sequences=1, do_sample=True)[0]['generated_text'].strip()

    chat_memory[session_id].append(f"🖋️ You > {cleaned}")
    chat_memory[session_id].append(f"🧠 Codette > {response}")
    chat_log = "\n".join(chat_memory[session_id][-10:])  # show only the last 5 turns

    img = None
    if generate_image and image_enabled:
        try:
            img = image_generator(cleaned).images[0]
        except Exception as e:
            chat_log += f"\n[Image error]: {e}"

    vid = None
    if generate_video and video_enabled:
        try:
            video_frames = video_pipeline(cleaned, num_inference_steps=50).frames
            # NOTE(review): depending on the diffusers version, .frames may be
            # batched (video_frames[0] holds the clip) — confirm installed API.
            # Fix: use a context manager so the temp-file handle is closed before
            # imageio writes to the path; delete=False keeps the file on disk
            # for Gradio to serve.
            with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
                temp_video_path = tmp.name
            imageio.mimsave(temp_video_path, video_frames, fps=8)
            vid = temp_video_path
        except Exception as e:
            chat_log += f"\n[Video error]: {e}"

    return chat_log, img, vid
76
+
77
# ---------- Gradio UI ----------
with gr.Blocks(title="Codette Terminal – Text + Image + Video") as demo:
    gr.Markdown("## 🧬 Codette Terminal (Text + Image + Video, CPU-Friendly)")
    gr.Markdown("Type a prompt and select your model. Optionally generate images or videos. Type `'exit'` to reset session.")

    # Hidden fixed session id — note all visitors currently share one history key.
    session_id = gr.Textbox(value="session_default", visible=False)
    model_dropdown = gr.Dropdown(choices=list(AVAILABLE_MODELS.keys()), value="GPT-2 (small, fast)", label="Choose Language Model")
    # Toggles are non-interactive when the matching model failed to load at startup.
    generate_image_toggle = gr.Checkbox(label="Also generate image?", value=False, interactive=image_enabled)
    generate_video_toggle = gr.Checkbox(label="Also generate video?", value=False, interactive=video_enabled)
    user_input = gr.Textbox(label="Your Prompt", placeholder="e.g. A robot dreaming on Mars", lines=1)
    output_text = gr.Textbox(label="Codette Output", lines=15, interactive=False)
    output_image = gr.Image(label="Generated Image")
    output_video = gr.Video(label="Generated Video")

    # Pressing Enter in the prompt box runs one codette_terminal turn.
    user_input.submit(
        fn=codette_terminal,
        inputs=[user_input, model_dropdown, generate_image_toggle, generate_video_toggle, session_id],
        outputs=[output_text, output_image, output_video]
    )

if __name__ == "__main__":
    demo.launch()