Raiff1982 committed
Commit 28a2cfa · verified · 1 Parent(s): f171e2f

Update app.py

Files changed (1):
  app.py +41 -23
app.py CHANGED
@@ -1,38 +1,56 @@
  import gradio as gr
  from transformers import pipeline, set_seed

- # Load Hugging Face model (adjust as needed)
- generator = pipeline("text-generation", model="gpt2")

  set_seed(42)

- chat_history = []

- # Text generation function
- def codette_terminal(user_input):
-     global chat_history
-     if user_input.lower() in ["exit", "quit"]:
-         chat_history = []
-         return "🧠 Codette signing off. Type again to restart."
-
-     output = generator(user_input, max_length=100, num_return_sequences=1)
      response = output[0]['generated_text'].strip()

-     # Update terminal-style chat log
-     chat_history.append(f"🖋️ You > {user_input}")
-     chat_history.append(f"🧠 Codette > {response}")
-     return "\n".join(chat_history[-10:])  # Keep last 10 entries for brevity

- # Gradio Interface
- with gr.Blocks(title="Codette Terminal") as demo:
-     gr.Markdown("## 🧬 Codette Terminal Interface (Hugging Face Edition)")
-     gr.Markdown("Type your message below. Type `'exit'` to reset conversation.\n")

-     with gr.Row():
-         input_box = gr.Textbox(label="Your input", placeholder="Ask me anything...", lines=1)
-         output_box = gr.Textbox(label="Codette Output", lines=15, interactive=False)

-     input_box.submit(fn=codette_terminal, inputs=input_box, outputs=output_box)

- # Launch in HF Space
  if __name__ == "__main__":
      demo.launch()

  import gradio as gr
  from transformers import pipeline, set_seed

+ # Preload models into a dictionary for selection
+ AVAILABLE_MODELS = {
+     "GPT-2 (small, fast)": "gpt2",
+     "Falcon (TII UAE)": "tiiuae/falcon-7b-instruct",
+     "Mistral (OpenAccess)": "mistralai/Mistral-7B-v0.1"
+ }
+
  set_seed(42)

+ # Cache loaded models to avoid reloading
+ model_cache = {}
+
+ def load_model(model_name):
+     if model_name not in model_cache:
+         model_id = AVAILABLE_MODELS[model_name]
+         model_cache[model_name] = pipeline("text-generation", model=model_id)
+     return model_cache[model_name]
+
+ # Chat history buffer
+ chat_memory = {}
+
+ # Terminal response function
+ def codette_terminal(user_input, model_name, session_id):
+     if session_id not in chat_memory:
+         chat_memory[session_id] = []
+
+     if user_input.lower() in ['exit', 'quit']:
+         chat_memory[session_id] = []
+         return "🧠 Codette signing off... Session reset."
+
+     generator = load_model(model_name)
+     output = generator(user_input, max_length=100, num_return_sequences=1, do_sample=True)
      response = output[0]['generated_text'].strip()

+     chat_memory[session_id].append(f"🖋️ You > {user_input}")
+     chat_memory[session_id].append(f"🧠 Codette > {response}")
+     return "\n".join(chat_memory[session_id][-10:])  # limit memory output

+ # UI
+ with gr.Blocks(title="Codette Terminal - Multi-Model AI") as demo:
+     gr.Markdown("## 🧬 Codette Terminal (Multi-Model, Hugging Face Edition)")
+     gr.Markdown("Type something below. Use the dropdown to switch models. Type `'exit'` to reset session.")

+     session_id = gr.Textbox(label="Session ID (hidden)", value="session_default", visible=False)
+     model_select = gr.Dropdown(choices=list(AVAILABLE_MODELS.keys()), value="GPT-2 (small, fast)", label="Choose AI Model")
+     user_input = gr.Textbox(label="Your Prompt", placeholder="Ask me anything...")
+     output_box = gr.Textbox(label="Terminal Output", lines=20, interactive=False)

+     user_input.submit(fn=codette_terminal, inputs=[user_input, model_select, session_id], outputs=output_box)

+ # For Hugging Face Spaces
  if __name__ == "__main__":
      demo.launch()
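
For anyone trying the change locally, a minimal smoke test of the new function signature might look like the following. This is a hypothetical snippet, not part of the commit; it assumes the updated app.py is importable from the working directory (importing it is cheap, since models now load lazily on first use).

import app  # builds the UI but does not launch it or load any model

# Two turns with the same session ID share one history buffer
# (the first call downloads/loads gpt2 via load_model)
print(app.codette_terminal("Hello, Codette!", "GPT-2 (small, fast)", "demo-session"))
print(app.codette_terminal("Tell me more.", "GPT-2 (small, fast)", "demo-session"))
# "exit" clears only that session's buffer
print(app.codette_terminal("exit", "GPT-2 (small, fast)", "demo-session"))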
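One caveat: the two new 7B entries (falcon-7b-instruct and Mistral-7B-v0.1) are orders of magnitude larger than gpt2, so load_model as committed will likely exhaust memory on a CPU-only Space. A sketch of how the pipeline call could be adapted for a GPU Space, using standard transformers keyword arguments (not part of this commit):

import torch
from transformers import pipeline

# torch_dtype and device_map are forwarded to from_pretrained();
# half precision roughly halves memory versus float32, and
# device_map="auto" (which requires the accelerate package) places
# weights on the available GPU(s) automatically.
generator = pipeline(
    "text-generation",
    model="tiiuae/falcon-7b-instruct",
    torch_dtype=torch.float16,
    device_map="auto",
)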
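Also worth noting: the hidden session_id textbox is fixed at "session_default", so every visitor to the Space shares a single chat_memory bucket. If per-visitor isolation is wanted, one untested variant would be to replace the hidden textbox with Gradio's per-session gr.State and assign a fresh UUID on page load:

import uuid
import gradio as gr

with gr.Blocks() as demo:
    # gr.State holds a value per browser session, unlike a shared constant
    session_id = gr.State()
    # generate a fresh ID each time the page loads
    demo.load(fn=lambda: str(uuid.uuid4()), inputs=None, outputs=session_id)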