File size: 1,345 Bytes
c09e9ae
e513d6a
c09e9ae
e513d6a
 
 
c09e9ae
e513d6a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
import gradio as gr
from transformers import pipeline, set_seed

# Load Hugging Face model (adjust as needed).
# NOTE: this runs at import time — the model download/load happens before
# the UI is built, so Space startup blocks until it completes.
generator = pipeline("text-generation", model="gpt2")
set_seed(42)  # fix the sampling seed so generations are reproducible

# Module-level conversation log; mutated in place by codette_terminal().
# Holds alternating "You >" / "Codette >" lines (two entries per exchange).
chat_history = []

# Text generation function
def codette_terminal(user_input: str) -> str:
    """Generate one chat turn and return the terminal-style log.

    Appends the user line and the model's reply to the module-level
    ``chat_history`` and returns the last 10 log lines (5 exchanges)
    joined with newlines.

    Typing "exit" or "quit" (case- and whitespace-insensitive) clears
    the history and returns a sign-off message instead.
    """
    global chat_history

    # Normalize so "Exit " or " QUIT" also trigger the reset — the raw
    # comparison missed inputs with stray whitespace or capitals.
    command = user_input.strip()
    if command.lower() in ("exit", "quit"):
        chat_history = []
        return "🧠 Codette signing off. Type again to restart."

    # Ignore empty submissions instead of feeding "" to the model.
    if not command:
        return "\n".join(chat_history[-10:])

    output = generator(command, max_length=100, num_return_sequences=1)
    generated = output[0]['generated_text']
    # gpt2 echoes the prompt at the start of generated_text; drop it so the
    # log doesn't repeat the user's input inside Codette's reply.
    if generated.startswith(command):
        response = generated[len(command):].strip()
    else:
        response = generated.strip()

    # Update terminal-style chat log; keep last 10 lines (5 exchanges).
    chat_history.append(f"🖋️ You > {command}")
    chat_history.append(f"🧠 Codette > {response}")
    return "\n".join(chat_history[-10:])

# Gradio Interface: a minimal terminal-style chat page. Submitting the
# input textbox (Enter) runs codette_terminal and renders its returned
# log string into the read-only output textbox.
with gr.Blocks(title="Codette Terminal") as demo:
    gr.Markdown("## 🧬 Codette Terminal Interface (Hugging Face Edition)")
    gr.Markdown("Type your message below. Type `'exit'` to reset conversation.\n")

    with gr.Row():
        input_box = gr.Textbox(label="Your input", placeholder="Ask me anything...", lines=1)
    # interactive=False: output is display-only; only the callback writes it.
    output_box = gr.Textbox(label="Codette Output", lines=15, interactive=False)

    # Enter in the input box triggers the generation callback.
    input_box.submit(fn=codette_terminal, inputs=input_box, outputs=output_box)

# Launch in HF Space (guard lets the module be imported without starting a server).
if __name__ == "__main__":
    demo.launch()