Harshil Patel committed
Commit · 7f35293 · 1 Parent(s): eb835bc
re add original main.py
main.py ADDED
@@ -0,0 +1,87 @@
+from typing import List
+import gradio as gr
+
+import base64
+from src.manager.manager import GeminiManager, Mode
+from enum import Enum
+
+_logo_bytes = open("HASHIRU_LOGO.png", "rb").read()
+_logo_b64 = base64.b64encode(_logo_bytes).decode()
+_header_html = f"""
+<div style="
+    display: flex;
+    flex-direction: row;
+    align-items: center;
+    justify-content: flex-start;
+    width: 30%;
+">
+    <img src="data:image/png;base64,{_logo_b64}" width="40" class="logo"/>
+    <h1>
+        HASHIRU AI
+    </h1>
+</div>
+"""
+css = """
+.logo { margin-right: 20px; }
+"""
+
+
+def run_model(message, history):
+    if 'text' in message:
+        history.append({
+            "role": "user",
+            "content": message['text']
+        })
+    if 'files' in message:
+        for file in message['files']:
+            history.append({
+                "role": "user",
+                "content": (file,)
+            })
+    yield "", history
+    for messages in model_manager.run(history):
+        yield "", messages
+
+
+with gr.Blocks(css=css, fill_width=True, fill_height=True) as demo:
+    model_manager = GeminiManager(
+        gemini_model="gemini-2.0-flash", modes=[mode for mode in Mode])
+
+    def update_model(modeIndexes: List[int]):
+        modes = [Mode(i+1) for i in modeIndexes]
+        print(f"Selected modes: {modes}")
+        model_manager.set_modes(modes)
+
+    with gr.Column(scale=1):
+        with gr.Row(scale=0):
+            gr.Markdown(_header_html)
+            model_dropdown = gr.Dropdown(
+                choices=[mode.name for mode in Mode],
+                value=model_manager.get_current_modes,
+                interactive=True,
+                type="index",
+                multiselect=True,
+                label="Select Modes",
+            )
+
+            model_dropdown.change(
+                fn=update_model, inputs=model_dropdown, outputs=[])
+        with gr.Row(scale=1):
+            chatbot = gr.Chatbot(
+                avatar_images=("HASHIRU_2.png", "HASHIRU.png"),
+                type="messages",
+                show_copy_button=True,
+                editable="user",
+                scale=1,
+                render_markdown=True,
+                placeholder="Type your message here...",
+            )
+        gr.ChatInterface(fn=run_model,
+                         type="messages",
+                         chatbot=chatbot,
+                         additional_outputs=[chatbot],
+                         save_history=True,
+                         editable=True,
+                         multimodal=True,)
+if __name__ == "__main__":
+    demo.launch()
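
Note: the interface above imports GeminiManager and Mode from src.manager.manager, which is not part of this diff. The stub below is only a sketch inferred from how main.py uses them: Mode(i + 1) suggests enum values starting at 1, get_current_modes is passed as the dropdown's callable value, and run(history) is iterated as a generator of updated message lists. The member names, signatures, and behavior here are assumptions, not the real module.

# Hypothetical stand-in for src/manager/manager.py, inferred from main.py's usage above.
# The real Mode members and GeminiManager behavior are not part of this commit.
from enum import Enum
from typing import Dict, List


class Mode(Enum):
    # Placeholder members: update_model maps dropdown indexes with Mode(i + 1),
    # so the enum values are assumed to start at 1.
    ECONOMY = 1
    BALANCED = 2
    PERFORMANCE = 3


class GeminiManager:
    def __init__(self, gemini_model: str, modes: List[Mode]):
        self.gemini_model = gemini_model
        self.modes = modes

    def set_modes(self, modes: List[Mode]) -> None:
        self.modes = modes

    def get_current_modes(self) -> List[str]:
        # main.py passes this method (uncalled) as the Dropdown's `value`,
        # so Gradio invokes it to compute the initial selection.
        return [mode.name for mode in self.modes]

    def run(self, history: List[Dict]):
        # Generator of progressively updated message lists; here it just
        # appends a canned assistant reply so the chat UI has something to show.
        yield history + [{"role": "assistant", "content": "(stub reply)"}]

With the real package (or such a stub) importable, running python main.py launches the Gradio app via demo.launch().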