Update app.py
app.py (CHANGED)

@@ -2,8 +2,6 @@ import gradio as gr
 from PIL import Image
 from inference.main import MultiModalPhi2

-messages = []
-
 multimodal_phi2 = MultiModalPhi2(
     modelname_or_path="Navyabhat/Llava-Phi2",
     temperature=0.2,
@@ -11,6 +9,12 @@ multimodal_phi2 = MultiModalPhi2(
     device="cpu",
 )

+theme = gr.themes.Default(primary_hue="blue", font=[gr.themes.GoogleFont("Inconsolata"), "Arial", "sans-serif"]).set(
+    loader_color="#FF0000",
+    button_primary_background_fill="*primary_200",
+    button_primary_background_fill_hover="*primary_300",
+)
+
 def add_content(chatbot, text, image, audio_upload, audio_mic) -> gr.Chatbot:
     textflag, imageflag, audioflag = False, False, False
     if text not in ["", None]:
@@ -58,18 +62,18 @@ def run(history, text, image, audio_upload, audio_mic):
     history.append((None, outputs.title()))
     return history, None, None, None, None

-with gr.Blocks() as demo:
+with gr.Blocks(theme=theme) as demo:
     gr.Markdown("## 🤖 Multi-modal LLM")
     gr.Markdown("This is a multi-modal LLM that takes text, image and audio as inputs.")

     with gr.Row():
         with gr.Column(scale=4):
-
+
             with gr.Box():
                 with gr.Row():
                     # Adding image
                     image = gr.Image(type="filepath", value=None)
-
+
                 with gr.Row():
                     # Add audio
                     audio_upload = gr.Audio(source="upload", type="filepath")
@@ -86,7 +90,7 @@ with gr.Blocks() as demo:
                     )

             with gr.Row():
-
+
                 prompt = gr.Textbox(
                     placeholder="Ask anything", lines=2, label="Query", value=None, scale=4
                 )
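Below is a minimal, self-contained sketch of the theming pattern this commit introduces, useful for previewing the theme outside the Space. It is an illustration, not the Space's code: the Blocks contents are placeholder components, and it assumes a Gradio 3.x environment consistent with the gr.Box and gr.Audio(source=...) calls elsewhere in app.py.

import gradio as gr

# Build a theme from the Default base; .set() overrides individual
# theme variables. Values like "*primary_200" reference shade 200 of
# the chosen primary_hue palette rather than a hard-coded color.
theme = gr.themes.Default(
    primary_hue="blue",
    font=[gr.themes.GoogleFont("Inconsolata"), "Arial", "sans-serif"],
).set(
    loader_color="#FF0000",
    button_primary_background_fill="*primary_200",
    button_primary_background_fill_hover="*primary_300",
)

# Passing theme= to gr.Blocks applies it to every component in the app.
with gr.Blocks(theme=theme) as demo:
    gr.Markdown("## Theme preview")  # placeholder content
    gr.Button("Primary button", variant="primary")

if __name__ == "__main__":
    demo.launch()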
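One compatibility note: app.py relies on 3.x-era Gradio APIs (gr.Box and the source= argument to gr.Audio were removed in Gradio 4), while the themes API used above arrived partway through the 3.x series. If the Space's requirements.txt does not already pin a version, a hypothetical pin would look like:

gradio>=3.24,<4  # hypothetical pin: themes API available, gr.Box/source= still present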