Update app.py
app.py CHANGED
@@ -1,19 +1,18 @@
 import gradio as gr
 from PIL import Image
 from inference.main import MultiModalPhi2
+import os
+
+messages = []

-# Initialize the chatbot model
 multimodal_phi2 = MultiModalPhi2(
-    modelname_or_path="
+    modelname_or_path="RaviNaik/Llava-Phi2",
     temperature=0.2,
     max_new_tokens=1024,
     device="cpu",
 )

-# Initialize chatbot history
-messages = []

-# Function to add content to chatbot
 def add_content(chatbot, text, image, audio_upload, audio_mic) -> gr.Chatbot:
     textflag, imageflag, audioflag = False, False, False
     if text not in ["", None]:
@@ -31,10 +30,14 @@ def add_content(chatbot, text, image, audio_upload, audio_mic) -> gr.Chatbot:
             audioflag = True
     if not any([textflag, imageflag, audioflag]):
         # Raise an error if neither text nor file is provided
-        raise gr.Error("Enter a valid text, image
+        raise gr.Error("Enter a valid text, image or audio")
     return chatbot

-
+
+def clear_data():
+    return {"text": None, "image": None, "audio_upload": None, "audio_mic": None, "chatbot": []}
+
+
 def run(history, text, image, audio_upload, audio_mic):
     if text in [None, ""]:
         text = None
@@ -56,14 +59,37 @@ def run(history, text, image, audio_upload, audio_mic):

     history.append((None, outputs.title()))
     return history, None, None, None, None
-
-
+
+
+def print_like_dislike(x: gr.LikeData):
+    print(x.index, x.value, x.liked)
+
+
+def add_text(history, text):
+    history = history + [(text, None)]
+    return history, gr.Textbox(value="", interactive=False)
+
+
+def add_file(history, file):
+    history = history + [((file.name,), None)]
+    return history
+
+
+def bot(history):
+    response = "**That's cool!**"
+    history[-1][1] = ""
+    for character in response:
+        history[-1][1] += character
+        time.sleep(0.05)
+        yield history
+
+
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot(
         [],
         elem_id="chatbot",
         bubble_full_width=False,
-        avatar_images=(None,
+        avatar_images=(None, (os.path.join(os.path.dirname(__file__), "avatar.png"))),
     )

     with gr.Row():
@@ -75,18 +101,15 @@ with gr.Blocks() as demo:
         )
         btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])

-    txt_msg = txt.submit(add_content, [chatbot, txt], [chatbot, txt], queue=False).then(
-        run, [chatbot, txt, None, None
+    txt_msg = txt.submit(add_content, [chatbot, txt, None, None], [chatbot, txt, None, None], queue=False).then(
+        run, [chatbot, txt, None, None], [chatbot, txt, None, None], api_name="bot_response"
     )
     txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
-    file_msg = btn.upload(add_content, [chatbot, None, None, btn
-        run, [chatbot, None, None,
+    file_msg = btn.upload(add_content, [chatbot, None, None, btn], [chatbot, None, None, None], queue=False).then(
+        run, [chatbot, None, None, btn], [chatbot, None, None, None]
     )

     chatbot.like(print_like_dislike, None, None)

-
-
-# Launch the Gradio UI
 demo.queue()
 demo.launch()
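The functional core of this update is the model initialization: the checkpoint is now pinned to RaviNaik/Llava-Phi2 and loaded on CPU. Below is a quick standalone sanity check of that block — a sketch, not part of app.py; it only reuses the constructor arguments visible in the diff and assumes it is run from the Space root so that inference.main is importable. How the model is invoked inside run() is not shown in the diff, so nothing beyond construction is assumed here.

# Sanity-check sketch: construct the model exactly as the updated app.py does.
# Assumes execution from the Space root so `inference.main` resolves; only the
# constructor arguments shown in the diff are used.
from inference.main import MultiModalPhi2

model = MultiModalPhi2(
    modelname_or_path="RaviNaik/Llava-Phi2",
    temperature=0.2,
    max_new_tokens=1024,
    device="cpu",
)
print("Loaded", type(model).__name__, "with checkpoint RaviNaik/Llava-Phi2")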
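The helper functions added at the bottom of the new file (print_like_dislike, add_text, add_file, bot) look like they were carried over from the stock Gradio chatbot demo; only print_like_dislike is actually wired up, via chatbot.like. Note that bot calls time.sleep, and import time does not appear in the lines shown, so it would have to be added before bot is ever used. The snippet below is a minimal sketch of chaining bot after add_text with the same .then() pattern already used for txt_msg — an illustration only, not part of this commit; it assumes import time has been added and reuses the txt and chatbot components defined in app.py.

# Illustration only — not part of the commit. Requires `import time` for bot().
# Reuses the existing components txt (gr.Textbox) and chatbot (gr.Chatbot) and
# streams bot()'s canned reply back into the chat, mirroring the txt_msg chain.
demo_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
    bot, chatbot, chatbot
)
demo_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)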