Bryan Bimantaka (Monash University)
committed on
Commit
·
76938fd
1
Parent(s):
5d515de
add examples
Browse files
app.py
CHANGED
@@ -90,7 +90,7 @@ dataset = Dataset.from_dict(data, features=features)
|
|
90 |
|
91 |
dataset.add_faiss_index(column='embeddings')
|
92 |
|
93 |
-
def retrieve(query, top_k=
|
94 |
query_embedding = ST.encode([query])
|
95 |
scores, retrieved_examples = dataset.get_nearest_examples('embeddings', query_embedding, k=top_k)
|
96 |
|
@@ -100,69 +100,6 @@ def retrieve(query, top_k=3):
|
|
100 |
|
101 |
client = InferenceClient(BASE_MODEL)
|
102 |
|
103 |
-
# def respond(
|
104 |
-
# message,
|
105 |
-
# history: list[tuple[str, str]],
|
106 |
-
# max_tokens=512,
|
107 |
-
# temperature=0.5,
|
108 |
-
# top_p=0.9,
|
109 |
-
# ):
|
110 |
-
# # Retrieve top 3 relevant documents based on the user's query
|
111 |
-
# score, retrieved_docs = retrieve(message, top_k=TOP_K)
|
112 |
-
|
113 |
-
# # print(f"Score: {score}")
|
114 |
-
# if score <= 11:
|
115 |
-
# # Prepare the retrieved context
|
116 |
-
# context = "\n".join([f"{doc}" for i, doc in enumerate(retrieved_docs)])
|
117 |
-
# else:
|
118 |
-
# context = ""
|
119 |
-
|
120 |
-
# print(f"Feed:\n{context}")
|
121 |
-
|
122 |
-
# messages = [{"role": "system", "content": SYS_MSG}]
|
123 |
-
|
124 |
-
# for val in history:
|
125 |
-
# if val[0]:
|
126 |
-
# messages.append({"role": "user", "content": val[0]})
|
127 |
-
# if val[1]:
|
128 |
-
# messages.append({"role": "assistant", "content": val[1]})
|
129 |
-
|
130 |
-
# # messages.append({"role": "user", "content": message})
|
131 |
-
|
132 |
-
# # Append the current user message along with the retrieved context
|
133 |
-
# user_context = f"{message}\nKonteks:\n{context}"
|
134 |
-
# messages.append({"role": "user", "content": user_context})
|
135 |
-
|
136 |
-
# response = ""
|
137 |
-
|
138 |
-
# for message in client.chat_completion(
|
139 |
-
# messages,
|
140 |
-
# max_tokens=max_tokens,
|
141 |
-
# stream=True,
|
142 |
-
# temperature=temperature,
|
143 |
-
# top_p=top_p,
|
144 |
-
# ):
|
145 |
-
# token = message.choices[0].delta.content
|
146 |
-
|
147 |
-
# response += token
|
148 |
-
# yield response
|
149 |
-
|
150 |
-
# """
|
151 |
-
# For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
|
152 |
-
# """
|
153 |
-
# demo = gr.ChatInterface(
|
154 |
-
# respond,
|
155 |
-
# textbox=gr.Textbox(placeholder="Enter message here", container=False, scale = 7),
|
156 |
-
# chatbot=gr.Chatbot(value=[[None, "Halo namaku Bestie, apa yang perlu kamu ceritakan hari ini?"]]),
|
157 |
-
# examples=[
|
158 |
-
# ["Hello there! How are you doing?"],
|
159 |
-
# ["Can you explain briefly to me what is the Python programming language?"],
|
160 |
-
# ["Explain the plot of Cinderella in a sentence."],
|
161 |
-
# ["How many hours does it take a man to eat a Helicopter?"],
|
162 |
-
# ["Write a 100-word article on 'Benefits of Open-Source in AI research'"]
|
163 |
-
# ],
|
164 |
-
# )
|
165 |
-
|
166 |
examples = [
|
167 |
["Saya mengalami pelecehan seksual."],
|
168 |
["Bantu saya melaporkan pelecehan seksual."],
|
@@ -170,7 +107,6 @@ examples = [
|
|
170 |
["Bagaiamana melaporkan pelecahan seksual yang saya lihat?"],
|
171 |
]
|
172 |
|
173 |
-
# Fungsi respond yang mempertahankan penggunaan RAG dan score boundary
|
174 |
def respond(
|
175 |
message,
|
176 |
history: list[tuple[str, str]],
|
@@ -181,14 +117,15 @@ def respond(
|
|
181 |
# Retrieve top 3 relevant documents based on the user's query
|
182 |
score, retrieved_docs = retrieve(message, top_k=TOP_K)
|
183 |
|
184 |
-
#
|
185 |
-
if score
|
186 |
# Prepare the retrieved context
|
187 |
context = "\n".join([f"{doc}" for i, doc in enumerate(retrieved_docs)])
|
188 |
else:
|
189 |
context = ""
|
|
|
|
|
190 |
|
191 |
-
# Construct the conversation history with context
|
192 |
messages = [{"role": "system", "content": SYS_MSG}]
|
193 |
|
194 |
for val in history:
|
@@ -197,12 +134,14 @@ def respond(
|
|
197 |
if val[1]:
|
198 |
messages.append({"role": "assistant", "content": val[1]})
|
199 |
|
|
|
|
|
200 |
# Append the current user message along with the retrieved context
|
201 |
user_context = f"{message}\nKonteks:\n{context}"
|
202 |
messages.append({"role": "user", "content": user_context})
|
203 |
|
204 |
-
# Generate the response from the language model
|
205 |
response = ""
|
|
|
206 |
for message in client.chat_completion(
|
207 |
messages,
|
208 |
max_tokens=max_tokens,
|
@@ -211,36 +150,19 @@ def respond(
|
|
211 |
top_p=top_p,
|
212 |
):
|
213 |
token = message.choices[0].delta.content
|
214 |
-
response += token
|
215 |
-
|
216 |
-
# Add the response to the history
|
217 |
-
history.append((message, response))
|
218 |
-
return history, history
|
219 |
-
|
220 |
-
# Fungsi untuk mengisi contoh ke dalam kotak teks dan menjalankan respond
|
221 |
-
def example_click(example):
|
222 |
-
return example[0]
|
223 |
-
|
224 |
-
# Membuat interface Gradio dengan examples yang dapat diklik
|
225 |
-
with gr.Blocks() as demo:
|
226 |
-
chatbot = gr.Chatbot(value=[[None, "Halo namaku Bestie, apa yang perlu kamu ceritakan hari ini?"]])
|
227 |
|
228 |
-
|
229 |
-
|
230 |
-
submit_btn = gr.Button("Send")
|
231 |
-
|
232 |
-
# Menampilkan contoh di dalam chatbot
|
233 |
-
examples_box = gr.Examples(
|
234 |
-
examples=examples,
|
235 |
-
inputs=textbox,
|
236 |
-
label="Contoh yang bisa dipilih",
|
237 |
-
)
|
238 |
-
|
239 |
-
# Event tombol submit untuk mengirim pesan
|
240 |
-
submit_btn.click(respond, inputs=[textbox, chatbot], outputs=[chatbot, chatbot])
|
241 |
|
242 |
-
|
243 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
244 |
|
245 |
if __name__ == "__main__":
|
246 |
demo.launch(share=True)
|
|
|
90 |
|
91 |
dataset.add_faiss_index(column='embeddings')
|
92 |
|
93 |
+
def retrieve(query, top_k=1):
|
94 |
query_embedding = ST.encode([query])
|
95 |
scores, retrieved_examples = dataset.get_nearest_examples('embeddings', query_embedding, k=top_k)
|
96 |
|
|
|
100 |
|
101 |
client = InferenceClient(BASE_MODEL)
|
102 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
103 |
examples = [
|
104 |
["Saya mengalami pelecehan seksual."],
|
105 |
["Bantu saya melaporkan pelecehan seksual."],
|
|
|
107 |
["Bagaiamana melaporkan pelecahan seksual yang saya lihat?"],
|
108 |
]
|
109 |
|
|
|
110 |
def respond(
|
111 |
message,
|
112 |
history: list[tuple[str, str]],
|
|
|
117 |
# Retrieve top 3 relevant documents based on the user's query
|
118 |
score, retrieved_docs = retrieve(message, top_k=TOP_K)
|
119 |
|
120 |
+
# print(f"Score: {score}")
|
121 |
+
if score <= 11:
|
122 |
# Prepare the retrieved context
|
123 |
context = "\n".join([f"{doc}" for i, doc in enumerate(retrieved_docs)])
|
124 |
else:
|
125 |
context = ""
|
126 |
+
|
127 |
+
print(f"Feed:\n{context}")
|
128 |
|
|
|
129 |
messages = [{"role": "system", "content": SYS_MSG}]
|
130 |
|
131 |
for val in history:
|
|
|
134 |
if val[1]:
|
135 |
messages.append({"role": "assistant", "content": val[1]})
|
136 |
|
137 |
+
# messages.append({"role": "user", "content": message})
|
138 |
+
|
139 |
# Append the current user message along with the retrieved context
|
140 |
user_context = f"{message}\nKonteks:\n{context}"
|
141 |
messages.append({"role": "user", "content": user_context})
|
142 |
|
|
|
143 |
response = ""
|
144 |
+
|
145 |
for message in client.chat_completion(
|
146 |
messages,
|
147 |
max_tokens=max_tokens,
|
|
|
150 |
top_p=top_p,
|
151 |
):
|
152 |
token = message.choices[0].delta.content
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
153 |
|
154 |
+
response += token
|
155 |
+
yield response
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
156 |
|
157 |
+
"""
|
158 |
+
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
|
159 |
+
"""
|
160 |
+
demo = gr.ChatInterface(
|
161 |
+
respond,
|
162 |
+
textbox=gr.Textbox(placeholder="Enter message here", container=False, scale = 7),
|
163 |
+
chatbot=gr.Chatbot(value=[[None, "Halo namaku Bestie, apa yang perlu kamu ceritakan hari ini?"]]),
|
164 |
+
examples=examples,
|
165 |
+
)
|
166 |
|
167 |
if __name__ == "__main__":
|
168 |
demo.launch(share=True)
|