Commit f4bb2ee
Parent(s): 43f39e7

update app.py

Files changed:
- app.py            +71 -57
- requirements.txt   +1 -1
app.py
CHANGED
@@ -2,6 +2,8 @@ import os
 import json
 import re
 import hashlib
+import gradio as gr
+import time
 from functools import partial
 from collections import defaultdict
 from pathlib import Path
@@ -27,7 +29,7 @@ from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 #load_dotenv(dotenv_path)
 #api_key = os.getenv("NVIDIA_API_KEY")
 #os.environ["NVIDIA_API_KEY"] = api_key
-
+load_dotenv()
 api_key = os.environ.get("NVIDIA_API_KEY")
 if not api_key:
     raise RuntimeError("🚨 NVIDIA_API_KEY not found in environment! Please add it in Hugging Face Secrets.")
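With load_dotenv() now called at startup, the key can come from a local .env file during development or from a Hugging Face Space secret (exposed as an environment variable) in deployment, which is what the RuntimeError message above asks for. A minimal sketch of checking this locally, assuming python-dotenv from requirements.txt and a .env file containing NVIDIA_API_KEY=<your key>; the file and its value are placeholders, not part of the commit:

# Hypothetical local check, mirroring the guard in app.py: load .env and
# confirm NVIDIA_API_KEY is visible before starting the app.
import os
from dotenv import load_dotenv

load_dotenv()
print("NVIDIA_API_KEY set:", bool(os.environ.get("NVIDIA_API_KEY")))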
@@ -326,91 +328,103 @@ answer_chain = (
 # Full Pipeline
 full_pipeline = hybrid_chain | RunnableAssign({"validation": validation_chain}) | answer_chain

-import gradio as gr
-import time

 def chat_interface(message, history):
-
-    if isinstance(message, list) and len(message) > 0:
-        if isinstance(message[-1], dict):
-            user_input = message[-1].get("content", "")
-        else:
-            user_input = message[-1]
-    else:
-        user_input = str(message)
-
-    # Prepare inputs for pipeline
-    inputs = {
-        "query": user_input,
-        "all_queries": [user_input],
-        "all_texts": all_chunks,
-        "k_per_query": 3,
-        "alpha": 0.7,
-        "vectorstore": vectorstore,
-        "full_document": "",
-    }
-
-    response = ""
+    """Handle chat interface with error handling"""
     try:
+        # Handle input formatting
+        if isinstance(message, list) and len(message) > 0:
+            if isinstance(message[-1], dict):
+                user_input = message[-1].get("content", "")
+            else:
+                user_input = message[-1]
+        else:
+            user_input = str(message)
+
+        # Prepare inputs
+        inputs = {
+            "query": user_input,
+            "all_queries": [user_input],
+            "all_texts": all_chunks,
+            "k_per_query": 3,
+            "alpha": 0.7,
+            "vectorstore": vectorstore,
+            "full_document": "",
+        }
+
+        # Process through pipeline
+        response = ""
         for chunk in full_pipeline.stream(inputs):
             if isinstance(chunk, str):
                 response += chunk
             elif isinstance(chunk, dict) and "answer" in chunk:
                 response += chunk["answer"]
-
-            # Yield simpler format
             yield response
     except Exception as e:
-
-        yield response
+        yield f"🚨 Error: {str(e)}"

+# Custom ChatInterface implementation
 with gr.Blocks(css="""
-    html, body, .gradio-container {
-        height: 100%;
-        margin: 0;
-        padding: 0;
-    }
     .gradio-container {
         width: 90%;
         max-width: 1000px;
         margin: 0 auto;
         padding: 1rem;
     }
-
     .chatbox-container {
         display: flex;
         flex-direction: column;
-        height:
+        height: 95vh;
     }
-
     .chatbot {
         flex: 1;
         overflow-y: auto;
         min-height: 500px;
     }
-
     .textbox {
         margin-top: 1rem;
     }
-    #component-523 {
-        height: 98%;
-    }
 """) as demo:
-
-
-
-
+    with gr.Column(elem_classes="chatbox-container"):
+        gr.Markdown("## 💬 Ask Krishna's AI Assistant")
+        gr.Markdown("💡 Ask anything about Krishna Vamsi Dhulipalla")
+
+        chatbot = gr.Chatbot(elem_classes="chatbot")
+        msg = gr.Textbox(placeholder="Ask a question about Krishna...",
+                         elem_classes="textbox")
+        clear = gr.Button("Clear Chat")
+
+        # Example questions
+        gr.Examples(
+            examples=[
+                "What are Krishna's research interests?",
+                "Where did Krishna work?",
+                "What did he study at Virginia Tech?",
+            ],
+            inputs=msg,
+            label="Example Questions"
+        )
+
     def respond(message, chat_history):
+        """Handle user message and generate response"""
         bot_message = ""
         for chunk in chat_interface(message, chat_history):
             bot_message = chunk
-
+            # Update last message in history
+            if chat_history:
+                chat_history[-1] = (message, bot_message)
+            else:
+                chat_history.append((message, bot_message))
             yield chat_history

+    def user(user_message, history):
+        """Append user message to history"""
+        return "", history + [[user_message, None]]
+
     msg.submit(
-
-        [msg, chatbot],
-        [chatbot],
+        user,
+        [msg, chatbot],
+        [msg, chatbot],
         queue=False
     ).then(
         respond,
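Since chat_interface is a plain generator, it can be exercised outside Gradio. A hypothetical smoke test (not part of the commit; it assumes app.py has been imported so that full_pipeline, vectorstore and all_chunks are already initialised):

# Hypothetical smoke test: drive the generator directly and print each
# partial response as the pipeline streams.
for partial in chat_interface("What are Krishna's research interests?", []):
    print(partial)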
@@ -418,14 +432,14 @@ with gr.Blocks(css="""
         [chatbot]
     )

-
-        examples=[
-            "What are Krishna's research interests?",
-            "Where did Krishna work?",
-            "What did he study at Virginia Tech?",
-        ],
-        inputs=msg
-    )
+    clear.click(lambda: None, None, chatbot, queue=False)

 if __name__ == "__main__":
-
+    # Add resource verification
+    print(f"FAISS path exists: {Path(FAISS_PATH).exists()}")
+    print(f"Chunks path exists: {Path(CHUNKS_PATH).exists()}")
+    print(f"Vectorstore type: {type(vectorstore)}")
+    print(f"All chunks count: {len(all_chunks)}")
+
+    # Launch with queue management
+    demo.queue(concurrency_count=1).launch()
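Taken together, the new Blocks wiring follows the usual two-step Gradio 3.x pattern: msg.submit first runs a fast handler that appends the user turn to the tuple-style Chatbot history and clears the textbox, and .then() streams the bot reply into that last turn. A stripped-down sketch of the same pattern, assuming Gradio 3.x; the names add_user_turn and stream_reply and the token list are stand-ins of mine, not identifiers from the commit (app.py streams from full_pipeline instead):

# Minimal sketch of the submit -> then streaming pattern used in the commit,
# assuming Gradio 3.x (list-of-[user, bot] Chatbot history).
import gradio as gr

def add_user_turn(user_message, history):
    # Append the user turn with an empty bot slot and clear the textbox.
    return "", history + [[user_message, None]]

def stream_reply(history):
    user_message = history[-1][0]
    bot_message = ""
    for token in ["echo: ", user_message]:      # stand-in for full_pipeline.stream(...)
        bot_message += token
        history[-1] = [user_message, bot_message]   # fill the bot slot as tokens arrive
        yield history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    msg.submit(add_user_turn, [msg, chatbot], [msg, chatbot], queue=False).then(
        stream_reply, chatbot, chatbot
    )

if __name__ == "__main__":
    demo.queue(concurrency_count=1).launch()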
requirements.txt
CHANGED
@@ -1,6 +1,6 @@
 fastapi
 uvicorn
-gradio
+gradio==3.50.2  # Specific compatible version
 rich
 numpy
 python-dotenv
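Pinning gradio==3.50.2 is consistent with the code above: the list-of-pairs Chatbot history and demo.queue(concurrency_count=1) follow Gradio 3.x APIs, and the concurrency_count argument was removed from queue() in Gradio 4.x, so an unpinned install would likely fail at launch. A quick environment check (a simple sketch, not from the commit):

# Confirm the installed Gradio matches the pinned 3.x line.
import gradio as gr
assert gr.__version__.startswith("3."), f"expected Gradio 3.x, got {gr.__version__}"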