Update app.py
app.py CHANGED
```diff
@@ -1,5 +1,6 @@
 import os
 import tempfile
+import torch
 import gradio as gr
 from langchain_community.vectorstores import FAISS
 from langchain_groq import ChatGroq
@@ -10,14 +11,17 @@ from langchain.document_loaders import PyPDFLoader
 from langchain import hub
 
 # Set API key (Replace with your actual key)
-os.environ["GROQ_API_KEY"] = "
+os.environ["GROQ_API_KEY"] = "your_groq_api_key"
 
-#
+# Check if GPU is available
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+# Initialize LLM and Embeddings with GPU if available
 llm = ChatGroq(model="llama3-8b-8192")
 model_name = "BAAI/bge-small-en"
 hf_embeddings = HuggingFaceBgeEmbeddings(
     model_name=model_name,
-    model_kwargs={'device':
+    model_kwargs={'device': device},
     encode_kwargs={'normalize_embeddings': True}
 )
 
```
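The second hunk's device flag and `normalize_embeddings` option can be exercised in isolation. A minimal sketch, assuming the embedding class is imported from `langchain_community.embeddings` (the actual import line sits in an elided part of the diff):

```python
import numpy as np
import torch
from langchain_community.embeddings import HuggingFaceBgeEmbeddings

# Same device selection as the diff: prefer CUDA when a GPU is visible.
device = "cuda" if torch.cuda.is_available() else "cpu"

hf_embeddings = HuggingFaceBgeEmbeddings(
    model_name="BAAI/bge-small-en",
    model_kwargs={"device": device},               # where the encoder runs
    encode_kwargs={"normalize_embeddings": True},  # return unit-length vectors
)

vec = np.array(hf_embeddings.embed_query("What does this PDF say?"))
print(vec.shape)            # (384,) for bge-small-en
print(np.linalg.norm(vec))  # ~1.0, since embeddings are normalized
```

Because the vectors come back unit-length, the FAISS index built later ranks neighbors the same way under L2 distance and cosine similarity, which is the usual reason for setting this flag with BGE models.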
```diff
@@ -64,23 +68,28 @@ def ask_question(query):
     if "rag_chain" not in globals():
         return "Please upload and process a PDF first."
 
-    response = rag_chain.invoke(query)
+    response = rag_chain.invoke(query)
     return response
 
-# Gradio UI
+# Gradio UI with Mobile-Friendly Fixes
 with gr.Blocks() as demo:
     gr.Markdown("# π PDF Chatbot with RAG")
     gr.Markdown("Upload a PDF and ask questions!")
-
-
-
+
+    with gr.Row():
+        pdf_input = gr.File(label="Upload PDF", type="binary")
+        process_button = gr.Button("Process PDF")
+
     output_message = gr.Textbox(label="Status", interactive=False)
-
-
-
+
+    with gr.Row():
+        query_input = gr.Textbox(label="Ask a Question")
+        submit_button = gr.Button("Submit")
+
     response_output = gr.Textbox(label="AI Response")
 
     process_button.click(process_pdf, inputs=pdf_input, outputs=output_message)
     submit_button.click(ask_question, inputs=query_input, outputs=response_output)
 
-
+# Improved mobile layout and timeout handling
+demo.launch(share=True, theme="compact", show_api=False, timeout=60)
```
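The `"rag_chain" not in globals()` guard works because `process_pdf` (its body lies outside the visible hunks) presumably assigns `rag_chain` as a module-level global only after a PDF has been processed; until then the name simply does not exist. A toy reproduction of that pattern, with a stand-in for the real chain:

```python
# Toy version of the guard in ask_question; the lambda stands in for the
# real RAG chain that process_pdf would build from the uploaded PDF.

def process(_pdf_bytes):
    global rag_chain                    # name only exists after this runs
    rag_chain = lambda q: f"echo: {q}"  # stand-in for rag_chain.invoke

def ask(query):
    if "rag_chain" not in globals():    # same check as the app
        return "Please upload and process a PDF first."
    return rag_chain(query)

print(ask("hello"))    # -> "Please upload and process a PDF first."
process(b"%PDF-1.4")
print(ask("hello"))    # -> "echo: hello"
```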
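One caveat on the new last line: in recent Gradio releases, `launch()` does not accept `theme` or `timeout` keywords (`theme` belongs on the `gr.Blocks(...)` constructor), so this call may raise a `TypeError` depending on the installed version. A hedged sketch of an equivalent setup under that assumption; the theme name "soft" is illustrative:

```python
import gradio as gr

# `theme` moves to the Blocks constructor in Gradio 3.16+.
with gr.Blocks(theme="soft") as demo:
    gr.Markdown("# PDF Chatbot with RAG")
    # ... widgets and event wiring as in the diff ...

# share and show_api are stable launch() keywords; there is no
# launch-level timeout parameter to replace.
demo.launch(share=True, show_api=False)
```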