dalide committed
Commit 0724793 · verified · 1 Parent(s): f00bb42

Create app.py

Files changed (1)
  1. app.py +107 -0
app.py ADDED
@@ -0,0 +1,107 @@
+ import os
+ import sys
+
+ # Add project root to sys.path for utils import
+ sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+
+ import streamlit as st
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSequenceClassification, pipeline
+ from langchain_community.vectorstores import FAISS
+ from utils.pdf_vector_utils import load_vector_store
+
+ st.set_page_config(page_title="HER2 Q&A Chatbot")
+ st.title("🔬 HER2 Q&A Chatbot (with Chat History)")
+
+ # Determine device
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+
+ def build_prompt(context: str, history: list, question: str) -> str:
+     history_text = "\n".join(
+         f"User: {turn['user']}\nAssistant: {turn['assistant']}" for turn in history
+     )
+     prompt = (
+         "You are a biomedical research assistant. Use the provided paper context "
+         "and conversation history to answer the user's question accurately and in detail.\n\n"
+         f"Context:\n{context}\n\n"
+         f"Conversation History:\n{history_text}\n"
+         f"User: {question}\nAssistant:"
+     )
+     return prompt
+
+ @st.cache_resource
+ def load_vectorstore():
+     db_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "her2_faiss_db"))
+     return load_vector_store(persist_directory=db_path, model_name="sentence-transformers/allenai-specter")
+
+ @st.cache_resource
+ def load_phi2_pipeline():  # despite the name, this loads TinyLlama-1.1B-Chat, not Phi-2
+     model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+     tokenizer = AutoTokenizer.from_pretrained(model_id)
+     try:
+         torch.cuda.empty_cache()
+         model = AutoModelForCausalLM.from_pretrained(
+             model_id, torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32
+         ).to(DEVICE)
+         pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0 if DEVICE == "cuda" else -1)
+         return tokenizer, pipe
+     except RuntimeError as e:
+         if "CUDA out of memory" in str(e):
+             torch.cuda.empty_cache()
+             st.warning("⚠️ GPU out of memory. Falling back to CPU.")
+             model = AutoModelForCausalLM.from_pretrained(model_id).to("cpu")
+             pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=-1)
+             return tokenizer, pipe
+         else:
+             raise
+
+ @st.cache_resource
+ def load_reranker():
+     model_id = "BAAI/bge-reranker-base"
+     tokenizer = AutoTokenizer.from_pretrained(model_id)
+     model = AutoModelForSequenceClassification.from_pretrained(model_id).to(DEVICE)
+     return tokenizer, model
+
+ def rerank_chunks(query: str, docs: list, tokenizer, model, top_k: int = 5) -> list:
+     pairs = [(query, doc.page_content) for doc in docs]  # (query, chunk) pairs for the cross-encoder
+     inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors="pt").to(DEVICE)
+
+     with torch.no_grad():
+         logits = model(**inputs).logits.squeeze()
+         scores = logits.tolist() if logits.ndim > 0 else [logits.item()]
+
+     reranked = sorted(zip(docs, scores), key=lambda x: x[1], reverse=True)
+     return [doc for doc, _ in reranked[:top_k]]
+
+ def get_answer(query: str, history: list) -> str:
+     docs = vectorstore.similarity_search(query, k=5)
+     reranker_tokenizer, reranker_model = load_reranker()
+     top_docs = rerank_chunks(query, docs, reranker_tokenizer, reranker_model, top_k=3)
+
+     context = "\n\n".join(doc.page_content[:300] for doc in top_docs)
+     prompt = build_prompt(context, history, query)
+
+     result = llm_pipeline(prompt, max_new_tokens=256, do_sample=False)  # greedy decoding; temperature is ignored when do_sample=False
+     return result[0]["generated_text"].split("Assistant:")[-1].strip()
+
+ # Load resources
+ vectorstore = load_vectorstore()
+ llm_tokenizer, llm_pipeline = load_phi2_pipeline()
+
+ if "chat_history" not in st.session_state:
+     st.session_state.chat_history = []
+
+ query = st.text_input("Ask something about the HER2 paper...")
+
+ if query:
+     with st.spinner("Thinking..."):
+         try:
+             answer = get_answer(query, st.session_state.chat_history)
+             st.session_state.chat_history.append({"user": query, "assistant": answer})
+         except Exception as e:
+             st.error(f"An error occurred: {e}")
+
+ # Display chat history
+ for turn in st.session_state.chat_history:
+     st.markdown(f"**You:** {turn['user']}")
+     st.markdown(f"**Bot:** {turn['assistant']}")
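
Note: utils/pdf_vector_utils.py is not part of this commit; app.py only imports load_vector_store from it and expects the FAISS index in her2_faiss_db one directory above app.py. A minimal sketch of what that helper might look like, assuming it simply wraps LangChain's HuggingFaceEmbeddings and FAISS.load_local (the actual helper in the repository may differ):

    # utils/pdf_vector_utils.py: hypothetical sketch, not the committed helper
    from langchain_community.embeddings import HuggingFaceEmbeddings
    from langchain_community.vectorstores import FAISS

    def load_vector_store(persist_directory: str, model_name: str):
        # Embed queries with the same model that was used to build the index
        embeddings = HuggingFaceEmbeddings(model_name=model_name)
        # Load an index previously saved with FAISS.save_local(persist_directory)
        return FAISS.load_local(persist_directory, embeddings, allow_dangerous_deserialization=True)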
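To try the app locally, assuming the imported dependencies (streamlit, torch, transformers, langchain-community, plus faiss-cpu and sentence-transformers for the vector store) are installed and the her2_faiss_db index exists, it should start with:

    streamlit run app.py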