src/app.py CHANGED (+19 -11)
@@ -63,30 +63,39 @@ docs = pdf_loader('hwc/')
 from langchain_openai import ChatOpenAI
 llm = ChatOpenAI(model = "llama3", api_key = api_key, base_url = "https://llm.nrp-nautilus.io", temperature=0)
 ## Cirrus instead:
-
-    model = "cirrus",
-    api_key = cirrus_key,
-    base_url = "https://llm.cirrus.carlboettiger.info/v1",
-)
+



 # Build a retrival agent
 from langchain_core.vectorstores import InMemoryVectorStore
 from langchain_text_splitters import RecursiveCharacterTextSplitter
+
+
 text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
 splits = text_splitter.split_documents(docs)
-
-
+
+@st.cache_data
+def vector_store(_splits):
+    embedding = OpenAIEmbeddings(
+        model = "cirrus",
+        api_key = cirrus_key,
+        base_url = "https://llm.cirrus.carlboettiger.info/v1",
+    )
+    vectorstore = InMemoryVectorStore.from_documents(documents=_splits, embedding=embedding)
+    retriever = vectorstore.as_retriever()
+    return retriever
+
+retriever = vector_store(splits)

 from langchain.chains import create_retrieval_chain
 from langchain.chains.combine_documents import create_stuff_documents_chain
 from langchain_core.prompts import ChatPromptTemplate
 system_prompt = (
     "You are an assistant for question-answering tasks. "
-    "Use the following pieces of retrieved context to answer "
+    "Use the following scientific articles as the retrieved context to answer "
     "the question. If you don't know the answer, say that you "
-    "don't know. Use three sentences maximum and keep the "
+    "don't know. Use up to five sentences maximum and keep the "
     "answer concise."
     "\n\n"
     "{context}"
@@ -112,8 +121,7 @@ if prompt := st.chat_input("What are the most cost-effective prevention methods
     st.write(results['answer'])

     with st.expander("See context matched"):
-        st.write(results['context']
-        st.write(results['context'][0].metadata)
+        st.write(results['context'])


 # adapt for memory / multi-question interaction with:
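
Note: the diff shows the retriever and system_prompt but not the chain that joins them; the imports (create_retrieval_chain, create_stuff_documents_chain, ChatPromptTemplate) point at the standard LangChain assembly. A minimal sketch of that step, assuming the unchanged part of src/app.py follows the usual pattern (qa_prompt and rag_chain are illustrative names, not taken from this commit):

from langchain_core.prompts import ChatPromptTemplate
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain

# Pair the system instructions (which carry the {context} slot) with the user input.
qa_prompt = ChatPromptTemplate.from_messages([
    ("system", system_prompt),
    ("human", "{input}"),
])

# The "stuff" chain packs the retrieved documents into {context} for the llm;
# create_retrieval_chain wires the cached retriever in front of it.
question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
rag_chain = create_retrieval_chain(retriever, question_answer_chain)

# The result dict exposes the 'answer' and 'context' keys read in the second hunk.
results = rag_chain.invoke({"input": "What are the most cost-effective prevention methods?"})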
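
One caveat on the new helper: st.cache_data pickles a function's return value and hands back a fresh copy on every rerun, whereas st.cache_resource stores the object itself once per session, the usual choice for non-serializable handles like a retriever (the leading underscore in _splits already opts that argument out of Streamlit's hashing). A sketch of the same helper under the alternative decorator, offered as an assumption about intent rather than something this commit does:

@st.cache_resource  # resource cache: stored once, not re-pickled on each rerun
def vector_store(_splits):  # "_" prefix: Streamlit skips hashing this argument
    embedding = OpenAIEmbeddings(
        model="cirrus",
        api_key=cirrus_key,
        base_url="https://llm.cirrus.carlboettiger.info/v1",
    )
    vectorstore = InMemoryVectorStore.from_documents(documents=_splits, embedding=embedding)
    return vectorstore.as_retriever()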