Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -14,7 +14,8 @@ from langchain_core.output_parsers import StrOutputParser
|
|
14 |
from langchain_core.runnables import RunnableLambda
|
15 |
from datetime import date
|
16 |
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
17 |
-
|
|
|
18 |
# Environment variables
|
19 |
os.environ['LANGCHAIN_TRACING_V2'] = 'true'
|
20 |
os.environ['LANGCHAIN_ENDPOINT'] = 'https://api.smith.langchain.com'
|
@@ -125,7 +126,14 @@ def clear_chat_history():
|
|
125 |
st.session_state.context_sources = []
|
126 |
st.session_state.key = 0
|
127 |
|
128 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
129 |
|
130 |
# Sidebar
|
131 |
with st.sidebar:
|
@@ -168,11 +176,14 @@ if prompt := st.chat_input("How may I assist you today?"):
|
|
168 |
tab1, tab2 = st.tabs(["Answer", "Sources"])
|
169 |
with tab1:
|
170 |
with st.spinner("Generating answer..."):
|
171 |
-
|
172 |
-
|
173 |
-
|
174 |
-
|
175 |
-
|
|
|
|
|
|
|
176 |
with tab2:
|
177 |
for i, source in enumerate(st.session_state.context_sources):
|
178 |
name = f'{source}'
|
|
|
14 |
from langchain_core.runnables import RunnableLambda
|
15 |
from datetime import date
|
16 |
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
17 |
+
from langchain.callbacks import TimeoutCallback
|
18 |
+
import time
|
19 |
# Environment variables
|
20 |
os.environ['LANGCHAIN_TRACING_V2'] = 'true'
|
21 |
os.environ['LANGCHAIN_ENDPOINT'] = 'https://api.smith.langchain.com'
|
|
|
126 |
st.session_state.context_sources = []
|
127 |
st.session_state.key = 0
|
128 |
|
129 |
+
# In your Streamlit app
def generate_response(chain, query, context):
    """Run the chain and return its answer, enforcing a 60-second timeout.

    Parameters:
        chain: a LangChain chain exposing ``run(context=..., question=...)``
            (built elsewhere by ``get_chain``).
        query: the user's question string.
        context: retrieved source passages joined into a single string.

    Returns:
        The chain's answer string, or a fixed apology message when the call
        does not complete within 60 seconds.
    """
    # NOTE(review): the original imported `TimeoutCallback` from
    # `langchain.callbacks`, but no such callback exists in LangChain's
    # public API, so the import fails and the expected `TimeoutError` could
    # never be raised.  Enforce the timeout with the standard library
    # instead: run the chain in a worker thread and bound the wait.
    import concurrent.futures

    executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    try:
        future = executor.submit(chain.run, context=context, question=query)
        try:
            return future.result(timeout=60)  # 60 second timeout
        except concurrent.futures.TimeoutError:
            return "I apologize, but I couldn't generate a response in time. The query might be too complex for me to process quickly. Could you try simplifying your question?"
    finally:
        # wait=False so a timed-out generation does not block the return;
        # the worker thread is left to finish (or be abandoned) on its own.
        executor.shutdown(wait=False)
|
136 |
+
|
137 |
|
138 |
# Sidebar
|
139 |
with st.sidebar:
|
|
|
176 |
tab1, tab2 = st.tabs(["Answer", "Sources"])
|
177 |
with tab1:
|
178 |
with st.spinner("Generating answer..."):
|
179 |
+
chain = get_chain(temperature)
|
180 |
+
context = "\n".join(st.session_state.context_content)
|
181 |
+
start_time = time.time()
|
182 |
+
full_answer = generate_response(chain, query, context)
|
183 |
+
end_time = time.time()
|
184 |
+
|
185 |
+
st.markdown(full_answer)
|
186 |
+
st.caption(f"Response time: {end_time - start_time:.2f} seconds")
|
187 |
with tab2:
|
188 |
for i, source in enumerate(st.session_state.context_sources):
|
189 |
name = f'{source}'
|