Update app.py
app.py
CHANGED
@@ -1,39 +1,12 @@
-import streamlit as st
-import openai
-
-
-
-
-
-
-
-
-
-
-def ask_openai(prompt):
-    try:
-        response = openai.Completion.create(
-            model="text-davinci-003",  # Example model, adjust as needed
-            prompt=prompt,
-            temperature=0.7,
-            max_tokens=150,
-            n=1,
-            stop=None,
-        )
-        return response.choices[0].text.strip()
-    except Exception as e:
-        st.error(f"Error in generating response: {e}")
-        return "I encountered an error. Please try again."
-
-# UI for input and displaying the chat
-user_input = st.text_input("Ask me anything:", key="chat_input")
-
-if user_input:
-    # Display user input
-    with st.chat_message("user"):
-        st.write(user_input)
-
-    # Generate and display AI response
-    ai_response = ask_openai(user_input)
-    with st.chat_message("assistant"):
-        st.write(ai_response)
+import llama_index
+import llama2
+
+
+from llama_index.query_pipeline import (
+    QueryPipeline as QP,
+    Link,
+    InputComponent,
+)
+from llama_index.query_engine.pandas import PandasInstructionParser
+from llama_index.llms import OpenAI
+from llama_index.prompts import PromptTemplate
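For context, the new imports match the components used in LlamaIndex's pandas query pipeline recipe. The sketch below shows one way they are typically wired together; it is not part of this commit, and the DataFrame `df`, the prompt strings, the `gpt-3.5-turbo` model choice, and all module names in the pipeline graph are placeholder assumptions adapted from that recipe.

# Sketch only: a typical pandas query pipeline built from the imports added above.
# Everything here (data, prompts, model) is a placeholder, not code from this commit.
import pandas as pd

from llama_index.query_pipeline import QueryPipeline as QP, Link, InputComponent
from llama_index.query_engine.pandas import PandasInstructionParser
from llama_index.llms import OpenAI
from llama_index.prompts import PromptTemplate

# Placeholder data frame; the real app would load its own data.
df = pd.DataFrame({"city": ["Toronto", "Tokyo"], "population": [2_930_000, 13_960_000]})

instruction_str = (
    "1. Convert the query to executable Python code using pandas.\n"
    "2. The final line of code should be a Python expression that can be called with `eval()`.\n"
    "3. PRINT ONLY THE EXPRESSION.\n"
    "4. Do not quote the expression.\n"
)

# Prompt that turns a natural-language query into a pandas expression.
pandas_prompt = PromptTemplate(
    "You are working with a pandas dataframe in Python.\n"
    "The name of the dataframe is `df`.\n"
    "This is the result of `print(df.head())`:\n{df_str}\n\n"
    "Follow these instructions:\n{instruction_str}\n"
    "Query: {query_str}\n\nExpression:"
).partial_format(instruction_str=instruction_str, df_str=df.head(5))

# Prompt that turns the pandas output back into a natural-language answer.
response_synthesis_prompt = PromptTemplate(
    "Given an input question, synthesize a response from the query results.\n"
    "Query: {query_str}\n\n"
    "Pandas Instructions (optional):\n{pandas_instructions}\n\n"
    "Pandas Output: {pandas_output}\n\nResponse: "
)

llm = OpenAI(model="gpt-3.5-turbo")  # placeholder model choice
pandas_output_parser = PandasInstructionParser(df)

qp = QP(
    modules={
        "input": InputComponent(),
        "pandas_prompt": pandas_prompt,
        "llm1": llm,
        "pandas_output_parser": pandas_output_parser,
        "response_synthesis_prompt": response_synthesis_prompt,
        "llm2": llm,
    },
    verbose=True,
)
# query -> pandas expression -> executed result -> synthesized answer
qp.add_chain(["input", "pandas_prompt", "llm1", "pandas_output_parser"])
qp.add_links(
    [
        Link("input", "response_synthesis_prompt", dest_key="query_str"),
        Link("llm1", "response_synthesis_prompt", dest_key="pandas_instructions"),
        Link("pandas_output_parser", "response_synthesis_prompt", dest_key="pandas_output"),
    ]
)
qp.add_link("response_synthesis_prompt", "llm2")

# response = qp.run(query_str="What is the population of Toronto?")

If the Streamlit chat UI from the removed code is kept, its handler would presumably call qp.run(query_str=user_input) where it previously called ask_openai(user_input).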