Update app.py
app.py CHANGED
@@ -64,7 +64,8 @@ def chat_query_doc(question, history):
     If don't get the answer, feel free to reply from your own knowledge."""


-    query = f"""If the Question is related to Electrical Domain, Provide a detailed, accurate and point-wise answer to the question: {question}
+    query = f"""If the Question is related to Electrical Domain, Provide a detailed, accurate and point-wise answer to the question: {question} \
+    Make sure that your reply shall be based on provided documents only."""


     #llm = ChatOpenAI(model = llm_name, temperature = 0.1, api_key = OPENAI_API_KEY)
@@ -93,14 +94,14 @@ def chat_query_doc(question, history):
 def chat_query_IS(question, history):
     llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GEMINI_API_KEY)

-    system_old = f""" Provide an elaborate, detailed and
-    Also, at the end of your reply, quote the Relevant Standard Referred. Topic :
+    system_old = f""" Provide an elaborate, detailed and pointwise reply about the Topic as an experienced Electrical Engineer, as per relevant IS/IEEE/BIS Standard.
+    Also, at the end of your reply, quote the Relevant Standard Referred. Topic : {question}
     """
-    system = f""" Provide a reply poetically precise as william shakespeare for the Topic :
+    system = f""" Provide a reply poetically precise as william shakespeare for the Topic : {question}
     """


-    result = llm.invoke(system_old
+    result = llm.invoke(system_old)
     return result.content

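For reference, the patched chat_query_IS runs end to end as sketched below. This is a minimal sketch, not the full app.py: it assumes ChatGoogleGenerativeAI comes from the langchain_google_genai package, that GEMINI_API_KEY is loaded from an environment variable (the diff does not show how the key is actually sourced), and that the function is served through gradio.ChatInterface, whose (message, history) contract matches the (question, history) signature here.

import os

import gradio as gr
from langchain_google_genai import ChatGoogleGenerativeAI

# Assumption: the key is read from an environment variable; the diff does
# not show how app.py actually loads GEMINI_API_KEY.
GEMINI_API_KEY = os.environ["GEMINI_API_KEY"]


def chat_query_IS(question, history):
    llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GEMINI_API_KEY)

    # The patch completes this prompt so {question} is interpolated; the
    # pre-patch f-string broke off mid-sentence and never named a topic.
    system_old = f"""Provide an elaborate, detailed and pointwise reply about the Topic as an experienced Electrical Engineer, as per relevant IS/IEEE/BIS Standard.
    Also, at the end of your reply, quote the Relevant Standard Referred. Topic : {question}
    """

    # Defined but unused, exactly as in the patched file: an alternative
    # Shakespeare-styled prompt for the same topic.
    system = f"""Provide a reply poetically precise as william shakespeare for the Topic : {question}
    """

    # The patch also restores the closing parenthesis here; without it the
    # module cannot even be parsed, which is the kind of failure that shows
    # up as a Spaces runtime error. invoke() accepts a plain string prompt
    # and returns a message object whose .content holds the reply text.
    result = llm.invoke(system_old)
    return result.content


# Assumed wiring only: gr.ChatInterface calls fn(message, history).
if __name__ == "__main__":
    gr.ChatInterface(fn=chat_query_IS).launch()

A quick python -m py_compile app.py (or simply importing the module locally) would have caught the unterminated invoke( call before deployment, since it is a parse-time error rather than a request-time one.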