abhivsh committed · Commit cd208bd · verified · 1 Parent(s): 956bcb4

Update app.py

Files changed (1)
  1. app.py +10 -10
app.py CHANGED
@@ -162,16 +162,16 @@ def chat_query_IS(question, chat_history_IS):
     """
     Handles queries about Indian/International Standards using OpenAI model.
     """
-    #llm = ChatOpenAI(model = llm_name, temperature = 0.1, api_key = OPENAI_API_KEY)
+    llm = ChatOpenAI(model = llm_name, temperature = 0.1, api_key = OPENAI_API_KEY)
     #llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GEMINI_API_KEY) ###
     #llm = OllamaLLM(model="unsloth/Llama-3.2-3B")
 
-    llm = HuggingFacePipeline.from_model_id(
-        model_id=llm_name,  # Replace with a valid Hugging Face model ID
-        task="text-generation",  # Specify the appropriate task for your model
-        device=0,  # Use -1 for CPU or 0 for GPU
-        model_kwargs={"temperature": 0.1}
-    )
+    # llm = HuggingFacePipeline.from_model_id(
+    #     model_id=llm_name,  # Replace with a valid Hugging Face model ID
+    #     task="text-generation",  # Specify the appropriate task for your model
+    #     device=0,  # Use -1 for CPU or 0 for GPU
+    #     model_kwargs={"temperature": 0.1}
+    # )
 
     system_prompt = f"""
     Provide an elaborate, detailed and point-wise reply about the topic as per relevant IS/IEEE/BIS standards:
@@ -181,10 +181,10 @@ def chat_query_IS(question, chat_history_IS):
 
     system = f""" Provide a reply poetically precise as william shakespeare for the Topic : {question}"""
 
-    result = llm(system_prompt)
-    chat_history_IS.append((system_prompt, result))
+    result = llm.invoke(system_prompt)
+    chat_history_IS.append((system_prompt, result.content))
 
-    return result
+    return result.content
 
 
 iface_doc = gr.ChatInterface(
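For context, this change swaps the commented-out HuggingFacePipeline backend back to ChatOpenAI and moves from calling the model object directly to LangChain's runnable interface: invoke() returns a message object rather than a plain string, and the generated text lives in its .content attribute, which is why the chat history and return value now use result.content. A minimal sketch of that pattern, assuming the langchain_openai package; the model name and API key below are placeholders, while the app itself supplies llm_name and OPENAI_API_KEY from its own configuration:

from langchain_openai import ChatOpenAI

# Placeholder values for illustration only; the app reads its own
# llm_name and OPENAI_API_KEY.
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.1, api_key="sk-placeholder")

# invoke() returns an AIMessage, not a string, so the generated text
# is read from result.content (as the updated code does).
result = llm.invoke("Provide a point-wise reply on the relevant IS/IEEE/BIS standards for this topic.")
print(result.content)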