krishnadhulipalla committed on
Commit
4d5f0e8
·
verified ·
1 Parent(s): bbcae45

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -8
app.py CHANGED
@@ -105,7 +105,8 @@ knowledge_base = KnowledgeBase()
105
  # repharser_llm = ChatNVIDIA(model="mistralai/mistral-7b-instruct-v0.3") | StrOutputParser()
106
  repharser_llm = ChatNVIDIA(model="microsoft/phi-3-mini-4k-instruct") | StrOutputParser()
107
  validation_llm = ChatNVIDIA(model="microsoft/phi-3-small-8k-instruct") | StrOutputParser()
108
- instruct_llm = ChatNVIDIA(model="mistralai/mixtral-8x22b-instruct-v0.1") | StrOutputParser()
 
109
  relevance_llm = ChatNVIDIA(model="nvidia/llama-3.1-nemotron-70b-instruct") | StrOutputParser()
110
  answer_llm = ChatOpenAI(
111
  model="gpt-4o",
@@ -369,7 +370,7 @@ extract_validation_inputs = RunnableLambda(lambda x: {
369
  validation_chain = (
370
  extract_validation_inputs
371
  | relevance_prompt
372
- | validation_llm
373
  | RunnableLambda(safe_json_parse)
374
  )
375
 
@@ -422,7 +423,7 @@ def RExtract(pydantic_class: Type[BaseModel], llm, prompt):
422
 
423
  knowledge_extractor = RExtract(
424
  pydantic_class=KnowledgeBase,
425
- llm=validation_llm,
426
  prompt=parser_prompt
427
  )
428
 
@@ -469,14 +470,10 @@ def chat_interface(message, history):
469
  "vectorstore": vectorstore,
470
  "bm25_retriever": bm25_retriever,
471
  }
472
-
473
- hybrid_result = hybrid_chain.invoke(inputs)
474
- hybrid_result["validation"] = validation_chain.invoke(hybrid_result)
475
-
476
  full_response = ""
477
 
478
  # Stream the response to user
479
- for chunk in answer_chain.stream(hybrid_result):
480
  if isinstance(chunk, dict) and "answer" in chunk:
481
  full_response += chunk["answer"]
482
  yield full_response
 
105
  # repharser_llm = ChatNVIDIA(model="mistralai/mistral-7b-instruct-v0.3") | StrOutputParser()
106
  repharser_llm = ChatNVIDIA(model="microsoft/phi-3-mini-4k-instruct") | StrOutputParser()
107
  validation_llm = ChatNVIDIA(model="microsoft/phi-3-small-8k-instruct") | StrOutputParser()
108
+ instruct_llm = ChatNVIDIA(model="mistralai/mistral-7b-instruct-v0.2") | StrOutputParser()
109
+ #instruct_llm = ChatNVIDIA(model="mistralai/mixtral-8x22b-instruct-v0.1") | StrOutputParser()
110
  relevance_llm = ChatNVIDIA(model="nvidia/llama-3.1-nemotron-70b-instruct") | StrOutputParser()
111
  answer_llm = ChatOpenAI(
112
  model="gpt-4o",
 
370
  validation_chain = (
371
  extract_validation_inputs
372
  | relevance_prompt
373
+ | instruct_llm
374
  | RunnableLambda(safe_json_parse)
375
  )
376
 
 
423
 
424
  knowledge_extractor = RExtract(
425
  pydantic_class=KnowledgeBase,
426
+ llm=instruct_llm,
427
  prompt=parser_prompt
428
  )
429
 
 
470
  "vectorstore": vectorstore,
471
  "bm25_retriever": bm25_retriever,
472
  }
 
 
 
 
473
  full_response = ""
474
 
475
  # Stream the response to user
476
+ for chunk in full_pipeline.stream(inputs):
477
  if isinstance(chunk, dict) and "answer" in chunk:
478
  full_response += chunk["answer"]
479
  yield full_response