"""RAG chatbot backend: retrieves meal-plan context from a vector store and
answers user queries via a Hugging Face hosted LLM."""
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.llms import HuggingFaceHub
from langchain.chains import RetrievalQA
from knowledge_base import load_vectorstore
import os
# Load vectorstore with meal plan embeddings and expose it as a retriever.
db = load_vectorstore()
retriever = db.as_retriever()

# Load LLM via the HuggingFace Inference API.
# Low temperature (0.3) keeps answers grounded in the retrieved context;
# max_new_tokens bounds response length. Requires HUGGINGFACEHUB_API_TOKEN
# in the environment.
llm = HuggingFaceHub(
    repo_id="mistralai/Mistral-7B-Instruct-v0.1",
    model_kwargs={"temperature": 0.3, "max_new_tokens": 512},
)

# Build the Retrieval QA chain. "stuff" concatenates all retrieved
# documents into a single prompt (simplest chain type; assumes the
# retrieved chunks fit in the model's context window).
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=retriever,
    chain_type="stuff",
)
def get_bot_response(query):
    """Run a user query through the RAG chain and return the answer text.

    Args:
        query: The user's question. Falsy or whitespace-only input is
            rejected without invoking the chain.

    Returns:
        The chain's answer string, a prompt to ask something for empty
        input, or an error message if the chain raises.
    """
    # Reject empty AND whitespace-only queries; the original guard let
    # strings like "   " through to the LLM.
    if not query or not query.strip():
        return "Please ask something."
    try:
        return qa_chain.run(query)
    except Exception as e:
        # Broad catch is deliberate: this is the UI boundary, so any
        # retrieval/inference failure is surfaced as a chat message
        # rather than crashing the app.
        return f"Error processing query: {str(e)}"