Spaces:
Sleeping
Sleeping
File size: 964 Bytes
3cf3f2d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 |
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.llms import HuggingFaceHub
from langchain.chains import RetrievalQA
from knowledge_base import load_vectorstore
import os
# --- Module-level RAG pipeline setup (runs once at import time) ---
# Load vectorstore with meal plan embeddings.
# NOTE(review): load_vectorstore() is a project helper (knowledge_base.py);
# presumably returns a Chroma instance per the import above — confirm.
db = load_vectorstore()
# Default retriever over the vector store (no search kwargs customized here).
retriever = db.as_retriever()
# Load LLM (HuggingFace Inference API) — remote Mistral-7B-Instruct.
# NOTE(review): HuggingFaceHub typically reads HUGGINGFACEHUB_API_TOKEN from
# the environment; verify it is set in the deployment environment.
llm = HuggingFaceHub(
repo_id="mistralai/Mistral-7B-Instruct-v0.1",
model_kwargs={"temperature": 0.3, "max_new_tokens": 512}
)
# Build Retrieval QA Chain. chain_type="stuff" concatenates all retrieved
# documents into a single prompt for the LLM.
qa_chain = RetrievalQA.from_chain_type(
llm=llm,
retriever=retriever,
chain_type="stuff"
)
def get_bot_response(query):
    """Answer a user query through the retrieval-augmented QA chain.

    Parameters
    ----------
    query : str | None
        The user's question. ``None``, empty, and whitespace-only input
        are rejected without invoking the chain.

    Returns
    -------
    str
        The chain's answer, a "Please ask something." prompt for blank
        input, or a formatted error message if the chain raises.
    """
    # Fix: the original `if not query` check let whitespace-only strings
    # (e.g. "   ") through to the chain; strip() closes that gap.
    if not query or not query.strip():
        return "Please ask something."
    try:
        result = qa_chain.run(query)
        return result
    except Exception as e:
        # Broad catch is deliberate: this is the top-level boundary for the
        # chat UI, so any chain/network failure becomes a user-visible
        # message instead of a crash.
        return f"Error processing query: {str(e)}"
|