DurgaDeepak committed
Commit ec0a2ba · verified · 1 Parent(s): 6eae2d9

Create agent.py

Files changed (1): agent.py +26 -0
agent.py ADDED (26 lines)
from meal_loader import documents  # documents prepared by meal_loader (LangChain Document objects)
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.llms import HuggingFaceHub
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory

# Embed the meal documents and index them in a FAISS vector store.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
db = FAISS.from_documents(documents, embeddings)
retriever = db.as_retriever(search_kwargs={"k": 3})  # retrieve the 3 most similar chunks per query

# Hosted Mistral model via the Hugging Face Inference API
# (requires a HUGGINGFACEHUB_API_TOKEN environment variable).
llm = HuggingFaceHub(
    repo_id="mistralai/Mistral-7B-Instruct-v0.1",
    model_kwargs={"temperature": 0.3, "max_new_tokens": 500},
)

# Conversation memory so follow-up questions keep their context.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

qa_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)


def generate_response(message, history, preferences):
    # `history` is accepted for chat-UI style callbacks but unused here:
    # the chain's own memory already tracks previous turns.
    prompt = f"""
    You are a meal plan assistant. The user has the following preferences:
    - Diet: {', '.join(preferences['diet'])}
    - Goal: {preferences['goal']}
    - Duration: {preferences['weeks']} week(s)

    User query: {message}
    """
    result = qa_chain({"question": prompt})
    return result["answer"]
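For context, a minimal usage sketch (not part of this commit): it assumes `preferences` is a plain dict carrying the `diet`, `goal`, and `weeks` keys the prompt expects. The example values and the empty `history` list below are illustrative assumptions, not anything defined by agent.py.

# Illustrative call only; the preference values here are made up for the example.
preferences = {
    "diet": ["vegetarian", "high-protein"],  # joined into the prompt's "Diet:" line
    "goal": "muscle gain",
    "weeks": 2,
}

reply = generate_response(
    "Plan my dinners for the first week.",
    history=[],                # unused by generate_response; kept for a chat-style signature
    preferences=preferences,
)
print(reply)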