# NOTE(review): the lines below are page-scrape residue (HuggingFace Spaces
# header, commit hashes, and a line-number gutter) that made this file invalid
# Python. Preserved here as comments:
#   Spaces: Sleeping / Sleeping
#   File size: 1,832 Bytes
#   6c94128 38b9656 6c94128 38b9656 6c94128 38b9656 6c94128 |
#   1 2 3 ... 52 |
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_anthropic import ChatAnthropic
# Prompt template for the RAG chain. ``{context}`` is filled with the
# formatted retrieved documents and ``{question}`` with the user's query
# (see execute_query below). The string itself is runtime data — do not edit
# casually: it defines the assistant's persona and answer constraints.
RAG_TEMPLATE = """
You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise but friendly.
If the question is about yourself, answer you're the digital assistant coach of OriginWS.
<context>
{context}
</context>
Answer the following question:
{question}"""
def format_docs(docs):
    """Concatenate the ``page_content`` of each document, separated by blank lines."""
    contents = [doc.page_content for doc in docs]
    return "\n\n".join(contents)
def execute_query(question, vectorstore, k=10):
    """Answer *question* with a RAG chain over *vectorstore*.

    Retrieves the top-``k`` similar documents, formats them into the
    RAG prompt, and asks the LLM for a concise answer.

    Args:
        question: The user's natural-language question.
        vectorstore: A LangChain vector store exposing ``similarity_search``.
        k: Number of documents to retrieve (default 10 preserves the
            original hard-coded behavior).

    Returns:
        The model's answer as a plain string.
    """
    print(f"Searching for: {question}")
    docs = vectorstore.similarity_search(question, k=k)
    print(f"Found {len(docs)} relevant documents for the query.")
    # temperature=0 keeps answers deterministic/reproducible.
    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    rag_prompt = ChatPromptTemplate.from_template(RAG_TEMPLATE)
    # The chain receives {"context": [Document, ...], "question": str};
    # collapse the document list into one string before prompt interpolation.
    # (note: the lambda parameter was renamed from `input`, which shadowed
    # the builtin of the same name)
    chain = (
        RunnablePassthrough.assign(context=lambda inputs: format_docs(inputs["context"]))
        | rag_prompt
        | llm
        | StrOutputParser()
    )
    response = chain.invoke({"context": docs, "question": question})
    return response
# (stray "|" page-scrape artifact removed — end of file)