File size: 2,698 Bytes
3be1020
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import os
from langchain_core.output_parsers import StrOutputParser
from langchain_groq import ChatGroq
from langchain_core.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from typing import List
from typing_extensions import TypedDict
from typing import Annotated
from langgraph.graph.message import AnyMessage, add_messages
from langchain_core.messages import HumanMessage, AIMessage
from langgraph.graph import END, StateGraph, START
from langgraph.checkpoint.memory import MemorySaver
from langchain_community.document_loaders import DirectoryLoader
from langchain_text_splitters import CharacterTextSplitter
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import datetime

# FastAPI application instance serving the chatbot HTTP API.
app = FastAPI()

# Open CORS policy: any origin, method and header is accepted.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# very permissive — consider restricting origins before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

class Request(BaseModel):
    """Payload of POST /request: the user's question and a conversation id."""

    # The user's question, forwarded verbatim to the LLM chain.
    query : str
    # Conversation identifier; used as the LangGraph checkpoint thread_id so
    # each client keeps its own message history.
    id : str

# Chat model used for all answers (API key is read from the environment by
# langchain_google_genai).
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0.5)
# In-memory checkpointer: conversation state is lost on process restart.
memory = MemorySaver()

# Load every markdown file from ./documents once at startup; the loaded
# documents are injected into the prompt as background knowledge.
# NOTE(review): the "./" prefix in the glob may not be necessary — confirm
# that DirectoryLoader matches ./documents/*.md as intended.
glob_pattern="./*.md"
directory_path = "./documents"
loader = DirectoryLoader(directory_path, glob=glob_pattern)
cv = loader.load()

# Prompt (in French) restricting the model to questions about Ninon Roche.
# Template variables: {document} = loaded CV documents, {historical} =
# formatted prior turns, {question} = latest user message, {date} = today.
prompt = PromptTemplate.from_template("""
Tu dois uniquement répondre aux questions posées à propos de Ninon Roche.
Pour t'aider dans tes réponses, voici un texte qui comporte des informations sur Ninon Roche :
{document}
Voici l'historique de la conversation :
{historical}
Et enfin, la question posée par l'utilisateur :
{question}
Voici la date du jour : {date}
""")

# Full pipeline: fill the prompt, call the LLM, extract the plain-text reply.
chain = prompt | llm | StrOutputParser()

def format_historical(hist):
    """Render past conversation turns as plain text for the prompt.

    Messages are assumed to alternate user/assistant, with the final
    element of *hist* being the not-yet-answered question, which is
    deliberately excluded. Only the last 5 exchanges (10 lines) are kept.
    """
    upper = len(hist) - 2  # stop before the pending question
    turns = []
    i = 0
    while i < upper:
        turns.append(f"Utilisateur : {hist[i].content}")
        turns.append(f"Assistant : {hist[i + 1].content}")
        i += 2
    return "\n".join(turns[-10:])


class GraphState(TypedDict):
    """LangGraph state: the running conversation transcript."""

    # add_messages appends new messages instead of replacing the list,
    # so the full history accumulates across graph invocations.
    messages: Annotated[list[AnyMessage], add_messages]

def chatbot(state : GraphState):
    """Single graph node: answer the latest user message using the CV,
    the formatted conversation history and today's date."""
    latest_question = state['messages'][-1].content
    payload = {
        'document': cv,
        'historical': format_historical(state['messages']),
        'question': latest_question,
        'date': datetime.date.today(),
    }
    answer = chain.invoke(payload)
    # Returned messages are merged into state via add_messages.
    return {"messages": [AIMessage(content=answer)]}

# Minimal single-node graph: START -> chatbot -> END.
workflow = StateGraph(GraphState)
workflow.add_node('chatbot', chatbot)

workflow.add_edge(START,'chatbot')
workflow.add_edge('chatbot', END)

# Compiled graph with per-thread checkpointing so each conversation id
# keeps its own history (see the /request endpoint).
app_chatbot = workflow.compile(checkpointer=memory)

@app.post('/request')
def request(req: Request):
    """Handle one chat turn and return the assistant's reply.

    The client-supplied id selects the checkpoint thread, so repeated calls
    with the same id continue the same conversation.
    """
    thread_config = {"configurable": {"thread_id": req.id}}
    final_state = app_chatbot.invoke(
        {"messages": [HumanMessage(content=req.query)]},
        thread_config,
        stream_mode="values",
    )
    return {"response": final_state['messages'][-1].content}