File size: 3,635 Bytes
8d6e3ff
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b8f1eac
8d6e3ff
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
import os
import streamlit as st
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_community.llms import HuggingFaceHub
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate

# Environment
# os.environ values must be str: the original `os.environ[k] = os.getenv(k)`
# raises TypeError when the variable is unset (os.getenv returns None).
# Only propagate the token when it is actually present.
_hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if _hf_token is not None:
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = _hf_token

# Sentence-transformers model used to embed both documents and queries.
EMBEDDING_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"

def get_response(question):
    """Send *question* through the session's conversational chain and return a cleaned answer.

    Returns the chain's "answer" value, or an Indonesian fallback message when
    the chain produced no answer. If the model echoed the prompt's trailing
    "Answer:" marker, only the text after the marker is kept.

    NOTE(review): the original decorated this with @st.cache_resource, which is
    intended for shared resources (models, DB connections), not per-call
    results. It cached the answer string globally per question text — across
    sessions and regardless of the conversation memory held by the chain — so
    the decorator is removed and every call consults the chain fresh.
    """
    result = st.session_state.conversational_chain({"question": question})
    # Fallback ("Sorry, I don't know that answer.") when the chain yields no answer key.
    response_text = result.get("answer", "Maaf, saya tidak mengetahui jawaban itu.")

    # The prompt template ends with "Answer:"; some models echo the prompt,
    # so strip everything up to (and including) the first marker.
    if "Answer:" in response_text:
        response_text = response_text.split("Answer:")[1].strip()
    return response_text


def setup_vectorstore():
    """Open the persisted Chroma vector store using MiniLM sentence embeddings."""
    embedding_fn = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
    store = Chroma(
        persist_directory="./vector_db_dir",
        embedding_function=embedding_fn,
    )
    return store

def chat_chain(vectorstore):
    """Build a ConversationalRetrievalChain over *vectorstore* backed by a HuggingFace Hub LLM."""
    llm = HuggingFaceHub(
        repo_id="SeaLLMs/SeaLLMs-v3-7B-Chat",
        model_kwargs={"temperature": 1, "max_new_tokens": 1024},
    )

    # Prompt handed to the "stuff" combine-documents step; expects the
    # memory's {chat_history} plus the incoming {question}.
    template = """
    You are an assistant specialized in women's health. Use the retrieved documents to answer the user's question.
    If you don't know the answer or the information is not in the documents, reply with: "I'm sorry, I don't know."

    Chat History:
    {chat_history}

    Question:
    {question}

    Answer:"""
    qa_prompt = PromptTemplate(
        input_variables=["chat_history", "question"],
        template=template,
    )

    # Top-2 similarity search against the vector store.
    doc_retriever = vectorstore.as_retriever(
        search_type="similarity",
        search_kwargs={"k": 2},
    )

    # Buffered history exposed as {chat_history}; "answer" is the chain's output key.
    buffer_memory = ConversationBufferMemory(
        llm=llm,
        output_key="answer",
        memory_key="chat_history",
        return_messages=True,
    )

    return ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=doc_retriever,
        chain_type="stuff",
        memory=buffer_memory,
        verbose=True,
        combine_docs_chain_kwargs={"prompt": qa_prompt},
    )

# Streamlit App
st.set_page_config(
    page_title="Asisten Kesehatan Wanita",
    page_icon="πŸ’Š",
    layout="centered",
)

st.title("πŸ’Š Asisten Kesehatan Wanita")

# One-time session initialisation — Streamlit re-runs this script on every
# interaction, so each value is created only when absent from session_state.
st.session_state.setdefault("chat_history", [])

if "vectorstore" not in st.session_state:
    st.session_state.vectorstore = setup_vectorstore()

if "conversational_chain" not in st.session_state:
    st.session_state.conversational_chain = chat_chain(st.session_state.vectorstore)

# Display Chat History (the script re-runs on every interaction, so the full
# transcript is replayed from session_state each time).
for message in st.session_state.chat_history:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# User Input ("Ask something...")
user_input = st.chat_input("Tanyakan sesuatu...")

if user_input:
    st.session_state.chat_history.append({"role": "user", "content": user_input})

    with st.chat_message("user"):
        st.markdown(user_input)

    with st.chat_message("assistant"):
        # Route through get_response instead of calling the chain directly:
        # the original handler bypassed it, so the "Answer:" prompt-echo
        # cleanup in get_response never ran on displayed answers.
        assistant_response = get_response(user_input)
        st.markdown(assistant_response)
        st.session_state.chat_history.append(
            {"role": "assistant", "content": assistant_response}
        )