"""Streamlit chat app for BinDoc GmbH.

Clones a private Hugging Face dataset repository containing a PDF,
indexes the PDF into a FAISS vector store (cached on disk as a pickle),
and answers user questions about it through an OpenAI-backed QA chain.

NOTE(review): the original file was whitespace-mangled and most inline
HTML/CSS markup (cloud-button divs, chat-bubble divs, the CSS that hides
Streamlit chrome) was lost during extraction. The markup below is a
minimal, plausible reconstruction — confirm it against the deployed app.
"""

import os
import pickle

import streamlit as st
from streamlit_extras.add_vertical_space import add_vertical_space
from huggingface_hub import Repository
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.callbacks import get_openai_callback  # imported for token tracking; currently unused

# Step 1: clone (or update) the dataset repository that holds the PDF.
# HUB_TOKEN must be supplied as an environment secret.
repo = Repository(
    local_dir="Private_Book",            # local directory to clone into
    repo_type="dataset",                 # this is a dataset repository
    clone_from="Anne31415/Private_Book", # repository URL
    token=os.getenv("HUB_TOKEN"),        # secret token for authentication
)
repo.git_pull()  # pull the latest changes (if any)

# Step 2: location of the PDF inside the cloned repository.
pdf_file_path = "Private_Book/KOMBI_all2.pdf"


def cloud_button(label, key=None, color=None):
    """Render a decorative 'cloud'-styled button via raw HTML.

    Args:
        label: Text shown inside the button.
        key: Optional key used to build a unique element id
            (falls back to the label itself).
        color: Optional color-variant suffix mapped to a CSS class.
    """
    button_id = f"cloud-button-{key or label}"
    color_class = f"color-{color}" if color else ""
    # Scale the number of decorative circles with the label length,
    # clamped to the range [3, 12].
    num_circles = max(3, min(12, len(label) // 4))

    # NOTE(review): the circle markup was lost in the original file;
    # plain divs carrying a .cloud-circle class are assumed here.
    circles_html = ''.join(
        ['<div class="cloud-circle"></div>' for _ in range(num_circles // 2)]
        + ['<div class="cloud-circle"></div>' for _ in range(num_circles // 2)]
    )
    # NOTE(review): wrapper markup also lost; reconstructed minimally.
    cloud_button_html = f"""<div id="{button_id}" class="cloud-button {color_class}">
{circles_html}
<span class="cloud-button-label">{label}</span>
</div>"""
    st.markdown(cloud_button_html, unsafe_allow_html=True)


# Examples
cloud_button("Short Text", color="1")
cloud_button("This is a longer piece of text", color="2")
cloud_button("This is an even longer piece of text to test the cloud button", color="3")


def load_pdf(file_path):
    """Extract text from *file_path*, chunk it, and return a FAISS store.

    The vector store is cached on disk as '<basename>.pkl' so the OpenAI
    embeddings are only computed once per PDF.

    Args:
        file_path: Path to the PDF file to index.

    Returns:
        A FAISS vector store over the PDF's text chunks.
    """
    pdf_reader = PdfReader(file_path)
    text = ""
    for page in pdf_reader.pages:
        # Fix: extract_text() can return None on image-only pages,
        # which would make `text += ...` raise a TypeError.
        text += page.extract_text() or ""

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    chunks = text_splitter.split_text(text=text)

    store_name, _ = os.path.splitext(os.path.basename(file_path))
    pkl_path = f"{store_name}.pkl"
    if os.path.exists(pkl_path):
        # SECURITY: pickle.load executes arbitrary code on load. This is
        # only acceptable because the cache file is produced locally by
        # this same app; never load a pickle from an untrusted source.
        with open(pkl_path, "rb") as f:
            VectorStore = pickle.load(f)
    else:
        embeddings = OpenAIEmbeddings()
        VectorStore = FAISS.from_texts(chunks, embedding=embeddings)
        with open(pkl_path, "wb") as f:
            pickle.dump(VectorStore, f)
    return VectorStore


def load_chatbot():
    """Return a 'stuff'-type QA chain backed by the default OpenAI LLM."""
    return load_qa_chain(llm=OpenAI(), chain_type="stuff")


def display_chat_history(chat_history):
    """Render every (sender, message, status) triple in the history.

    Messages flagged "new" are highlighted; otherwise user and bot
    messages each get their own background color.

    Args:
        chat_history: Iterable of (sender, message, status) tuples where
            sender is "User" or "Bot" and status is "new" or "old".
    """
    for chat in chat_history:
        background_color = (
            "#FFA07A" if chat[2] == "new"
            else "#acf" if chat[0] == "User"
            else "#caf"
        )
        # NOTE(review): the chat-bubble markup was lost in the original;
        # a simple styled div is assumed here.
        st.markdown(
            f"<div style='background-color: {background_color}; padding: 8px; "
            f"border-radius: 8px; margin: 4px 0;'>{chat[0]}: {chat[1]}</div>",
            unsafe_allow_html=True,
        )


def main():
    """Streamlit entry point: sidebar, chat UI, and the question/answer loop."""
    with st.sidebar:
        st.title('BinDoc GmbH')
        st.markdown(
            "Experience revolutionary interaction with BinDocs Chat App, "
            "leveraging state-of-the-art AI technology."
        )
        add_vertical_space(1)  # adjust as per the desired spacing
        # NOTE(review): line-break markup inside this blurb was lost in
        # the original; <br> tags are assumed.
        st.markdown("""Hello! I’m here to assist you with:<br><br>
šŸ“˜ **Glossary Inquiries:**<br>
I can clarify terms like "DiGA", "AOP", or "BfArM", providing clear and concise explanations to help you understand our content better.<br><br>
šŸ†˜ **Help Page Navigation:**<br>
Ask me if you forgot your password or want to know more about topics related to the platform.<br><br>
šŸ“° **Latest Whitepapers Insights:**<br>
Curious about our recent publications? Feel free to ask about our latest whitepapers!<br>
""", unsafe_allow_html=True)
        add_vertical_space(1)
        st.write('Made with ā¤ļø by BinDoc GmbH')

    # NOTE(review): read but never passed on — the langchain/OpenAI
    # clients pick the key up from the environment themselves.
    api_key = os.getenv("OPENAI_API_KEY")

    # NOTE(review): the CSS that hid Streamlit's default chrome was lost
    # in the original file; a typical hide-menu/footer stanza is restored.
    hide_streamlit_style = """<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>"""
    st.markdown(hide_streamlit_style, unsafe_allow_html=True)

    # Main content
    st.title("Welcome to BinDocs ChatBot! šŸ¤–")

    # Fix: use the PDF pulled from the dataset repository at module load,
    # not the placeholder "path_to_your_pdf_file.pdf" which never exists.
    pdf_path = pdf_file_path
    if not os.path.exists(pdf_path):
        st.error("File not found. Please check the file path.")
        return

    if "chat_history" not in st.session_state:
        st.session_state['chat_history'] = []

    display_chat_history(st.session_state['chat_history'])

    new_messages_placeholder = st.empty()

    query = st.text_input("Ask questions about your PDF file (in any preferred language):")

    # Fire a QA round-trip when the button is pressed, or when a
    # non-empty query differs from the last recorded message.
    # Fix: the original also fired on an EMPTY query whenever history was
    # non-empty; require a non-empty query for the implicit triggers.
    history = st.session_state['chat_history']
    ask_clicked = st.button("Ask")
    if ask_clicked or (query and not history) or (query and history and query != history[-1][1]):
        history.append(("User", query, "new"))
        loading_message = st.empty()
        loading_message.text('Bot is thinking...')

        VectorStore = load_pdf(pdf_path)
        chain = load_chatbot()
        docs = VectorStore.similarity_search(query=query, k=3)
        response = chain.run(input_documents=docs, question=query)
        history.append(("Bot", response, "new"))

        # Show only the latest exchange inside the placeholder so it
        # stands out from the already-rendered history.
        for chat in history[-2:]:
            background_color = (
                "#FFA07A" if chat[2] == "new"
                else "#acf" if chat[0] == "User"
                else "#caf"
            )
            new_messages_placeholder.markdown(
                f"<div style='background-color: {background_color}; padding: 8px; "
                f"border-radius: 8px; margin: 4px 0;'>{chat[0]}: {chat[1]}</div>",
                unsafe_allow_html=True,
            )
        loading_message.empty()

    # Downgrade every message to "old" so the highlight only lasts for
    # the run in which the message arrived.
    st.session_state['chat_history'] = [
        (sender, msg, "old") for sender, msg, _ in st.session_state['chat_history']
    ]


if __name__ == "__main__":
    main()