# streamlit_app.py
import streamlit as st
import sys
import os

# Add the directory containing app.py to the Python path
# This assumes app.py is in the same directory as streamlit_app.py
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

# Import your respond function and any necessary global variables from app.py
# Make sure app.py loads the model, tokenizer, etc. when imported
try:
    from app import respond, model_id # Import your main function and model_id
    # You might also need to import other things if respond relies on globals directly
    # from app import model, tokenizer, embedder, nlp, data, descriptions, embeddings, ...
    print("Successfully imported respond function from app.py")
except ImportError as e:
    st.error(f"Error importing core logic from app.py: {e}")
    st.stop() # Stop the app if the core logic can't be loaded
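
# Assumed interface for app.respond, inferred from how it is called below
# (a sketch; verify against your app.py):
#   def respond(message: str, history: list[tuple[str, str]]) -> tuple[str, list[tuple[str, str]]]
# where the returned history is Gradio-style [(user, bot), ...] and its last
# entry should be the current turn's (message, bot_response).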

# Set Streamlit page config
st.set_page_config(page_title="Business Q&A Assistant")

st.title(f"Business Q&A Assistant with {model_id}")
st.write("Ask questions about the business (details from a Google Sheet) or general knowledge (via search).")

# Initialize chat history in Streamlit's session state
# Session state persists across reruns for a single user session
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
if prompt := st.chat_input("Your Question"):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # Convert Streamlit's history to the Gradio-style format respond() expects.
    # Streamlit's session state is a flat list of role/content dicts, while
    # respond() takes a list of (user, bot) tuples covering the turns *before*
    # the current prompt. Pair each user message with the assistant reply that
    # follows it, skipping the just-appended user prompt.
    history_for_respond = []
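    # Illustrative shapes of the two formats (hypothetical values, not from app.py):
    #   Streamlit: [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}]
    #   Gradio:    [("Hi", "Hello!")]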
    for i in range(len(st.session_state.messages) - 1):
        if (st.session_state.messages[i]["role"] == "user"
                and st.session_state.messages[i + 1]["role"] == "assistant"):
            history_for_respond.append(
                (st.session_state.messages[i]["content"],
                 st.session_state.messages[i + 1]["content"])
            )

    # Call the core respond() logic; it returns ("", updated_chat_history),
    # and we only need the updated-history part.
    with st.spinner("Thinking..."):
        _, updated_gradio_history = respond(prompt, history_for_respond)

        # The respond function modifies and returns the history in Gradio format.
        # The last entry in updated_gradio_history should be the current turn's (user_prompt, bot_response).
        # We need to extract the bot_response part and add it to Streamlit's session state.

        if updated_gradio_history and updated_gradio_history[-1][0] == prompt:
            bot_response = updated_gradio_history[-1][1]
        else:
            # Fallback if the history structure is unexpected
            bot_response = "Sorry, I couldn't get a response from the model."
            print("Warning: respond function returned history in an unexpected format.")


    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": bot_response})
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(bot_response)

# You might want a clear history button similar to Gradio's
if st.button("Clear Chat History"):
    st.session_state.messages = []
    st.rerun()  # Rerun the app to clear the display (st.experimental_rerun is deprecated)
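
# To launch this app locally (assuming Streamlit is installed):
#   streamlit run streamlit_app.py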