Futuresony committed
Commit f4a8e2e · verified · 1 Parent(s): 861abb8

Update streamlit_app.py

Files changed (1)
  streamlit_app.py  +10 −12
streamlit_app.py CHANGED
@@ -46,29 +46,27 @@ if prompt := st.chat_input("Your Question"):
     # Gradio's history is [(user, bot), (user, bot), ...]
     # Streamlit's session state is a list of dicts [{"role": "user", "content": "..."}]
     # We need to convert Streamlit's history format to Gradio's format for your respond function
-    gradio_chat_history = []
-    # Start from the second message if the first was from the system/initial state
-    # Or just iterate through pairs, skipping the latest user prompt for history pass
-    # The respond function expects history *before* the current turn
     history_for_respond = []
     # Iterate through messages, excluding the very last user prompt which is the current input
     for i in range(len(st.session_state.messages) - 1):
         if st.session_state.messages[i]["role"] == "user" and st.session_state.messages[i+1]["role"] == "assistant":
             history_for_respond.append((st.session_state.messages[i]["content"], st.session_state.messages[i+1]["content"]))
-        # Handle cases where the last turn was user, but no assistant response yet (e.g., previous session)
-        elif st.session_state.messages[i]["role"] == "user" and (i+1 == len(st.session_state.messages) or st.session_state.messages[i+1]["role"] == "user"):
-            # If the last message was user and there's no following assistant message,
-            # this user message is part of the current turn's input, not history.
-            # This part of the loop structure might need adjustment based on how history is built
-            pass # Skip the current user prompt
-
 
     # Call your respond function
     with st.spinner("Thinking..."):
         # Your respond function returns ("", updated_chat_history)
         # We only need the updated chat history part
         # We also need to pass the current user prompt as the first argument
-        _, updated_gradio_history = respond(prompt, history_for_respond)  # Call your core logic
+        # Ensure we are unpacking the result correctly into two variables
+        response_tuple = respond(prompt, history_for_respond)
+        # Check if the returned value is a tuple with at least two elements
+        if isinstance(response_tuple, tuple) and len(response_tuple) >= 2:
+            _, updated_gradio_history = response_tuple
+        else:
+            # Handle the case where the function did not return the expected tuple
+            print(f"Warning: respond function returned unexpected value: {response_tuple}")
+            updated_gradio_history = history_for_respond + [(prompt, "Sorry, I couldn't get a valid response.")]
+
 
     # The respond function modifies and returns the history in Gradio format.
     # The last entry in updated_gradio_history should be the current turn's (user_prompt, bot_response).
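
For context, the following is a minimal, self-contained sketch of the pattern this hunk converges on: converting Streamlit's role/content message history into Gradio-style (user, bot) pairs, calling respond inside a spinner, and unpacking its return value defensively. The respond stub, the page title, and the final rendering of the assistant reply are illustrative assumptions added so the sketch runs standalone; only the conversion loop and the unpacking logic mirror the committed code.

# Sketch only: assumes a Gradio-style respond(message, history) that returns
# ("", history + [(message, reply)]), as described in the comments above.
import streamlit as st

def respond(message, history):
    # Placeholder for the app's real model call; echoes the input so the
    # sketch runs without any backend.
    return "", history + [(message, f"Echo: {message}")]

st.title("Chat demo")  # hypothetical title for illustration

if "messages" not in st.session_state:
    st.session_state.messages = []  # list of {"role": ..., "content": ...}

# Replay prior turns stored in session state.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

if prompt := st.chat_input("Your Question"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Convert Streamlit's dicts into Gradio-style (user, bot) pairs,
    # excluding the prompt just appended (it belongs to the current turn).
    history_for_respond = []
    msgs = st.session_state.messages
    for i in range(len(msgs) - 1):
        if msgs[i]["role"] == "user" and msgs[i + 1]["role"] == "assistant":
            history_for_respond.append((msgs[i]["content"], msgs[i + 1]["content"]))

    with st.spinner("Thinking..."):
        result = respond(prompt, history_for_respond)
        # Defensive unpacking, mirroring the committed change: only trust the
        # result if it is the expected ("", updated_history) tuple.
        if isinstance(result, tuple) and len(result) >= 2:
            _, updated_history = result
        else:
            updated_history = history_for_respond + [(prompt, "Sorry, I couldn't get a valid response.")]

    # The last pair in updated_history is the current turn's (prompt, reply).
    bot_reply = updated_history[-1][1] if updated_history else "Sorry, no response."
    st.session_state.messages.append({"role": "assistant", "content": bot_reply})
    with st.chat_message("assistant"):
        st.markdown(bot_reply)

The fallback branch appends an apology pair instead of raising, so a malformed return from respond degrades to a visible error message in the chat rather than crashing the Streamlit rerun.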