mohammedelfeky-ai committed on
Commit fd4101e · verified · 1 Parent(s): 9b8d24a

Update Gradio_UI.py

Files changed (1)
  1. Gradio_UI.py +17 -47
Gradio_UI.py CHANGED
@@ -123,16 +123,14 @@ def stream_to_gradio(
    final_answer_content = all_step_logs[-1]

    actual_content_for_handling = final_answer_content
-    # Check if final_answer_content is a wrapper like FinalAnswerStep and extract the core content
-    if hasattr(final_answer_content, 'final_answer') and not isinstance(final_answer_content, (str, PILImage.Image, tuple)): # Added tuple to avoid unwrapping already formatted image content
+    if hasattr(final_answer_content, 'final_answer') and not isinstance(final_answer_content, (str, PILImage.Image, tuple)):
        actual_content_for_handling = final_answer_content.final_answer
        print(f"DEBUG Gradio: Extracted actual_content_for_handling from FinalAnswerStep: {type(actual_content_for_handling)}")

-    # Priority 1: Handle raw PIL Image object for direct display
    if isinstance(actual_content_for_handling, PILImage.Image):
        print("DEBUG Gradio (stream_to_gradio): Actual content IS a raw PIL Image.")
        try:
-            with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp_file: # delete=False is important
+            with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp_file:
                actual_content_for_handling.save(tmp_file, format="PNG")
                image_path_for_gradio = tmp_file.name
                print(f"DEBUG Gradio: Saved PIL image to temp path: {image_path_for_gradio}")
@@ -143,7 +141,6 @@ def stream_to_gradio(
            yield {"role": "assistant", "content": f"**Final Answer (Error displaying image):** {e}"}
            return

-    # Priority 2: Use smolagents' type handling if not a raw PIL image
    final_answer_processed = handle_agent_output_types(actual_content_for_handling)
    print(f"DEBUG Gradio: final_answer_processed type after handle_agent_output_types: {type(final_answer_processed)}")

@@ -168,20 +165,15 @@ def stream_to_gradio(
        print(f"DEBUG Gradio: {err_msg}")
        yield {"role": "assistant", "content": f"**Final Answer ({err_msg})**"}
    else:
-        # This will display the string representation of the object if not specifically handled
        yield {"role": "assistant", "content": f"**Final Answer:**\n{str(final_answer_processed)}"}


class GradioUI:
-    def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None): # file_upload_folder kept for potential future use
+    def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None):
        if not _is_package_available("gradio"):
            raise ModuleNotFoundError("Install 'gradio': `pip install 'smolagents[gradio]'`")
        self.agent = agent
-        # self.file_upload_folder = file_upload_folder # Commented out as per request to simplify
-        # if self.file_upload_folder is not None:
-        #     if not os.path.exists(self.file_upload_folder):
-        #         os.makedirs(self.file_upload_folder, exist_ok=True)
-        self.file_upload_folder = None # Explicitly disable for now
+        self.file_upload_folder = None # File upload UI is disabled
        self._latest_file_path_for_download = None

    def _check_for_created_file(self):
@@ -191,26 +183,22 @@ class GradioUI:
        for log_entry in reversed(self.agent.interaction_logs):
            if isinstance(log_entry, ActionStep):
                observations = getattr(log_entry, 'observations', None)
-                tool_calls = getattr(log_entry, 'tool_calls', []) # Get tool calls if any
+                tool_calls = getattr(log_entry, 'tool_calls', [])

-                # Check if this step involved python_interpreter
                is_python_interpreter_step = any(tc.name == "python_interpreter" for tc in tool_calls)

                if is_python_interpreter_step and observations and isinstance(observations, str):
-                    print(f"DEBUG Gradio UI (_check_for_file): Python Interpreter Observations: '''{observations[:500]}...'''") # Log snippet
+                    # CRITICAL DEBUG LINE: Print the exact observations string
+                    print(f"DEBUG Gradio UI (_check_for_file): Python Interpreter Observations: '''{observations}'''")

-                    # Regex to find paths specifically printed by our create_document tool
-                    # This pattern expects: "Document created (docx): /tmp/random/generated_document.docx"
                    match = re.search(
-                        # Capture group 1 is the prefix, group 2 is the path
-                        r"(Document created \((?:docx|pdf|txt)\):|Document converted to PDF:)\s*(/tmp/[a-zA-Z0-9_]+/generated_document\.(?:docx|pdf|txt))",
+                        r"(?:Document created \((?:docx|pdf|txt)\):|Document converted to PDF:)\s*(/tmp/[a-zA-Z0-9_]+/generated_document\.(?:docx|pdf|txt))",
                        observations,
                        re.MULTILINE
                    )

                    if match:
-                        # extracted_prefix = match.group(1) # e.g., "Document created (docx):"
-                        extracted_path = match.group(2) # The actual path
+                        extracted_path = match.group(1) # Path is group 1 due to simplified regex
                        print(f"DEBUG Gradio UI: Regex matched. Extracted path: '{extracted_path}'")
                        normalized_path = os.path.normpath(extracted_path)
                        if os.path.exists(normalized_path):
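The regex change makes the message prefix a non-capturing group, which is why the extraction switches from `match.group(2)` to `match.group(1)`. A quick check of the new pattern against a sample observation line (the `/tmp/...` path is hypothetical):

```python
import re

# Hypothetical observation line in the format the create_document tool prints
observation = "Document created (docx): /tmp/tmpabc123/generated_document.docx"

pattern = (
    r"(?:Document created \((?:docx|pdf|txt)\):|Document converted to PDF:)"
    r"\s*(/tmp/[a-zA-Z0-9_]+/generated_document\.(?:docx|pdf|txt))"
)

match = re.search(pattern, observation, re.MULTILINE)
if match:
    # The prefix alternation no longer captures, so the path is group 1
    print(match.group(1))  # /tmp/tmpabc123/generated_document.docx
```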
@@ -229,31 +217,21 @@ class GradioUI:

        updated_chat_history = current_chat_history + [{"role": "user", "content": prompt_text}]

-        # Initial yield: show user message, hide download components until agent run is complete
        yield updated_chat_history, gr.update(visible=False), gr.update(value=None, visible=False)

        agent_responses_for_history = []
        for msg_dict in stream_to_gradio(self.agent, task=prompt_text, reset_agent_memory=False):
            agent_responses_for_history.append(msg_dict)
-            # Yield progressively to update chat, keep download components hidden during streaming
            yield updated_chat_history + agent_responses_for_history, gr.update(visible=False), gr.update(value=None, visible=False)

-        # After all agent messages are processed and added to history
-        final_chat_display_content = updated_chat_history + agent_responses_for_history
-
-        # Now check for created files to decide visibility of download button
        file_found_for_download = self._check_for_created_file()

+        final_chat_display_content = updated_chat_history + agent_responses_for_history
        print(f"DEBUG Gradio: Final chat history for display: {len(final_chat_display_content)} messages. File found for download button: {file_found_for_download}")
-        # Final yield: update chat, set visibility of download button, keep file display component hidden (it's shown on button click)
        yield final_chat_display_content, gr.update(visible=file_found_for_download), gr.update(value=None, visible=False)

-
-    def log_user_message(self, text_input_value: str): # Removed current_file_uploads as upload is disabled
+    def log_user_message(self, text_input_value: str):
        full_prompt = text_input_value
-        # if current_file_uploads: # This part is now disabled
-        #     files_str = ", ".join([os.path.basename(f) for f in current_file_uploads])
-        #     full_prompt += f"\n\n[Uploaded files for context: {files_str}]"
        print(f"DEBUG Gradio: Prepared prompt for agent: {full_prompt[:300]}...")
        return full_prompt, ""
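For context, `interact_with_agent` is a generator callback whose three yielded values map onto three output components: the chatbot history, the download button, and the file display. The download widgets stay hidden on every intermediate yield, and the button's visibility is only decided on the final one. A simplified sketch of that contract, with stand-in values where the real code calls the agent and `_check_for_created_file`:

```python
import gradio as gr


def interact(prompt: str, history: list):
    # Outputs map to: chatbot, download button, file display component.
    history = history + [{"role": "user", "content": prompt}]
    yield history, gr.update(visible=False), gr.update(value=None, visible=False)

    for chunk in ["Thinking...", "Done."]:  # stand-in for the agent's streamed messages
        history = history + [{"role": "assistant", "content": chunk}]
        yield history, gr.update(visible=False), gr.update(value=None, visible=False)

    file_found = True  # stand-in for self._check_for_created_file()
    yield history, gr.update(visible=file_found), gr.update(value=None, visible=False)
```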
 
@@ -269,13 +247,12 @@ class GradioUI:

    def launch(self, **kwargs):
        with gr.Blocks(fill_height=True, theme=gr.themes.Soft(primary_hue=gr.themes.colors.blue)) as demo:
-            # file_uploads_log_state = gr.State([]) # No longer needed as upload is disabled
            prepared_prompt_for_agent = gr.State("")

            gr.Markdown("## Smol Talk with your Agent")

            with gr.Row(equal_height=False):
-                with gr.Column(scale=3): # Main chat column
+                with gr.Column(scale=3):
                    chatbot_display = gr.Chatbot(
                        type="messages",
                        avatar_images=(None, "https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo-round.png"),
@@ -290,26 +267,19 @@ class GradioUI:
                        show_label=False
                    )

-                with gr.Column(scale=1): # Sidebar column
-                    # --- File Upload Section Removed/Commented Out ---
+                with gr.Column(scale=1):
+                    # File Upload UI is disabled
                    # if self.file_upload_folder is not None:
                    #     with gr.Accordion("File Upload", open=False):
-                    #         file_uploader = gr.File(label="Upload a supporting file (PDF, DOCX, TXT, JPG, PNG)")
-                    #         upload_status_text = gr.Textbox(label="Upload Status", interactive=False, lines=1)
-                    #         file_uploader.upload(
-                    #             self.upload_file,
-                    #             [file_uploader, file_uploads_log_state],
-                    #             [upload_status_text, file_uploads_log_state],
-                    #         )
+                    #         # ... (upload components would be here)

-                    with gr.Accordion("Generated File", open=True): # Keep this section
+                    with gr.Accordion("Generated File", open=True):
                        download_action_button = gr.Button("Download Generated File", visible=False)
                        file_download_display_component = gr.File(label="Downloadable Document", visible=False, interactive=False)

-            # Event Handling Chain for Text Submission
            text_message_input.submit(
                self.log_user_message,
-                [text_message_input], # Removed file_uploads_log_state from inputs
+                [text_message_input], # Only text_input as file uploads are disabled
                [prepared_prompt_for_agent, text_message_input]
            ).then(
                self.interact_with_agent,
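The submit chain runs `log_user_message` first (it returns the prepared prompt plus an empty string, which clears the textbox) and then hands the stored prompt to `interact_with_agent`. A self-contained sketch of the same `.submit(...).then(...)` wiring, with illustrative component and function names:

```python
import gradio as gr


def prepare(text: str):
    # Mirrors log_user_message: return (prepared prompt, cleared textbox value).
    return text, ""


def respond(prompt: str, history: list):
    # Stand-in for interact_with_agent; echoes the prompt back.
    return history + [
        {"role": "user", "content": prompt},
        {"role": "assistant", "content": f"Echo: {prompt}"},
    ]


with gr.Blocks() as demo:
    chat = gr.Chatbot(type="messages", value=[])
    prepared_prompt = gr.State("")
    box = gr.Textbox(show_label=False)

    box.submit(prepare, [box], [prepared_prompt, box]).then(
        respond, [prepared_prompt, chat], [chat]
    )

# demo.launch()
```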
 