Update app.py
app.py CHANGED
@@ -589,11 +589,12 @@ Available: {match.get('Available', 'N/A')}"""
             final_response = "..." # Indicate potentially empty response
 
 
-
-
-
+        except Exception as gen_error: # <--- Error occurred here previously
+            print(f"Error during model generation in Pass 2: {gen_error}")
+            final_response = "Error generating response in Pass 2."
 
-
+
+        # --- Post-process Final Response from Pass 2 ---
         cleaned_response = final_response
         # Filter out the Pass 2 instructions and context markers that might bleed through
         lines = cleaned_response.split('\n')
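For orientation, the block this hunk restores is a standard try/except around the Pass 2 generation call. A minimal sketch, assuming a Hugging Face transformers-style model and tokenizer; the prompt handling and generation arguments are illustrative, not taken from app.py:

def generate_pass2(model, tokenizer, prompt):
    """Hedged stand-in for the Pass 2 generation step shown in the hunk above."""
    try:
        inputs = tokenizer(prompt, return_tensors="pt")
        outputs = model.generate(**inputs, max_new_tokens=256)  # illustrative settings
        final_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        if not final_response.strip():
            final_response = "..."  # Indicate potentially empty response
    except Exception as gen_error:
        print(f"Error during model generation in Pass 2: {gen_error}")
        final_response = "Error generating response in Pass 2."
    return final_response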
@@ -608,7 +609,7 @@ Available: {match.get('Available', 'N/A')}"""
         # Extract and list URLs from the search results that were actually used
         # This assumes the model uses the provided snippets with URLs
         urls_to_list = [result.get('href') for result in search_results_dicts if result.get('href')]
-        urls_to_list = list(dict.fromkeys(urls_to_list)) # Remove duplicates
+        urls_to_list = list(dict.fromkeys(urls_to_list)) # Remove duplicates # <-- THIS LINE WAS THE SOURCE OF THE PREVIOUS SYNTAX ERROR
 
         # Only add Sources if search was performed AND results were found
         if search_results_dicts and urls_to_list:
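The dict.fromkeys idiom on the fixed line is an order-preserving de-duplication: unlike set(), it keeps the first occurrence of each URL in its original position (insertion order is guaranteed for dicts since Python 3.7). A self-contained demonstration with made-up results:

search_results_dicts = [
    {"title": "First hit", "href": "https://example.com/a"},
    {"title": "Second hit", "href": "https://example.com/b"},
    {"title": "Duplicate of first", "href": "https://example.com/a"},
    {"title": "No URL at all"},  # filtered out by the list comprehension
]
urls_to_list = [result.get('href') for result in search_results_dicts if result.get('href')]
urls_to_list = list(dict.fromkeys(urls_to_list))  # Remove duplicates
print(urls_to_list)  # ['https://example.com/a', 'https://example.com/b']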
@@ -622,6 +623,9 @@ Available: {match.get('Available', 'N/A')}"""
             print("Warning: Final response was empty after cleaning.")
 
 
+    # This 'else' block is tied to the 'if model is not None and tokenizer is not None:' check much earlier in the function
+    # It seems correctly placed as a fallback if models didn't load at the start.
+    # Make sure the indentation aligns with that outer 'if'.
     else: # Model or tokenizer not loaded
         final_response = "Sorry, the core language model is not available."
         print("Error: LLM model or tokenizer not loaded for Pass 2.")
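The three added comments are about scoping rather than behavior: this else must sit at the same indentation level as the model-availability check near the top of the function. A stripped-down skeleton of that control flow, with the inner Pass 1/Pass 2 body elided and stand-in variables:

model = tokenizer = None  # stand-ins; the real objects are loaded at app startup

if model is not None and tokenizer is not None:
    # ... Pass 1 search, Pass 2 generation, and post-processing go here ...
    final_response = "..."
else:  # Model or tokenizer not loaded
    final_response = "Sorry, the core language model is not available."
    print("Error: LLM model or tokenizer not loaded for Pass 2.")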
@@ -639,6 +643,7 @@ if len(chat_history) > max_history_pairs:
 
 
     # Return the updated history state
+    # This return statement is part of the 'respond' function
     return "", chat_history # Return empty string for the input box, and the updated history
 
 # --- Gradio Interface ---
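Finally, the hunk header shows the surrounding context: the chat history is capped at max_history_pairs before the function returns, and the two return values follow Gradio's convention for a submit handler. A minimal sketch of both, assuming the (user, bot) tuple history format; the name respond comes from the diff's own comment, while the cap value and everything else is illustrative:

max_history_pairs = 10  # assumed cap; the real value is defined outside this diff

def respond(message, chat_history):
    bot_reply = "..."  # produced by the two-pass pipeline above
    chat_history.append((message, bot_reply))
    # Trim the oldest pairs so the prompt stays bounded
    if len(chat_history) > max_history_pairs:
        chat_history = chat_history[-max_history_pairs:]
    # Return the updated history state: "" clears the input Textbox,
    # chat_history refreshes the Chatbot component
    return "", chat_history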