Update app.py
app.py CHANGED
@@ -16,15 +16,16 @@ system_prompt = (
 # 🛑 Function to remove any detected code snippets.
 def remove_code_snippets(text):
     """Removes potential code snippets from the output."""
-    pattern = r"
-
+    pattern = r"```[\s\S]+?```|`[^`]+`|(?:\bimport\b|\bdef\b|\bclass\b|\breturn\b|\bprint\b|\bif\b|\belse\b|\btry\b|\bexcept\b|\bwhile\b|\bfor\b)"
+    clean_text = re.sub(pattern, "", text, flags=re.DOTALL | re.IGNORECASE).strip()
+    return clean_text if clean_text else "Invalid response generated. Please try again."
 
 # ✅ Use only the final_answer tool.
 final_answer = FinalAnswerTool()
 
-# 🎯 Model setup
+# 🎯 Model setup with reduced token limit to prevent long outputs
 model = HfApiModel(
-    max_tokens=2096,
+    max_tokens=1024, # Reduced from 2096 to prevent long responses
     temperature=0.5,
     model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
     custom_role_conversions=None,
@@ -34,11 +35,11 @@ model = HfApiModel(
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)
 
-# 🔄 Initialize CodeAgent (
+# 🔄 Initialize CodeAgent (blocking code extraction)
 agent = CodeAgent(
     model=model,
     tools=[final_answer], # Restricted to final advice only.
-    max_steps=6,
+    max_steps=4, # Reduced from 6 to minimize infinite loops
     verbosity_level=1,
     grammar=None,
     planning_interval=None,
@@ -49,9 +50,28 @@ agent = CodeAgent(
 
 # 🛑 Run agent with filtering.
 def run_agent(user_input):
-    """Runs the agent
+    """Runs the agent, ensuring valid output and blocking infinite loops."""
     raw_response = agent.run(user_input)
-
+
+    # Debugging: Print raw response
+    print("Raw Agent Response:", raw_response)
+
+    # Prevent infinite loops and blank responses
+    if not raw_response.strip():
+        return "I'm sorry, but I couldn't generate a response. Please try again."
+
+    # Ensure only final_answer is used
+    if "final_answer" not in raw_response.lower():
+        return "Error: The response did not use the `final_answer` tool. Please try again."
+
+    # Remove code snippets
+    clean_response = remove_code_snippets(raw_response)
+
+    # Detect and prevent repeated phrases (possible infinite loop indicator)
+    words = clean_response.split()
+    if len(set(words)) < 5: # Arbitrary threshold for detecting repetition
+        return "I'm unable to generate a meaningful response. Please refine your query."
+
     return clean_response
 
 # 🚀 Launch the Gradio UI.
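
For context, a quick standalone sketch of what the new remove_code_snippets filter does to a typical agent reply. The function body is copied from the diff above; the sample string and the __main__ harness are illustrative only.

import re

def remove_code_snippets(text):
    """Removes potential code snippets from the output (same body as in app.py above)."""
    pattern = r"```[\s\S]+?```|`[^`]+`|(?:\bimport\b|\bdef\b|\bclass\b|\breturn\b|\bprint\b|\bif\b|\belse\b|\btry\b|\bexcept\b|\bwhile\b|\bfor\b)"
    clean_text = re.sub(pattern, "", text, flags=re.DOTALL | re.IGNORECASE).strip()
    return clean_text if clean_text else "Invalid response generated. Please try again."

if __name__ == "__main__":
    sample = "Eat more vegetables.\n```python\nprint('hello')\n```\nAnd get eight hours of sleep."
    print(remove_code_snippets(sample))
    # The fenced code block is removed; only the plain-language advice remains.

Note that the keyword alternation (\bif\b, \bfor\b, etc., with re.IGNORECASE) also strips those words when they appear in ordinary prose, so filtered replies can read slightly clipped.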
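The guard sequence in run_agent can also be exercised without calling the hosted model. The FakeAgent stub and the sample strings below are invented for illustration; the checks mirror the ones added in the diff and reuse remove_code_snippets from the sketch above.

class FakeAgent:
    """Stand-in for CodeAgent so the filtering logic can be tried offline (illustrative only)."""
    def __init__(self, canned_reply):
        self.canned_reply = canned_reply

    def run(self, user_input):
        return self.canned_reply

def filter_response(raw_response):
    """Applies the same guards as run_agent in app.py, in the same order."""
    if not raw_response.strip():
        return "I'm sorry, but I couldn't generate a response. Please try again."
    if "final_answer" not in raw_response.lower():
        return "Error: The response did not use the `final_answer` tool. Please try again."
    clean_response = remove_code_snippets(raw_response)  # from the sketch above / app.py
    words = clean_response.split()
    if len(set(words)) < 5:  # same arbitrary repetition threshold as app.py
        return "I'm unable to generate a meaningful response. Please refine your query."
    return clean_response

print(filter_response(FakeAgent("").run("hi")))                          # blank reply -> apology
print(filter_response(FakeAgent("final_answer ok ok ok ok").run("hi")))  # repetitive -> refine-query message
print(filter_response(FakeAgent("final_answer: Walk daily, hydrate, and sleep eight hours.").run("hi")))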
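The diff ends just before the "# 🚀 Launch the Gradio UI." section, so the UI code itself is not shown. Purely as a hedged illustration (not necessarily how this Space wires it up), run_agent could be exposed through a minimal gr.Interface; the title and labels below are assumptions.

import gradio as gr

# Assumed wiring only: the real app.py may use a different Gradio helper.
demo = gr.Interface(
    fn=run_agent,                            # the filtered entry point defined above
    inputs=gr.Textbox(label="Ask for advice"),
    outputs=gr.Textbox(label="Answer"),
    title="Advice agent (code output filtered)",
)

if __name__ == "__main__":
    demo.launch()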