Update agent.py
Browse files
agent.py
CHANGED
@@ -25,6 +25,9 @@ import requests
|
|
25 |
import logging
|
26 |
from llama_index.core.workflow import Context
|
27 |
from llama_index.core.agent.workflow import AgentStream
|
|
|
|
|
|
|
28 |
|
29 |
wandb_callback = WandbCallbackHandler(run_args={"project": "gaia-llamaindex-agents"})
|
30 |
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
|
@@ -60,6 +63,7 @@ Settings.embed_model = embed_model
|
|
60 |
Settings.callback_manager = callback_manager
|
61 |
|
62 |
|
|
|
63 |
class EnhancedRAGQueryEngine:
|
64 |
def __init__(self, task_context: str = ""):
|
65 |
self.task_context = task_context
|
@@ -71,10 +75,12 @@ class EnhancedRAGQueryEngine:
|
|
71 |
'.docx': DocxReader(),
|
72 |
'.doc': DocxReader(),
|
73 |
'.csv': CSVReader(),
|
74 |
-
'.txt': lambda file_path: [Document(text=open(file_path, 'r').read())],
|
75 |
'.jpg': ImageReader(),
|
76 |
'.jpeg': ImageReader(),
|
77 |
-
'.png': ImageReader()
|
|
|
|
|
78 |
}
|
79 |
|
80 |
self.sentence_window_parser = SentenceWindowNodeParser.from_defaults(
|
@@ -101,13 +107,18 @@ class EnhancedRAGQueryEngine:
|
|
101 |
else:
|
102 |
docs = reader.load_data(file=file_path)
|
103 |
|
|
|
|
|
|
|
|
|
104 |
# Add metadata to all documents
|
105 |
for doc in docs:
|
106 |
-
doc
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
|
|
111 |
documents.extend(docs)
|
112 |
|
113 |
except Exception as e:
|
@@ -119,8 +130,8 @@ class EnhancedRAGQueryEngine:
|
|
119 |
text=content,
|
120 |
metadata={"file_path": file_path, "file_type": "text", "error": str(e)}
|
121 |
))
|
122 |
-
except:
|
123 |
-
print(f"Failed to process {file_path}: {e}")
|
124 |
|
125 |
return documents
|
126 |
|
@@ -140,8 +151,7 @@ class EnhancedRAGQueryEngine:
|
|
140 |
def create_context_aware_query_engine(self, index: VectorStoreIndex):
|
141 |
retriever = VectorIndexRetriever(
|
142 |
index=index,
|
143 |
-
similarity_top_k=10
|
144 |
-
embed_model=self.embed_model
|
145 |
)
|
146 |
|
147 |
query_engine = RetrieverQueryEngine(
|
@@ -152,6 +162,121 @@ class EnhancedRAGQueryEngine:
|
|
152 |
|
153 |
return query_engine
|
154 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
155 |
def comprehensive_rag_analysis(file_paths: List[str], query: str, task_context: str = "") -> str:
|
156 |
try:
|
157 |
rag_engine = EnhancedRAGQueryEngine(task_context)
|
@@ -220,7 +345,7 @@ def cross_document_analysis(file_paths: List[str], query: str, task_context: str
|
|
220 |
enhanced_rag_tool = FunctionTool.from_defaults(
|
221 |
fn=comprehensive_rag_analysis,
|
222 |
name="Enhanced RAG Analysis",
|
223 |
-
description="Comprehensive document analysis using advanced RAG with
|
224 |
)
|
225 |
|
226 |
cross_document_tool = FunctionTool.from_defaults(
|
@@ -232,7 +357,7 @@ cross_document_tool = FunctionTool.from_defaults(
|
|
232 |
# Analysis Agent
|
233 |
analysis_agent = FunctionAgent(
|
234 |
name="AnalysisAgent",
|
235 |
-
description="Advanced multimodal analysis using enhanced RAG
|
236 |
system_prompt="""
|
237 |
You are an advanced analysis specialist with access to:
|
238 |
- Enhanced RAG with hybrid search and reranking
|
@@ -247,7 +372,7 @@ analysis_agent = FunctionAgent(
|
|
247 |
4. Extract precise information with source attribution
|
248 |
5. Handle both text and visual content analysis
|
249 |
|
250 |
-
Always consider the
|
251 |
""",
|
252 |
llm=proj_llm,
|
253 |
tools=[enhanced_rag_tool, cross_document_tool],
|
@@ -261,7 +386,7 @@ class IntelligentSourceRouter:
|
|
261 |
self.duckduckgo_tool = DuckDuckGoSearchToolSpec().to_tool_list()[0]
|
262 |
|
263 |
def route_and_search(self, query: str) -> str:
|
264 |
-
"""Simple routing between academic and general search"""
|
265 |
|
266 |
# Quick intent detection
|
267 |
intent_prompt = f"""
|
@@ -276,18 +401,23 @@ class IntelligentSourceRouter:
|
|
276 |
|
277 |
try:
|
278 |
if source == "arxiv":
|
279 |
-
|
|
|
|
|
|
|
280 |
else:
|
281 |
result = self.duckduckgo_tool.call(query=query)
|
282 |
if isinstance(result, list):
|
283 |
-
|
|
|
|
|
284 |
return str(result)
|
285 |
except Exception as e:
|
286 |
return f"Search failed: {str(e)}"
|
287 |
|
288 |
# Simple research function
|
289 |
def research_tool_function(query: str) -> str:
|
290 |
-
"""
|
291 |
router = IntelligentSourceRouter()
|
292 |
return router.route_and_search(query)
|
293 |
|
@@ -295,16 +425,16 @@ def research_tool_function(query: str) -> str:
|
|
295 |
research_tool = FunctionTool.from_defaults(
|
296 |
fn=research_tool_function,
|
297 |
name="research_tool",
|
298 |
-
description="""Intelligent
|
299 |
|
300 |
**When to Use:**
|
301 |
- Questions requiring external knowledge beyond training data
|
302 |
- Current or recent information (post-training cutoff)
|
303 |
- Scientific research requiring academic sources
|
304 |
- Factual verification of specific claims
|
305 |
-
- Any question where
|
306 |
|
307 |
-
Simply provide your question and get
|
308 |
)
|
309 |
|
310 |
def execute_python_code(code: str) -> str:
|
@@ -486,11 +616,11 @@ code_tool = FunctionTool.from_defaults(
|
|
486 |
- **Self-debugging**: Identifies and fixes errors through iterative refinement
|
487 |
- **Library Integration**: Leverages numpy, pandas, matplotlib, scipy, sklearn, and other scientific libraries
|
488 |
- **Result Verification**: Validates outputs and adjusts approach as needed
|
489 |
-
|
490 |
**When to Use:**
|
491 |
- Mathematical calculations requiring step-by-step computation
|
492 |
- Data analysis and statistical processing
|
493 |
-
- Algorithm implementation and
|
494 |
- Numerical simulations and modeling
|
495 |
- Text processing and pattern analysis
|
496 |
- Complex logical operations requiring code verification
|
@@ -520,30 +650,23 @@ class EnhancedGAIAAgent:
|
|
520 |
|
521 |
**1. analysis_tool** - Advanced multimodal document analysis specialist
|
522 |
- Use for: PDF, Word, CSV, image file analysis
|
523 |
-
- Capabilities: Extract data from tables/charts, cross-reference documents, semantic search
|
524 |
- When to use: Questions with file attachments, document analysis, data extraction
|
525 |
|
526 |
**2. research_tool** - Intelligent research specialist with automatic routing
|
527 |
- Use for: External knowledge, current events, scientific papers
|
528 |
-
- Capabilities: Auto-routes between ArXiv (scientific) and web search (general), answers the question
|
529 |
- When to use: Questions requiring external knowledge, factual verification, current information
|
530 |
|
531 |
**3. code_tool** - Advanced computational specialist using ReAct reasoning
|
532 |
- Use for: Mathematical calculations, data processing, logical operations
|
533 |
-
- Capabilities: Generates and executes Python
|
534 |
- When to use: Precise calculations, data manipulation, mathematical problem solving
|
535 |
-
|
536 |
-
|
537 |
-
|
538 |
-
|
539 |
-
CRITICAL: Your final answer must be EXACT and CONCISE as required by GAIA format:
|
540 |
-
- For numbers: provide only the number (e.g., "42" or "3.14")
|
541 |
-
- For strings: provide only the exact string (e.g., "Paris" or "Einstein")
|
542 |
-
- For lists: use comma separation (e.g., "apple, banana, orange")
|
543 |
-
- NO explanations, NO additional text, ONLY the precise answer
|
544 |
""",
|
545 |
llm=proj_llm,
|
546 |
-
tools=[analysis_tool, research_tool, code_tool],
|
547 |
max_steps=10,
|
548 |
verbose = True,
|
549 |
callback_manager=callback_manager,
|
@@ -634,8 +757,8 @@ class EnhancedGAIAAgent:
|
|
634 |
{'File downloaded: ' + file_path if file_path else 'No additional files referenced'}
|
635 |
|
636 |
Additionnal instructions to system prompt :
|
637 |
-
1. If a file is available, use the analysis_tool
|
638 |
-
2. If a link is in the question, use the research_tool.
|
639 |
"""
|
640 |
|
641 |
try:
|
|
|
25 |
import logging
|
26 |
from llama_index.core.workflow import Context
|
27 |
from llama_index.core.agent.workflow import AgentStream
|
28 |
+
from llama_index.readers.web import TrafilaturaWebReader
|
29 |
+
from llama_index.readers.youtube_transcript import YoutubeTranscriptReader
|
30 |
+
|
31 |
|
32 |
wandb_callback = WandbCallbackHandler(run_args={"project": "gaia-llamaindex-agents"})
|
33 |
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
|
|
|
63 |
Settings.callback_manager = callback_manager
|
64 |
|
65 |
|
66 |
+
|
67 |
class EnhancedRAGQueryEngine:
|
68 |
def __init__(self, task_context: str = ""):
|
69 |
self.task_context = task_context
|
|
|
75 |
'.docx': DocxReader(),
|
76 |
'.doc': DocxReader(),
|
77 |
'.csv': CSVReader(),
|
78 |
+
'.txt': lambda file_path: [Document(text=open(file_path, 'r', encoding='utf-8').read())],
|
79 |
'.jpg': ImageReader(),
|
80 |
'.jpeg': ImageReader(),
|
81 |
+
'.png': ImageReader(),
|
82 |
+
'web': TrafilaturaWebReader(),
|
83 |
+
'youtube': YoutubeTranscriptReader()
|
84 |
}
|
85 |
|
86 |
self.sentence_window_parser = SentenceWindowNodeParser.from_defaults(
|
|
|
107 |
else:
|
108 |
docs = reader.load_data(file=file_path)
|
109 |
|
110 |
+
# Ensure docs is a list
|
111 |
+
if not isinstance(docs, list):
|
112 |
+
docs = [docs]
|
113 |
+
|
114 |
# Add metadata to all documents
|
115 |
for doc in docs:
|
116 |
+
if hasattr(doc, 'metadata'):
|
117 |
+
doc.metadata.update({
|
118 |
+
"file_path": file_path,
|
119 |
+
"file_type": file_ext[1:],
|
120 |
+
"task_context": self.task_context
|
121 |
+
})
|
122 |
documents.extend(docs)
|
123 |
|
124 |
except Exception as e:
|
|
|
130 |
text=content,
|
131 |
metadata={"file_path": file_path, "file_type": "text", "error": str(e)}
|
132 |
))
|
133 |
+
except Exception as fallback_error:
|
134 |
+
print(f"Failed to process {file_path}: {e}, Fallback error: {fallback_error}")
|
135 |
|
136 |
return documents
|
137 |
|
|
|
151 |
def create_context_aware_query_engine(self, index: VectorStoreIndex):
|
152 |
retriever = VectorIndexRetriever(
|
153 |
index=index,
|
154 |
+
similarity_top_k=10
|
|
|
155 |
)
|
156 |
|
157 |
query_engine = RetrieverQueryEngine(
|
|
|
162 |
|
163 |
return query_engine
|
164 |
|
165 |
class HybridWebRAGTool:
    """Web research helper that feeds DuckDuckGo search hits (web pages and
    YouTube videos) through the RAG pipeline of an EnhancedRAGQueryEngine.
    """

    # Annotation is quoted so the class compiles regardless of where
    # EnhancedRAGQueryEngine is defined relative to this class.
    def __init__(self, rag_engine: "EnhancedRAGQueryEngine"):
        # First tool in the spec's list is the plain text-search endpoint.
        self.duckduckgo_tool = DuckDuckGoSearchToolSpec().to_tool_list()[0]
        self.rag_engine = rag_engine

    def is_youtube_url(self, url: str) -> bool:
        """Return True if *url* points at a YouTube video."""
        return 'youtube.com/watch' in url or 'youtu.be/' in url

    def search_and_analyze(self, query: str, max_results: int = 3) -> str:
        """Search the web for *query* and answer it with RAG over the results.

        Supports both regular web pages (Trafilatura reader) and YouTube
        videos (transcript reader). Returns the synthesized answer plus a
        source summary, or a human-readable error string — never raises.
        """
        try:
            # Step 1: Get candidate URLs from DuckDuckGo.
            search_results = self.duckduckgo_tool.call(query=query, max_results=max_results)

            if isinstance(search_results, list):
                # Fix: tolerate non-dict entries — some tool versions return
                # plain strings or result objects instead of dicts, which
                # previously raised AttributeError and was masked by the
                # outer except into "Error in hybrid search".
                urls = [
                    r.get('href', '')
                    for r in search_results
                    if isinstance(r, dict) and r.get('href')
                ]
            else:
                return f"Search failed: {search_results}"

            if not urls:
                return "No URLs found in search results"

            # Step 2: Split URLs by content type.
            web_documents = []
            youtube_urls = []
            regular_urls = []
            for url in urls:
                if self.is_youtube_url(url):
                    youtube_urls.append(url)
                else:
                    regular_urls.append(url)

            # Load YouTube transcripts in one batch; best-effort, a failure
            # here must not abort the web-page processing below.
            if youtube_urls:
                try:
                    youtube_docs = self.rag_engine.readers['youtube'].load_data(youtube_urls)
                    if isinstance(youtube_docs, list):
                        web_documents.extend(youtube_docs)
                    else:
                        web_documents.append(youtube_docs)
                except Exception as e:
                    print(f"Failed to load YouTube videos: {e}")

            # Load regular pages one at a time so a single bad URL cannot
            # abort the whole batch.
            for url in regular_urls:
                try:
                    docs = self.rag_engine.readers['web'].load_data([url])
                    if isinstance(docs, list):
                        web_documents.extend(docs)
                    else:
                        web_documents.append(docs)
                except Exception as e:
                    print(f"Failed to load {url}: {e}")
                    continue

            if not web_documents:
                return "No content could be extracted from URLs"

            # Step 3: Build a temporary index over the fetched documents.
            temp_index = self.rag_engine.create_advanced_index(web_documents)

            # Step 4: Query the indexed content.
            query_engine = self.rag_engine.create_context_aware_query_engine(temp_index)
            response = query_engine.query(query)

            # Summarize which kinds of sources backed the answer.
            source_info = []
            if youtube_urls:
                source_info.append(f"YouTube videos: {len(youtube_urls)}")
            if regular_urls:
                source_info.append(f"Web pages: {len(regular_urls)}")

            return f"{str(response)}\n\nSources analyzed: {', '.join(source_info)}"

        except Exception as e:
            return f"Error in hybrid search: {str(e)}"
|
247 |
# Research entry point exposed to the agent as a FunctionTool.
def research_tool_function(query: str) -> str:
    """Answer *query* by combining DuckDuckGo search with RAG analysis of
    the retrieved web pages and YouTube videos.

    Never raises: any failure is reported as an error string.
    """
    try:
        hybrid = HybridWebRAGTool(EnhancedRAGQueryEngine())
        return hybrid.search_and_analyze(query)
    except Exception as e:
        return f"Research tool error: {str(e)}"
256 |
+
|
257 |
+
# Create the research tool for your agent
|
258 |
+
research_tool = FunctionTool.from_defaults(
|
259 |
+
fn=research_tool_function,
|
260 |
+
name="research_tool",
|
261 |
+
description="""Advanced research tool that combines web search with RAG analysis, supporting both web pages and YouTube videos, with context-aware processing.
|
262 |
+
|
263 |
+
**When to Use:**
|
264 |
+
- Questions requiring external knowledge beyond training data
|
265 |
+
- Current or recent information (post-training cutoff)
|
266 |
+
- Scientific research requiring academic sources
|
267 |
+
- Factual verification of specific claims
|
268 |
+
- Any question where search results could provide the exact answer
|
269 |
+
- Research involving video content and tutorials
|
270 |
+
- Complex queries needing synthesis of multiple sources
|
271 |
+
|
272 |
+
**Advantages:**
|
273 |
+
- Full content analysis from both web and video sources
|
274 |
+
- Automatic content type detection and processing
|
275 |
+
- Semantic search within retrieved content
|
276 |
+
- Reranking for relevance across all source types
|
277 |
+
- Comprehensive synthesis of multimedia information"""
|
278 |
+
)
|
279 |
+
|
280 |
def comprehensive_rag_analysis(file_paths: List[str], query: str, task_context: str = "") -> str:
|
281 |
try:
|
282 |
rag_engine = EnhancedRAGQueryEngine(task_context)
|
|
|
345 |
enhanced_rag_tool = FunctionTool.from_defaults(
|
346 |
fn=comprehensive_rag_analysis,
|
347 |
name="Enhanced RAG Analysis",
|
348 |
+
description="Comprehensive document analysis using advanced RAG with context-aware processing"
|
349 |
)
|
350 |
|
351 |
cross_document_tool = FunctionTool.from_defaults(
|
|
|
357 |
# Analysis Agent
|
358 |
analysis_agent = FunctionAgent(
|
359 |
name="AnalysisAgent",
|
360 |
+
description="Advanced multimodal analysis using enhanced RAG and cross-document capabilities",
|
361 |
system_prompt="""
|
362 |
You are an advanced analysis specialist with access to:
|
363 |
- Enhanced RAG with hybrid search and reranking
|
|
|
372 |
4. Extract precise information with source attribution
|
373 |
5. Handle both text and visual content analysis
|
374 |
|
375 |
+
Always consider the task context and provide precise, well-sourced answers.
|
376 |
""",
|
377 |
llm=proj_llm,
|
378 |
tools=[enhanced_rag_tool, cross_document_tool],
|
|
|
386 |
self.duckduckgo_tool = DuckDuckGoSearchToolSpec().to_tool_list()[0]
|
387 |
|
388 |
def route_and_search(self, query: str) -> str:
|
389 |
+
"""Simple routing between academic and general search - returns URLs only"""
|
390 |
|
391 |
# Quick intent detection
|
392 |
intent_prompt = f"""
|
|
|
401 |
|
402 |
try:
|
403 |
if source == "arxiv":
|
404 |
+
# ArXiv results typically contain URLs in the response text
|
405 |
+
arxiv_result = self.arxiv_tool.call(query=query)
|
406 |
+
# Extract URLs from ArXiv response (you may need to parse this based on actual format)
|
407 |
+
return str(arxiv_result) # ArXiv tool should return URLs
|
408 |
else:
|
409 |
result = self.duckduckgo_tool.call(query=query)
|
410 |
if isinstance(result, list):
|
411 |
+
# Extract only URLs from search results
|
412 |
+
urls = [r.get('href', '') for r in result if r.get('href')]
|
413 |
+
return "\n".join(urls)
|
414 |
return str(result)
|
415 |
except Exception as e:
|
416 |
return f"Search failed: {str(e)}"
|
417 |
|
418 |
# Thin wrapper so the router can be exposed as a FunctionTool.
def research_tool_function(query: str) -> str:
    """Return URLs relevant to *query*, routed between academic and web search."""
    return IntelligentSourceRouter().route_and_search(query)
|
423 |
|
|
|
425 |
research_tool = FunctionTool.from_defaults(
|
426 |
fn=research_tool_function,
|
427 |
name="research_tool",
|
428 |
+
description="""Intelligent URL finder that routes between academic (ArXiv) and general (web) search sources to return relevant URLs.
|
429 |
|
430 |
**When to Use:**
|
431 |
- Questions requiring external knowledge beyond training data
|
432 |
- Current or recent information (post-training cutoff)
|
433 |
- Scientific research requiring academic sources
|
434 |
- Factual verification of specific claims
|
435 |
+
- Any question where you need URLs to relevant sources
|
436 |
|
437 |
+
Simply provide your question and get URLs to visit for further reading."""
|
438 |
)
|
439 |
|
440 |
def execute_python_code(code: str) -> str:
|
|
|
616 |
- **Self-debugging**: Identifies and fixes errors through iterative refinement
|
617 |
- **Library Integration**: Leverages numpy, pandas, matplotlib, scipy, sklearn, and other scientific libraries
|
618 |
- **Result Verification**: Validates outputs and adjusts approach as needed
|
619 |
+
|
620 |
**When to Use:**
|
621 |
- Mathematical calculations requiring step-by-step computation
|
622 |
- Data analysis and statistical processing
|
623 |
+
- Algorithm implementation, optimization and execution
|
624 |
- Numerical simulations and modeling
|
625 |
- Text processing and pattern analysis
|
626 |
- Complex logical operations requiring code verification
|
|
|
650 |
|
651 |
**1. analysis_tool** - Advanced multimodal document analysis specialist
|
652 |
- Use for: PDF, Word, CSV, image file analysis
|
|
|
653 |
- When to use: Questions with file attachments, document analysis, data extraction
|
654 |
|
655 |
**2. research_tool** - Intelligent research specialist with automatic routing
|
656 |
- Use for: External knowledge, current events, scientific papers
|
|
|
657 |
- When to use: Questions requiring external knowledge, factual verification, current information
|
658 |
|
659 |
**3. code_tool** - Advanced computational specialist using ReAct reasoning
|
660 |
- Use for: Mathematical calculations, data processing, logical operations
|
661 |
+
- Capabilities: Generates and executes Python, handles complex computations, step-by-step problem solving
|
662 |
- When to use: Precise calculations, data manipulation, mathematical problem solving
|
663 |
+
|
664 |
+
**4. code_execution_tool** - Use only to execute .py file
|
665 |
+
|
666 |
+
CRITICAL: Your final answer must be EXACT and CONCISE as required by GAIA format : NO explanations, NO additional text, ONLY the precise answer
|
|
|
|
|
|
|
|
|
|
|
667 |
""",
|
668 |
llm=proj_llm,
|
669 |
+
tools=[analysis_tool, research_tool, code_tool, code_execution_tool],
|
670 |
max_steps=10,
|
671 |
verbose = True,
|
672 |
callback_manager=callback_manager,
|
|
|
757 |
{'File downloaded: ' + file_path if file_path else 'No additional files referenced'}
|
758 |
|
759 |
Additionnal instructions to system prompt :
|
760 |
+
1. If a file is available, use the analysis_tool (except for .py files).
|
761 |
+
2. If a link is in the question, use the research_tool.
|
762 |
"""
|
763 |
|
764 |
try:
|