helloparthshah committed
Commit 60ee681 · 1 Parent(s): 8b86bcd

Fixing get website tool

main.py CHANGED
@@ -180,7 +180,7 @@ no_auth = args.no_auth
 
 with gr.Blocks(title="HASHIRU AI", css=css, fill_width=True, fill_height=True) as demo:
     model_manager = GeminiManager(
-        gemini_model="gemini-2.0-flash", modes=[mode for mode in Mode])
+        gemini_model="gemini-2.5-flash-preview-04-17", modes=[mode for mode in Mode])
 
     def update_model(modeIndexes: List[int]):
         modes = [Mode(i+1) for i in modeIndexes]
@@ -230,6 +230,6 @@ if __name__ == "__main__":
     import uvicorn
 
     if no_auth:
-        demo.launch(favicon_path="favicon.ico", server_name="localhost")
+        demo.launch(favicon_path="favicon.ico", server_name="localhost", share=True)
     else:
         uvicorn.run(app, host="0.0.0.0", port=7860)
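
For context, share=True tells Gradio to expose the locally bound app through a temporary public *.gradio.live link in addition to serving it on localhost. A minimal standalone sketch of the changed launch call (the placeholder UI is illustrative, not from this repo):

import gradio as gr

with gr.Blocks(title="HASHIRU AI") as demo:
    gr.Markdown("placeholder UI")  # stand-in for the real HASHIRU interface

# share=True additionally opens a temporary public *.gradio.live tunnel,
# useful when the localhost-bound server is not reachable from outside
# (e.g. when running inside a container or a hosted Space).
demo.launch(server_name="localhost", share=True)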
src/manager/manager.py CHANGED
@@ -20,6 +20,7 @@ from google.genai.errors import APIError
 import backoff
 import mimetypes
 import json
+import traceback
 
 logger = logging.getLogger(__name__)
 handler = logging.StreamHandler(sys.stdout)
@@ -326,6 +327,7 @@ class GeminiManager:
             messages = messages + function_call_requests
             yield messages
         except Exception as e:
+            traceback.print_exc(file=sys.stdout)
             print(messages)
             print(chat_history)
             messages.append({
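
The added call prints the full stack trace of the caught exception to stdout, where the logger's StreamHandler also writes, instead of discarding it. A self-contained sketch of the pattern, using a simulated error in place of a real Gemini API failure:

import sys
import traceback

try:
    raise ValueError("simulated Gemini API failure")  # hypothetical error
except Exception:
    # Print the complete traceback to stdout rather than stderr, so it
    # lands in the same stream as the surrounding log output.
    traceback.print_exc(file=sys.stdout)
    print("handled; continuing")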
src/tools/default_tools/get_website_tool.py CHANGED
@@ -21,9 +21,9 @@ class GetWebsite():
         },
         "output_type": {
             "type": "string",
-            "enum": ["summary", "full_text"],
-            "description": "The type of output to return. 'summary' returns a summary of the text, 'full_text' returns the full text content.",
-            "default": "full_text"
+            "enum": ["summary", "full_text", "html"],
+            "description": "The type of output to return. 'summary' returns a summary of the text, 'full_text' returns the full text content, and 'html' returns the raw HTML content.",
+            "default": "summary"
         },
         "css_selector": {
             "type": "string",
@@ -121,17 +121,26 @@ class GetWebsite():
         response = requests.get(url, headers=headers, timeout=10)
         response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)
         response.encoding = response.apparent_encoding  # Handle encoding
+        if output_type == "html":
+            # Return the raw HTML content
+            return {
+                "status": "success",
+                "message": "Search completed successfully",
+                "output": response.text,
+            }
 
         # Parse the content using BeautifulSoup
-        soup = BeautifulSoup(response.content, 'html.parser')
+        soup = BeautifulSoup(response.text, 'html.parser')
 
         if css_selector:
             # Extract text from the selected elements
             elements = soup.select(css_selector)
-            text = ('\n'.join([element.get_text() for element in elements])).encode('utf-8', 'ignore').decode('utf-8')
+            text = ('\n'.join([element.get_text() for element in elements]))
+            text = text.encode('utf-8', 'ignore').decode('utf-8')
         else:
             # Extract text from the parsed HTML
             text = soup.get_text()
+            text = text.encode('utf-8', 'ignore').decode('utf-8')
 
         if output_type == "summary":
             # Summarize the text
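
Taken together, the tool now defaults to "summary", gains an "html" output that returns response.text before any BeautifulSoup parsing, and applies the encode/decode sanitization in both selector branches. A hedged usage sketch follows; it assumes the class exposes a run(**kwargs) entry point matching its schema, which this diff does not show:

from src.tools.default_tools.get_website_tool import GetWebsite

tool = GetWebsite()

# output_type now defaults to "summary" (run() signature is assumed).
summary = tool.run(url="https://example.com")

# "html" short-circuits before parsing and returns the raw page source.
raw = tool.run(url="https://example.com", output_type="html")
print(raw["status"], len(raw["output"]))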