Spaces:
Running
Running
Tecnhotron
commited on
Commit
·
e78c9e1
1
Parent(s):
a3d3eec
First
Browse files- __pycache__/computer_control_helper.cpython-313.pyc +0 -0
- __pycache__/streaming.cpython-313.pyc +0 -0
- __pycache__/test_api.cpython-313-pytest-8.3.5.pyc +0 -0
- api.py +109 -366
- computer_control_helper.py +0 -99
- downloaded_files/driver_fixing.lock +0 -0
- downloaded_files/pipfinding.lock +0 -0
- downloaded_files/pyautogui.lock +0 -0
- lmarena.log +790 -0
- requirements.txt +1 -0
- sdfhsdf.py +676 -0
- streaming.py +163 -170
- test_api.py +5 -40
__pycache__/computer_control_helper.cpython-313.pyc
CHANGED
Binary files a/__pycache__/computer_control_helper.cpython-313.pyc and b/__pycache__/computer_control_helper.cpython-313.pyc differ
|
|
__pycache__/streaming.cpython-313.pyc
CHANGED
Binary files a/__pycache__/streaming.cpython-313.pyc and b/__pycache__/streaming.cpython-313.pyc differ
|
|
__pycache__/test_api.cpython-313-pytest-8.3.5.pyc
ADDED
Binary file (9.06 kB). View file
|
|
api.py
CHANGED
@@ -21,8 +21,12 @@ from selenium.webdriver.support import expected_conditions as EC
|
|
21 |
from selenium.common.exceptions import TimeoutException, NoSuchElementException, StaleElementReferenceException
|
22 |
import random
|
23 |
import pyautogui
|
|
|
24 |
from streaming import StreamProcessor, create_streaming_response, StreamConfig, StreamingResponseGenerator
|
25 |
import base64
|
|
|
|
|
|
|
26 |
|
27 |
# Virtual display setup for Linux headless environments
|
28 |
import platform
|
@@ -36,6 +40,11 @@ if platform.system() == 'Linux':
|
|
36 |
logging.basicConfig(
|
37 |
level=logging.INFO,
|
38 |
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
|
|
|
|
|
|
|
|
|
|
39 |
)
|
40 |
logger = logging.getLogger(__name__)
|
41 |
|
@@ -68,10 +77,14 @@ class DriverNotAvailableError(LmArenaError): pass
|
|
68 |
class Message(BaseModel):
|
69 |
role: str
|
70 |
content: Union[str, List[Dict[str, str]]]
|
|
|
71 |
class ChatCompletionRequest(BaseModel):
|
72 |
messages: List[Message]
|
73 |
model: str
|
74 |
stream: Optional[bool] = False
|
|
|
|
|
|
|
75 |
class Usage(BaseModel):
|
76 |
prompt_tokens: int; completion_tokens: int; total_tokens: int
|
77 |
class Choice(BaseModel):
|
@@ -94,7 +107,6 @@ class ModelListResponse(BaseModel):
|
|
94 |
object: str = "list"
|
95 |
data: List[ModelInfo]
|
96 |
|
97 |
-
|
98 |
class DriverManager:
|
99 |
def __init__(self):
|
100 |
logger.info("DriverManager instance created.")
|
@@ -107,7 +119,7 @@ class DriverManager:
|
|
107 |
logger.info("Gemini client initialized successfully.")
|
108 |
except Exception as e:
|
109 |
logger.error(f"Failed to initialize Gemini client: {e}", exc_info=True)
|
110 |
-
self._genai_client = None
|
111 |
else:
|
112 |
logger.info("GEMINI_API_KEY not set, Gemini client will not be used for captcha.")
|
113 |
|
@@ -116,10 +128,8 @@ class DriverManager:
|
|
116 |
if self._driver is not None:
|
117 |
logger.warning("Driver initialization called but driver already exists.")
|
118 |
return
|
119 |
-
|
120 |
loop = asyncio.get_event_loop()
|
121 |
logger.info("Initializing Selenium driver...")
|
122 |
-
|
123 |
def _sync_initialize_driver_logic():
|
124 |
logger.info("Executing synchronous driver initialization and enhanced readiness check.")
|
125 |
temp_driver = None
|
@@ -128,47 +138,33 @@ class DriverManager:
|
|
128 |
logger.info("Driver instantiated. Opening URL...")
|
129 |
temp_driver.open(config.lmarena_url)
|
130 |
logger.info(f"URL '{config.lmarena_url}' opened.")
|
131 |
-
|
132 |
-
# --- STAGE 1 CAPTCHA HANDLING (Cloudflare / Pre-site) ---
|
133 |
logger.info("Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...")
|
134 |
temp_driver.uc_gui_click_captcha()
|
135 |
logger.info("uc_gui_click_captcha() completed. Main site should be loading now.")
|
136 |
-
# --- END STAGE 1 ---
|
137 |
-
|
138 |
-
# ---- STAGE 2 CAPTCHA HANDLING (On-site "Verify Human" popup) ----
|
139 |
self._perform_sync_captcha_checks(temp_driver)
|
140 |
-
# ---- END STAGE 2 ----
|
141 |
return temp_driver
|
142 |
except Exception as e:
|
143 |
logger.error(f"Synchronous driver initialization failed: {e}", exc_info=True)
|
144 |
if temp_driver: temp_driver.quit()
|
145 |
raise LmArenaError(f"Failed to initialize driver: {e}") from e
|
146 |
-
|
147 |
try:
|
148 |
self._driver = await loop.run_in_executor(None, _sync_initialize_driver_logic)
|
149 |
logger.info("Selenium driver initialization process completed successfully.")
|
150 |
except Exception as e:
|
151 |
logger.error(f"Asynchronous driver initialization failed: {e}", exc_info=True)
|
152 |
-
if self._driver:
|
153 |
-
try:
|
154 |
-
# Ensure self._driver is used if it was assigned before error
|
155 |
driver_to_quit = self._driver
|
156 |
-
self._driver = None
|
157 |
await loop.run_in_executor(None, driver_to_quit.quit)
|
158 |
-
except Exception as quit_e:
|
159 |
logger.error(f"Failed to quit driver after initialization error: {quit_e}")
|
160 |
-
else: # If self._driver was never assigned (e.g. error in Driver() call itself)
|
161 |
-
logger.debug("No driver instance to quit after initialization error.")
|
162 |
-
|
163 |
if isinstance(e, LmArenaError):
|
164 |
raise
|
165 |
raise LmArenaError(f"Failed to initialize driver: {e}") from e
|
166 |
|
167 |
def _human_like_reload(self, driver: Driver):
|
168 |
-
"""Human-like page reload with F5/FN+F5 variation"""
|
169 |
logger.info("Performing human-like page reload")
|
170 |
-
|
171 |
-
# Randomly choose between F5 and FN+F5
|
172 |
if random.random() > 0.5:
|
173 |
logger.info("Using F5 key")
|
174 |
pyautogui.press('f5')
|
@@ -179,165 +175,69 @@ class DriverManager:
|
|
179 |
pyautogui.press('f5')
|
180 |
time.sleep(0.3)
|
181 |
pyautogui.keyUp('ctrl')
|
182 |
-
|
183 |
-
# Add random delay to simulate human behavior
|
184 |
sleep_time = random.uniform(0.5, 2.0)
|
185 |
time.sleep(sleep_time)
|
186 |
logger.info(f"Page reloaded after {sleep_time:.2f}s delay")
|
187 |
|
188 |
-
def _random_mouse_movement(self, driver: Driver):
|
189 |
-
logger.info("Performing natural random mouse movement with pyautogui")
|
190 |
-
try:
|
191 |
-
import pyautogui
|
192 |
-
import random
|
193 |
-
import math
|
194 |
-
import time
|
195 |
-
|
196 |
-
# Get screen dimensions
|
197 |
-
screen_width, screen_height = pyautogui.size()
|
198 |
-
center_x = screen_width // 2
|
199 |
-
center_y = screen_height // 2
|
200 |
-
|
201 |
-
# Generate random movement pattern (combination of arcs and lines)
|
202 |
-
patterns = [
|
203 |
-
# Small circles around center
|
204 |
-
lambda: [(int(center_x + 50 * math.cos(angle)), int(center_y + 50 * math.sin(angle))) for angle in [2 * math.pi * i / 8 for i in range(8)]],
|
205 |
-
|
206 |
-
# Diagonal sweeps
|
207 |
-
lambda: [(100, 100), (screen_width-100, screen_height-100), (100, screen_height-100), (screen_width-100, 100)],
|
208 |
-
|
209 |
-
# Random walk
|
210 |
-
lambda: [(random.randint(100, screen_width-100), random.randint(100, screen_height-100)) for _ in range(5)]
|
211 |
-
]
|
212 |
-
|
213 |
-
# Select and execute random pattern
|
214 |
-
pattern = random.choice(patterns)()
|
215 |
-
total_points = len(pattern)
|
216 |
-
duration = 2.0 / total_points # Total duration ~2 seconds
|
217 |
-
|
218 |
-
for i, (x, y) in enumerate(pattern):
|
219 |
-
# Add slight randomness to movement speed
|
220 |
-
point_duration = duration * random.uniform(0.8, 1.2)
|
221 |
-
pyautogui.moveTo(x, y, duration=point_duration)
|
222 |
-
|
223 |
-
# Random micro-pauses between movements
|
224 |
-
if i < total_points - 1:
|
225 |
-
time.sleep(random.uniform(0.05, 0.15))
|
226 |
-
|
227 |
-
logger.info("Natural mouse movement performed successfully")
|
228 |
-
except Exception as e:
|
229 |
-
logger.warning(f"Random mouse movement failed: {e}", exc_info=True)
|
230 |
-
|
231 |
def _perform_sync_captcha_checks(self, driver: Driver):
|
232 |
-
# This method is now exclusively for the on-site "Verify Human" popup.
|
233 |
logger.info("Checking for on-site ('Verify Human') captcha...")
|
234 |
-
|
235 |
-
# Add random mouse movements at start
|
236 |
-
self._random_mouse_movement(driver)
|
237 |
-
|
238 |
-
# First check if textarea is already interactable AND no captcha popup is visible
|
239 |
try:
|
240 |
textarea_locator = (By.TAG_NAME, "textarea")
|
241 |
-
textarea = WebDriverWait(driver, 5).until(
|
242 |
-
EC.element_to_be_clickable(textarea_locator)
|
243 |
-
)
|
244 |
-
|
245 |
-
# Detect captcha presence with multiple strategies
|
246 |
captcha_present = False
|
247 |
-
|
248 |
-
|
249 |
-
# 1. Check for common captcha iframes
|
250 |
-
try:
|
251 |
-
iframes = driver.find_elements(By.TAG_NAME, 'iframe')
|
252 |
-
for iframe in iframes:
|
253 |
-
src = iframe.get_attribute('src') or ''
|
254 |
-
if any(keyword in src for keyword in ['api2/anchor', 'api2/bframe', 'recaptcha', 'hcaptcha.com']):
|
255 |
-
if iframe.is_displayed():
|
256 |
-
captcha_present = True
|
257 |
-
logger.info(f"On-site captcha iframe detected with src containing keyword.")
|
258 |
-
break
|
259 |
-
except Exception as e:
|
260 |
-
logger.debug(f"Error scanning iframes for on-site captcha: {e}")
|
261 |
-
# 2. Check for captcha container divs
|
262 |
-
if not captcha_present:
|
263 |
try:
|
264 |
-
|
265 |
-
|
266 |
-
|
267 |
-
captcha_present = True
|
268 |
-
logger.info("On-site captcha container detected via CSS selector.")
|
269 |
-
break
|
270 |
-
except Exception as e:
|
271 |
-
logger.debug(f"Error scanning on-site captcha containers: {e}")
|
272 |
-
# 3. Text cues
|
273 |
if not captcha_present:
|
274 |
-
|
275 |
-
|
276 |
-
|
277 |
-
|
278 |
-
captcha_present = True
|
279 |
-
logger.info(f"On-site captcha text cue '{cue}' detected.")
|
280 |
-
break
|
281 |
-
except Exception:
|
282 |
-
pass
|
283 |
-
|
284 |
if textarea.is_enabled() and textarea.is_displayed() and not captcha_present:
|
285 |
logger.info("No on-site captcha detected. Main UI is ready.")
|
286 |
return
|
287 |
else:
|
288 |
logger.info("Textarea not ready or an on-site captcha indicator was found. Proceeding with AI solver.")
|
289 |
-
|
290 |
except (TimeoutException, NoSuchElementException):
|
291 |
logger.info("Chat input textarea not interactable. Proceeding with AI captcha solver.")
|
292 |
except Exception as e:
|
293 |
logger.warning(f"Unexpected error checking UI state for on-site captcha: {e}", exc_info=True)
|
294 |
-
|
295 |
if not self._genai_client:
|
296 |
logger.error("On-site captcha detected, but Gemini client not available. Cannot proceed.")
|
297 |
raise LmArenaError("AI Captcha solver is required but not configured.")
|
298 |
-
|
299 |
-
# --- AI-based solver for the on-site captcha ---
|
300 |
try:
|
301 |
logger.info("Starting visual AI check for on-site captcha.")
|
302 |
screenshot = computer_control_helper.capture_screen()
|
303 |
if not screenshot:
|
304 |
-
logger.error("Failed to capture screen for AI captcha check.")
|
305 |
-
return
|
306 |
-
|
307 |
img_byte_arr = io.BytesIO()
|
308 |
screenshot.save(img_byte_arr, format='PNG')
|
309 |
-
|
310 |
-
|
311 |
-
model_name = "gemini-2.0-flash"
|
312 |
-
logger.info(f"Using Gemini model: {model_name} for on-site captcha detection.")
|
313 |
-
|
314 |
contents = [
|
315 |
-
types.
|
316 |
-
|
317 |
-
parts=[
|
318 |
-
types.Part.from_bytes(mime_type="image/png", data=img_bytes),
|
319 |
-
types.Part.from_text(text="""find the text "Verify you are human". do not give me the coordinates of the text itself - give me the coordinates of the small box to the LEFT of the text. Example response:
|
320 |
``json
|
321 |
[
|
322 |
{"box_2d": [504, 151, 541, 170], "label": "box"}
|
323 |
]
|
324 |
``
|
325 |
If you cannot find the checkbox, respond with "No checkbox found".
|
326 |
-
"""
|
327 |
-
]
|
328 |
-
),
|
329 |
]
|
330 |
|
331 |
generate_content_config = types.GenerateContentConfig(response_mime_type="text/plain")
|
332 |
logger.info("Sending screenshot to Gemini API for analysis.")
|
333 |
response_stream = self._genai_client.models.generate_content_stream(
|
334 |
-
model=
|
335 |
contents=contents,
|
336 |
config=generate_content_config,
|
337 |
)
|
338 |
full_response_text = "".join(chunk.text for chunk in response_stream)
|
339 |
logger.info(f"Received Gemini response for on-site captcha check: {full_response_text}")
|
340 |
-
|
341 |
if "No checkbox found" in full_response_text:
|
342 |
logger.info("Gemini indicated no checkbox found for on-site captcha.")
|
343 |
else:
|
@@ -348,15 +248,13 @@ If you cannot find the checkbox, respond with "No checkbox found".
|
|
348 |
click_target = parsed_data[0]
|
349 |
elif isinstance(parsed_data, dict) and "box_2d" in parsed_data:
|
350 |
click_target = parsed_data
|
351 |
-
|
352 |
if click_target:
|
353 |
logger.info(f"On-site captcha checkbox found via Gemini. Clicking coordinates: {click_target}")
|
354 |
computer_control_helper.perform_click(click_target)
|
355 |
-
time.sleep(3)
|
356 |
logger.info("Click performed. Now reloading page as requested for post-AI solve.")
|
357 |
self._human_like_reload(driver)
|
358 |
time.sleep(config.page_load_wait_after_refresh_seconds)
|
359 |
-
logger.info("Page reloaded after AI captcha solve.")
|
360 |
else:
|
361 |
logger.info("No valid 'box_2d' data found in Gemini response. Reloading as fallback.")
|
362 |
self._human_like_reload(driver)
|
@@ -367,154 +265,89 @@ If you cannot find the checkbox, respond with "No checkbox found".
|
|
367 |
async with self._lock:
|
368 |
if self._driver:
|
369 |
logger.info("Cleaning up and quitting Selenium driver...")
|
370 |
-
loop = asyncio.get_event_loop()
|
371 |
-
driver_to_quit = self._driver
|
372 |
-
self._driver = None
|
373 |
try:
|
374 |
await loop.run_in_executor(None, driver_to_quit.quit)
|
375 |
logger.info("Driver quit successfully.")
|
376 |
except Exception as e:
|
377 |
logger.error(f"Error during driver cleanup: {e}", exc_info=True)
|
378 |
-
else:
|
379 |
-
logger.info("Cleanup called but no driver was active.")
|
380 |
|
381 |
def get_driver(self) -> Driver:
|
382 |
-
if self._driver is None:
|
383 |
-
logger.error("Attempted to get driver, but it is not available.")
|
384 |
-
raise DriverNotAvailableError("Driver not available")
|
385 |
-
logger.debug("Driver instance requested and provided.")
|
386 |
return self._driver
|
387 |
|
388 |
async def _select_model(self, model_id: str) -> None:
|
389 |
-
"""Select a model on the LmArena page"""
|
390 |
driver = self.get_driver()
|
391 |
logger.info(f"Selecting model: {model_id}")
|
392 |
-
|
393 |
def _sync_select_model_logic(drv: Driver, m_id: str):
|
394 |
-
logger.debug("Starting model selection logic")
|
395 |
try:
|
396 |
-
# Click model dropdown
|
397 |
-
logger.debug("Locating model dropdown")
|
398 |
dropdown_locator = (By.XPATH, "//button[@data-sentry-source-file='select-model.tsx' and @role='combobox']")
|
399 |
-
WebDriverWait(drv, config.driver_timeout).until(
|
400 |
-
EC.element_to_be_clickable(dropdown_locator)
|
401 |
-
).click()
|
402 |
-
logger.debug("Clicked model dropdown")
|
403 |
-
|
404 |
-
# Enter model name
|
405 |
-
logger.debug("Locating model search input")
|
406 |
search_locator = (By.XPATH, "//input[@placeholder='Search models' and @cmdk-input]")
|
407 |
-
search_element = WebDriverWait(drv, config.driver_timeout).until(
|
408 |
-
EC.visibility_of_element_located(search_locator)
|
409 |
-
)
|
410 |
-
logger.debug("Clearing search input")
|
411 |
search_element.clear()
|
412 |
-
logger.debug(f"Typing model name: {m_id}")
|
413 |
search_element.send_keys(m_id)
|
414 |
-
logger.debug("Sending ENTER key")
|
415 |
search_element.send_keys(Keys.ENTER)
|
416 |
logger.info(f"Selected model: {m_id}")
|
417 |
-
|
418 |
-
|
419 |
-
logger.warning(f"Model selection for '{m_id}' failed due to {type(e_se_to).__name__}.")
|
420 |
-
raise ModelSelectionError(f"Failed to select model {m_id}. Original error: {type(e_se_to).__name__}") from e_se_to
|
421 |
except Exception as e_sync:
|
422 |
-
logger.error(f"Model selection failed for {m_id} with an unexpected error: {e_sync}", exc_info=True)
|
423 |
raise ModelSelectionError(f"Failed to select model {m_id}") from e_sync
|
424 |
-
|
425 |
-
loop = asyncio.get_event_loop()
|
426 |
try:
|
427 |
-
await
|
428 |
-
except ModelSelectionError:
|
429 |
-
raise
|
430 |
except Exception as e_exec:
|
431 |
-
logger.error(f"Error executing _select_model in executor: {e_exec}", exc_info=True)
|
432 |
raise ModelSelectionError(f"Failed to select model {model_id} due to executor error: {e_exec}") from e_exec
|
433 |
|
434 |
async def _retry_with_reload(self, driver: Driver, model_id: str):
|
435 |
-
"""Handle ModelSelectionError by reloading and retrying once"""
|
436 |
try:
|
437 |
-
# Manual reload simulation
|
438 |
pyautogui.press('f5')
|
439 |
time.sleep(0.5)
|
440 |
-
|
441 |
-
|
442 |
-
time.sleep(0.3)
|
443 |
-
pyautogui.press('f5')
|
444 |
-
time.sleep(0.3)
|
445 |
-
pyautogui.keyUp('ctrl')
|
446 |
-
|
447 |
-
# Wait for page reload
|
448 |
-
WebDriverWait(driver, config.page_load_wait_after_refresh_seconds).until(
|
449 |
-
EC.presence_of_element_located((By.XPATH, "//input[@placeholder='Search models' and @cmdk-input]"))
|
450 |
-
)
|
451 |
-
|
452 |
-
# Retry model selection
|
453 |
await self._select_model(model_id)
|
454 |
-
|
455 |
except Exception as reload_err:
|
456 |
logger.error(f"Reload and retry failed: {reload_err}", exc_info=True)
|
457 |
-
|
458 |
-
|
459 |
-
|
|
|
460 |
logger.error(f"Screenshot base64 after failed reload: {b64}")
|
461 |
raise ModelSelectionError(f"Failed after reload attempt: {reload_err}") from reload_err
|
462 |
|
463 |
def generate_reload_button_location(self, driver: Driver) -> str:
|
464 |
-
# This function is not used by the refined captcha logic, but keeping it as it might be used elsewhere.
|
465 |
logger.info("Generating reload button location with Gemini")
|
466 |
try:
|
467 |
-
|
468 |
-
|
469 |
-
|
470 |
-
|
471 |
-
|
472 |
-
|
473 |
-
img_byte_arr = io.BytesIO()
|
474 |
-
screenshot.save(img_byte_arr, format='PNG')
|
475 |
-
img_bytes = img_byte_arr.getvalue()
|
476 |
-
|
477 |
-
model_name = "gemini-2.0-flash"
|
478 |
contents = [
|
479 |
-
types.
|
480 |
-
|
481 |
-
parts=[
|
482 |
-
types.Part.from_bytes(mime_type="image/png", data=img_bytes),
|
483 |
-
types.Part.from_text(text="""Find the reload button on the page. It might be labeled with words like "Reload", "Refresh", or have a circular arrow icon. Return the coordinates of the button in the following format:
|
484 |
``json
|
485 |
[
|
486 |
{"box_2d": [x1, y1, x2, y2], "label": "reload button"}
|
487 |
]
|
488 |
``
|
489 |
If you cannot find the reload button, respond with "No reload button found".
|
490 |
-
"""
|
491 |
-
]
|
492 |
-
)
|
493 |
]
|
494 |
-
|
495 |
generate_content_config = types.GenerateContentConfig(response_mime_type="text/plain")
|
496 |
-
response_stream = self._genai_client.models.generate_content_stream(
|
497 |
-
model=model_name,
|
498 |
-
contents=contents,
|
499 |
-
config=generate_content_config,
|
500 |
-
)
|
501 |
full_response_text = "".join(chunk.text for chunk in response_stream)
|
502 |
logger.info(f"Gemini response for reload button: {full_response_text}")
|
503 |
-
# If AI couldn't find the reload button, simulate manual F5 and FN+F5 keystrokes (without .refresh()), take screenshot and log base64
|
504 |
if "No reload button found" in full_response_text:
|
505 |
logger.info("AI did not find reload button, performing manual F5 and FN+F5 reloads.")
|
506 |
try:
|
507 |
-
pyautogui.press('f5')
|
508 |
-
time.sleep(0.5)
|
509 |
-
|
510 |
-
|
511 |
-
|
512 |
-
|
513 |
-
pyautogui.keyUp('ctrl')
|
514 |
-
time.sleep(0.5)
|
515 |
-
# Capture screenshot after manual reload
|
516 |
-
png2 = driver.get_screenshot_as_png()
|
517 |
-
b64_2 = base64.b64encode(png2).decode('utf-8')
|
518 |
logger.info(f"Screenshot base64 after manual reload: {b64_2}")
|
519 |
except Exception as manu_err:
|
520 |
logger.error(f"Manual reload simulation failed: {manu_err}", exc_info=True)
|
@@ -527,20 +360,19 @@ driver_manager = DriverManager()
|
|
527 |
|
528 |
class ChatHandler:
|
529 |
@staticmethod
|
530 |
-
async def send_message_and_stream_response(prompt: str, model_id: str):
|
531 |
driver = driver_manager.get_driver()
|
532 |
request_id = str(uuid.uuid4())
|
533 |
-
logger.info(f"[{request_id}] Starting chat
|
534 |
try:
|
535 |
if model_id:
|
536 |
-
logger.info(f"[{request_id}] Model specified, selecting '{model_id}'.")
|
537 |
await driver_manager._select_model(model_id)
|
538 |
sanitized_prompt = ChatHandler._sanitize_for_bmp(prompt)
|
539 |
logger.info(f"[{request_id}] Sending prompt (first 50 chars): '{sanitized_prompt[:50]}...'")
|
540 |
await ChatHandler._send_prompt(driver, sanitized_prompt)
|
541 |
await ChatHandler._handle_agreement_dialog(driver)
|
542 |
logger.info(f"[{request_id}] Prompt sent. Streaming response...")
|
543 |
-
async for chunk in ChatHandler._stream_response(driver,
|
544 |
yield chunk
|
545 |
logger.info(f"[{request_id}] Finished streaming response from browser.")
|
546 |
except Exception as e:
|
@@ -555,66 +387,57 @@ class ChatHandler:
|
|
555 |
|
556 |
@staticmethod
|
557 |
def _sanitize_for_bmp(text: str) -> str:
|
558 |
-
# This function is simple, so logging is omitted unless for debugging
|
559 |
return ''.join(c for c in text if ord(c) <= 0xFFFF)
|
560 |
|
561 |
@staticmethod
|
562 |
async def _send_prompt(driver: Driver, prompt: str):
|
563 |
logger.info("Typing prompt into textarea.")
|
564 |
-
|
565 |
-
await loop.run_in_executor(None, lambda: driver.type('textarea', prompt + "\n"))
|
566 |
logger.info("Prompt submitted.")
|
567 |
|
568 |
@staticmethod
|
569 |
async def _handle_agreement_dialog(driver: Driver):
|
570 |
logger.info("Checking for 'Agree' button in dialog.")
|
571 |
-
|
572 |
-
clicked = await loop.run_in_executor(None, lambda: driver.click_if_visible("//button[normalize-space()='Agree']"))
|
573 |
-
if clicked:
|
574 |
logger.info("'Agree' button found and clicked.")
|
575 |
else:
|
576 |
logger.info("'Agree' button not visible, skipping.")
|
577 |
|
578 |
@staticmethod
|
579 |
-
async def _stream_response(driver: Driver,
|
580 |
-
"""Stream response using stabilization-based approach with a corrected locator."""
|
581 |
try:
|
|
|
582 |
content_container_locator = (By.XPATH, "(//ol[contains(@class, 'flex-col-reverse')]/div[.//h2[starts-with(@id, 'radix-')]])[1]//div[contains(@class, 'grid') and contains(@class, 'pt-4')]")
|
583 |
-
|
584 |
-
WebDriverWait(driver, config.response_timeout).until(
|
585 |
-
EC.presence_of_element_located(content_container_locator)
|
586 |
-
)
|
587 |
-
logger.info("Assistant response container found. Starting to poll for text.")
|
588 |
|
589 |
-
|
|
|
|
|
590 |
poll_interval=config.poll_interval,
|
591 |
response_timeout=config.response_timeout,
|
592 |
stabilization_timeout=config.stabilization_timeout,
|
593 |
-
max_inactivity=config.max_inactivity
|
594 |
-
|
|
|
|
|
|
|
595 |
|
596 |
-
|
597 |
driver=driver,
|
598 |
element_locator=content_container_locator
|
599 |
)
|
600 |
|
601 |
-
|
602 |
-
text_stream
|
603 |
-
)
|
604 |
-
|
605 |
-
async for chunk in ChatHandler._sync_to_async(stabilized_stream):
|
606 |
yield chunk
|
607 |
|
608 |
except TimeoutException:
|
609 |
-
logger.error(
|
610 |
-
yield
|
611 |
except Exception as e:
|
612 |
logger.error(f"Streaming error: {e}", exc_info=True)
|
613 |
yield f"\n\nError: {str(e)}"
|
614 |
|
615 |
@staticmethod
|
616 |
async def _sync_to_async(sync_iter: Iterator[str]) -> AsyncGenerator[str, None]:
|
617 |
-
"""Convert synchronous iterator to async generator"""
|
618 |
for item in sync_iter:
|
619 |
yield item
|
620 |
await asyncio.sleep(0)
|
@@ -622,60 +445,32 @@ class ChatHandler:
|
|
622 |
@staticmethod
|
623 |
async def _click_new_chat(driver: Driver, request_id: str):
|
624 |
logger.info(f"[{request_id}] Attempting to click 'New Chat' button.")
|
625 |
-
|
626 |
-
await loop.run_in_executor(None, lambda: driver.click("//a[contains(@class, 'whitespace-nowrap') and .//h2[contains(text(), 'New Chat')]]"))
|
627 |
logger.info(f"[{request_id}] 'New Chat' button clicked successfully.")
|
628 |
|
629 |
async def get_available_models() -> List[str]:
|
630 |
-
"""Scrapes the list of available models from the UI."""
|
631 |
driver = driver_manager.get_driver()
|
632 |
-
loop = asyncio.get_event_loop()
|
633 |
-
|
634 |
def _sync_scrape_models(drv: Driver) -> List[str]:
|
635 |
logger.info("Scraping available models...")
|
636 |
dropdown_locator = (By.XPATH, "//button[@data-sentry-source-file='select-model.tsx' and @role='combobox']")
|
637 |
model_item_locator = (By.XPATH, "//div[@cmdk-item and @data-value]")
|
638 |
-
|
639 |
try:
|
640 |
-
|
641 |
-
dropdown_button
|
642 |
-
|
643 |
-
)
|
644 |
-
dropdown_button.click()
|
645 |
-
logger.info("Model dropdown clicked.")
|
646 |
-
|
647 |
-
# Wait for the list items to be present
|
648 |
-
WebDriverWait(drv, config.driver_timeout).until(
|
649 |
-
EC.presence_of_all_elements_located(model_item_locator)
|
650 |
-
)
|
651 |
-
logger.info("Model list is visible.")
|
652 |
-
time.sleep(0.5) # Brief pause for full render
|
653 |
-
|
654 |
-
# Scrape the model names from the 'data-value' attribute
|
655 |
-
model_elements = drv.find_elements(*model_item_locator)
|
656 |
-
model_ids = [elem.get_attribute('data-value') for elem in model_elements if elem.get_attribute('data-value')]
|
657 |
logger.info(f"Found {len(model_ids)} models.")
|
658 |
-
|
659 |
-
# Click the dropdown again to close it
|
660 |
-
dropdown_button.click()
|
661 |
-
logger.info("Closed model dropdown.")
|
662 |
-
|
663 |
return model_ids
|
664 |
except (TimeoutException, NoSuchElementException) as e:
|
665 |
logger.error(f"Failed to scrape models: {e}", exc_info=True)
|
666 |
-
|
667 |
-
|
668 |
-
drv.find_element(*dropdown_locator).click()
|
669 |
-
except Exception as close_e:
|
670 |
-
logger.warning(f"Could not close model dropdown after error: {close_e}")
|
671 |
raise LmArenaError(f"Could not find or interact with the model dropdown: {e}") from e
|
672 |
-
|
673 |
try:
|
674 |
-
|
675 |
-
return model_list
|
676 |
except Exception as e:
|
677 |
-
logger.error(f"Error executing model scraping in executor: {e}", exc_info=True)
|
678 |
-
raise
|
679 |
|
680 |
@asynccontextmanager
|
681 |
async def lifespan(app: FastAPI):
|
@@ -685,9 +480,7 @@ async def lifespan(app: FastAPI):
|
|
685 |
logger.info("Application startup sequence completed successfully.")
|
686 |
except Exception as e:
|
687 |
logger.critical(f"A critical error occurred during application startup: {e}", exc_info=True)
|
688 |
-
|
689 |
-
await driver_manager.cleanup()
|
690 |
-
raise
|
691 |
yield
|
692 |
logger.info("Application shutdown sequence initiated.")
|
693 |
await driver_manager.cleanup()
|
@@ -695,121 +488,71 @@ async def lifespan(app: FastAPI):
|
|
695 |
|
696 |
app = FastAPI(lifespan=lifespan)
|
697 |
|
698 |
-
@app.middleware("http")
|
699 |
-
async def error_screenshot_middleware(request: Request, call_next):
|
700 |
-
try:
|
701 |
-
return await call_next(request)
|
702 |
-
except Exception as exc:
|
703 |
-
logger.error(f"Unhandled exception: {exc}", exc_info=True)
|
704 |
-
try:
|
705 |
-
driver = driver_manager.get_driver()
|
706 |
-
png = driver.get_screenshot_as_png()
|
707 |
-
b64 = base64.b64encode(png).decode('utf-8')
|
708 |
-
logger.error(f"Screenshot base64: {b64}")
|
709 |
-
except Exception as capture_err:
|
710 |
-
logger.error(f"Failed to capture screenshot: {capture_err}", exc_info=True)
|
711 |
-
raise
|
712 |
-
|
713 |
@app.get("/health")
|
714 |
async def health_check():
|
715 |
-
logger.info("Health check endpoint called.")
|
716 |
try:
|
717 |
driver_manager.get_driver()
|
718 |
-
logger.info("Health check status: healthy, driver is available.")
|
719 |
return {"status": "healthy", "driver": "available"}
|
720 |
except DriverNotAvailableError:
|
721 |
-
logger.warning("Health check status: unhealthy, driver is unavailable.")
|
722 |
return {"status": "unhealthy", "driver": "unavailable"}
|
723 |
|
724 |
@app.get("/models", response_model=ModelListResponse)
|
725 |
async def list_models():
|
726 |
-
"""Returns a list of available models."""
|
727 |
logger.info("Received request for /models endpoint.")
|
728 |
try:
|
729 |
model_ids = await get_available_models()
|
730 |
-
|
731 |
-
return ModelListResponse(data=model_data)
|
732 |
except DriverNotAvailableError:
|
733 |
-
logger.error("Models endpoint called but driver is not available.")
|
734 |
raise HTTPException(status_code=503, detail="Service unavailable: The backend driver is not ready.")
|
735 |
except Exception as e:
|
736 |
logger.error(f"An unexpected error occurred while fetching models: {e}", exc_info=True)
|
737 |
-
|
738 |
-
driver = driver_manager.get_driver()
|
739 |
-
logger.info("Attempting captcha solve after models fetch error...")
|
740 |
-
driver.uc_gui_click_captcha()
|
741 |
-
driver_manager._perform_sync_captcha_checks(driver)
|
742 |
-
png = driver.get_screenshot_as_png()
|
743 |
-
b64 = base64.b64encode(png).decode('utf-8')
|
744 |
-
logger.error(f"Screenshot base64 after captcha solve: {b64}")
|
745 |
-
except Exception as captcha_err:
|
746 |
-
logger.error(f"Failed captcha solve/screenshot in models endpoint: {captcha_err}", exc_info=True)
|
747 |
-
raise HTTPException(status_code=500, detail="An unexpected error occurred while fetching models: {str(e)}")
|
748 |
|
749 |
@app.post("/chat/completions", response_model=ChatCompletionResponse)
|
750 |
async def chat_completions(request: ChatCompletionRequest):
|
751 |
-
completion_id = f"chatcmpl-{uuid.uuid4().hex}"
|
752 |
-
|
753 |
-
logger.info(f"[{completion_id}] Received chat completion request for model '{request.model}', stream={request.stream}.")
|
754 |
full_prompt = "\n".join([msg.content for msg in request.messages if isinstance(msg.content, str)])
|
755 |
try:
|
756 |
driver_manager.get_driver()
|
|
|
|
|
|
|
757 |
if request.stream:
|
758 |
-
logger.info(f"[{completion_id}] Handling as a streaming request.")
|
759 |
return StreamingResponse(
|
760 |
create_streaming_response(
|
761 |
-
completion_id,
|
762 |
-
|
763 |
-
request.model,
|
764 |
-
full_prompt,
|
765 |
-
ChatHandler.send_message_and_stream_response
|
766 |
-
),
|
767 |
-
media_type="text/event-stream"
|
768 |
)
|
769 |
else:
|
770 |
-
logger.info(f"[{completion_id}] Handling as a non-streaming request.")
|
771 |
return await _create_non_streaming_response(
|
772 |
-
completion_id, created_timestamp, request.model, full_prompt
|
773 |
)
|
774 |
except DriverNotAvailableError as e:
|
775 |
-
logger.error(f"[{completion_id}] Service unavailable: The backend driver is not ready. Error: {e}", exc_info=True)
|
776 |
raise HTTPException(status_code=503, detail="Service unavailable: The backend driver is not ready.")
|
777 |
except APIError as e:
|
778 |
-
logger.error(f"[{completion_id}] API Error occurred: {e.message} (Status: {e.status_code})", exc_info=True)
|
779 |
raise HTTPException(status_code=e.status_code, detail=e.message)
|
780 |
except Exception as e:
|
781 |
-
logger.error(f"[{completion_id}] An unexpected processing error occurred: {e}", exc_info=True)
|
782 |
raise HTTPException(status_code=500, detail=f"An unexpected processing error occurred: {str(e)}")
|
783 |
|
784 |
-
async def _create_non_streaming_response(completion_id: str, created: int, model: str, prompt: str) -> ChatCompletionResponse:
|
785 |
-
logger.info(f"[{completion_id}] Creating non-streaming response.")
|
786 |
try:
|
787 |
-
content_parts = [chunk async for chunk in ChatHandler.send_message_and_stream_response(prompt, model)]
|
788 |
final_content = "".join(content_parts)
|
789 |
-
logger.info(f"[{completion_id}] Non-streaming response generated successfully. Content length: {len(final_content)}.")
|
790 |
return ChatCompletionResponse(
|
791 |
-
id=completion_id,
|
792 |
-
object="chat.completion",
|
793 |
-
created=created,
|
794 |
-
model=model,
|
795 |
choices=[Choice(index=0, message={"role": "assistant", "content": final_content}, finish_reason="stop")],
|
796 |
usage=Usage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
|
797 |
)
|
798 |
-
except APIError:
|
799 |
-
logger.error(f"[{completion_id}] APIError during non-streaming response creation.", exc_info=True)
|
800 |
-
raise
|
801 |
except Exception as e:
|
802 |
logger.error(f"[{completion_id}] Exception during non-streaming response creation: {e}", exc_info=True)
|
803 |
raise HTTPException(status_code=500, detail="Error processing non-streaming request.")
|
804 |
|
805 |
if __name__ == "__main__":
|
806 |
import uvicorn
|
807 |
-
logger.info("Starting application...")
|
808 |
if not os.getenv("GEMINI_API_KEY"):
|
809 |
logger.error("FATAL: GEMINI_API_KEY environment variable not set. Captcha solving will be disabled.")
|
810 |
-
print("ERROR: GEMINI_API_KEY environment variable not set.")
|
811 |
else:
|
812 |
logger.info("GEMINI_API_KEY is set.")
|
813 |
-
|
814 |
logger.info("Starting Uvicorn server on 0.0.0.0:8000.")
|
815 |
uvicorn.run(app, host="0.0.0.0", port=8000)
|
|
|
21 |
from selenium.common.exceptions import TimeoutException, NoSuchElementException, StaleElementReferenceException
|
22 |
import random
|
23 |
import pyautogui
|
24 |
+
# Import the fully updated streaming module
|
25 |
from streaming import StreamProcessor, create_streaming_response, StreamConfig, StreamingResponseGenerator
|
26 |
import base64
|
27 |
+
import mss
|
28 |
+
import mss.tools
|
29 |
+
from PIL import Image
|
30 |
|
31 |
# Virtual display setup for Linux headless environments
|
32 |
import platform
|
|
|
40 |
logging.basicConfig(
|
41 |
level=logging.INFO,
|
42 |
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
43 |
+
handlers=[
|
44 |
+
logging.StreamHandler(),
|
45 |
+
logging.FileHandler("lmarena.log")
|
46 |
+
],
|
47 |
+
force=True
|
48 |
)
|
49 |
logger = logging.getLogger(__name__)
|
50 |
|
|
|
77 |
class Message(BaseModel):
|
78 |
role: str
|
79 |
content: Union[str, List[Dict[str, str]]]
|
80 |
+
|
81 |
class ChatCompletionRequest(BaseModel):
|
82 |
messages: List[Message]
|
83 |
model: str
|
84 |
stream: Optional[bool] = False
|
85 |
+
stream_raw_html: Optional[bool] = False
|
86 |
+
convert_html_to_markdown: Optional[bool] = True
|
87 |
+
|
88 |
class Usage(BaseModel):
|
89 |
prompt_tokens: int; completion_tokens: int; total_tokens: int
|
90 |
class Choice(BaseModel):
|
|
|
107 |
object: str = "list"
|
108 |
data: List[ModelInfo]
|
109 |
|
|
|
110 |
class DriverManager:
|
111 |
def __init__(self):
|
112 |
logger.info("DriverManager instance created.")
|
|
|
119 |
logger.info("Gemini client initialized successfully.")
|
120 |
except Exception as e:
|
121 |
logger.error(f"Failed to initialize Gemini client: {e}", exc_info=True)
|
122 |
+
self._genai_client = None
|
123 |
else:
|
124 |
logger.info("GEMINI_API_KEY not set, Gemini client will not be used for captcha.")
|
125 |
|
|
|
128 |
if self._driver is not None:
|
129 |
logger.warning("Driver initialization called but driver already exists.")
|
130 |
return
|
|
|
131 |
loop = asyncio.get_event_loop()
|
132 |
logger.info("Initializing Selenium driver...")
|
|
|
133 |
def _sync_initialize_driver_logic():
|
134 |
logger.info("Executing synchronous driver initialization and enhanced readiness check.")
|
135 |
temp_driver = None
|
|
|
138 |
logger.info("Driver instantiated. Opening URL...")
|
139 |
temp_driver.open(config.lmarena_url)
|
140 |
logger.info(f"URL '{config.lmarena_url}' opened.")
|
|
|
|
|
141 |
logger.info("Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...")
|
142 |
temp_driver.uc_gui_click_captcha()
|
143 |
logger.info("uc_gui_click_captcha() completed. Main site should be loading now.")
|
|
|
|
|
|
|
144 |
self._perform_sync_captcha_checks(temp_driver)
|
|
|
145 |
return temp_driver
|
146 |
except Exception as e:
|
147 |
logger.error(f"Synchronous driver initialization failed: {e}", exc_info=True)
|
148 |
if temp_driver: temp_driver.quit()
|
149 |
raise LmArenaError(f"Failed to initialize driver: {e}") from e
|
|
|
150 |
try:
|
151 |
self._driver = await loop.run_in_executor(None, _sync_initialize_driver_logic)
|
152 |
logger.info("Selenium driver initialization process completed successfully.")
|
153 |
except Exception as e:
|
154 |
logger.error(f"Asynchronous driver initialization failed: {e}", exc_info=True)
|
155 |
+
if self._driver:
|
156 |
+
try:
|
|
|
157 |
driver_to_quit = self._driver
|
158 |
+
self._driver = None
|
159 |
await loop.run_in_executor(None, driver_to_quit.quit)
|
160 |
+
except Exception as quit_e:
|
161 |
logger.error(f"Failed to quit driver after initialization error: {quit_e}")
|
|
|
|
|
|
|
162 |
if isinstance(e, LmArenaError):
|
163 |
raise
|
164 |
raise LmArenaError(f"Failed to initialize driver: {e}") from e
|
165 |
|
166 |
def _human_like_reload(self, driver: Driver):
|
|
|
167 |
logger.info("Performing human-like page reload")
|
|
|
|
|
168 |
if random.random() > 0.5:
|
169 |
logger.info("Using F5 key")
|
170 |
pyautogui.press('f5')
|
|
|
175 |
pyautogui.press('f5')
|
176 |
time.sleep(0.3)
|
177 |
pyautogui.keyUp('ctrl')
|
|
|
|
|
178 |
sleep_time = random.uniform(0.5, 2.0)
|
179 |
time.sleep(sleep_time)
|
180 |
logger.info(f"Page reloaded after {sleep_time:.2f}s delay")
|
181 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
182 |
def _perform_sync_captcha_checks(self, driver: Driver):
|
|
|
183 |
logger.info("Checking for on-site ('Verify Human') captcha...")
|
|
|
|
|
|
|
|
|
|
|
184 |
try:
|
185 |
textarea_locator = (By.TAG_NAME, "textarea")
|
186 |
+
textarea = WebDriverWait(driver, 5).until(EC.element_to_be_clickable(textarea_locator))
|
|
|
|
|
|
|
|
|
187 |
captcha_present = False
|
188 |
+
time.sleep(10)
|
189 |
+
for selector in ['iframe[src*="api2/anchor"]', 'iframe[src*="recaptcha"]', '.g-recaptcha', '.h-captcha']:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
190 |
try:
|
191 |
+
if driver.find_element(By.CSS_SELECTOR, selector).is_displayed():
|
192 |
+
captcha_present = True; break
|
193 |
+
except NoSuchElementException: continue
|
|
|
|
|
|
|
|
|
|
|
|
|
194 |
if not captcha_present:
|
195 |
+
try:
|
196 |
+
if driver.find_element(By.XPATH, "//*[contains(text(), 'Verify you are human')]").is_displayed():
|
197 |
+
captcha_present = True
|
198 |
+
except NoSuchElementException: pass
|
|
|
|
|
|
|
|
|
|
|
|
|
199 |
if textarea.is_enabled() and textarea.is_displayed() and not captcha_present:
|
200 |
logger.info("No on-site captcha detected. Main UI is ready.")
|
201 |
return
|
202 |
else:
|
203 |
logger.info("Textarea not ready or an on-site captcha indicator was found. Proceeding with AI solver.")
|
|
|
204 |
except (TimeoutException, NoSuchElementException):
|
205 |
logger.info("Chat input textarea not interactable. Proceeding with AI captcha solver.")
|
206 |
except Exception as e:
|
207 |
logger.warning(f"Unexpected error checking UI state for on-site captcha: {e}", exc_info=True)
|
|
|
208 |
if not self._genai_client:
|
209 |
logger.error("On-site captcha detected, but Gemini client not available. Cannot proceed.")
|
210 |
raise LmArenaError("AI Captcha solver is required but not configured.")
|
|
|
|
|
211 |
try:
|
212 |
logger.info("Starting visual AI check for on-site captcha.")
|
213 |
screenshot = computer_control_helper.capture_screen()
|
214 |
if not screenshot:
|
215 |
+
logger.error("Failed to capture screen for AI captcha check."); return
|
|
|
|
|
216 |
img_byte_arr = io.BytesIO()
|
217 |
screenshot.save(img_byte_arr, format='PNG')
|
218 |
+
|
219 |
+
# *** CORRECTED GEMINI API CALL ***
|
|
|
|
|
|
|
220 |
contents = [
|
221 |
+
types.Part.from_bytes(mime_type="image/png", data=img_byte_arr.getvalue()),
|
222 |
+
"""find the text "Verify you are human". do not give me the coordinates of the text itself - give me the coordinates of the small box to the LEFT of the text. Example response:
|
|
|
|
|
|
|
223 |
``json
|
224 |
[
|
225 |
{"box_2d": [504, 151, 541, 170], "label": "box"}
|
226 |
]
|
227 |
``
|
228 |
If you cannot find the checkbox, respond with "No checkbox found".
|
229 |
+
"""
|
|
|
|
|
230 |
]
|
231 |
|
232 |
generate_content_config = types.GenerateContentConfig(response_mime_type="text/plain")
|
233 |
logger.info("Sending screenshot to Gemini API for analysis.")
|
234 |
response_stream = self._genai_client.models.generate_content_stream(
|
235 |
+
model="gemini-2.0-flash",
|
236 |
contents=contents,
|
237 |
config=generate_content_config,
|
238 |
)
|
239 |
full_response_text = "".join(chunk.text for chunk in response_stream)
|
240 |
logger.info(f"Received Gemini response for on-site captcha check: {full_response_text}")
|
|
|
241 |
if "No checkbox found" in full_response_text:
|
242 |
logger.info("Gemini indicated no checkbox found for on-site captcha.")
|
243 |
else:
|
|
|
248 |
click_target = parsed_data[0]
|
249 |
elif isinstance(parsed_data, dict) and "box_2d" in parsed_data:
|
250 |
click_target = parsed_data
|
|
|
251 |
if click_target:
|
252 |
logger.info(f"On-site captcha checkbox found via Gemini. Clicking coordinates: {click_target}")
|
253 |
computer_control_helper.perform_click(click_target)
|
254 |
+
time.sleep(3)
|
255 |
logger.info("Click performed. Now reloading page as requested for post-AI solve.")
|
256 |
self._human_like_reload(driver)
|
257 |
time.sleep(config.page_load_wait_after_refresh_seconds)
|
|
|
258 |
else:
|
259 |
logger.info("No valid 'box_2d' data found in Gemini response. Reloading as fallback.")
|
260 |
self._human_like_reload(driver)
|
|
|
265 |
async with self._lock:
|
266 |
if self._driver:
|
267 |
logger.info("Cleaning up and quitting Selenium driver...")
|
268 |
+
loop, driver_to_quit, self._driver = asyncio.get_event_loop(), self._driver, None
|
|
|
|
|
269 |
try:
|
270 |
await loop.run_in_executor(None, driver_to_quit.quit)
|
271 |
logger.info("Driver quit successfully.")
|
272 |
except Exception as e:
|
273 |
logger.error(f"Error during driver cleanup: {e}", exc_info=True)
|
|
|
|
|
274 |
|
275 |
def get_driver(self) -> Driver:
|
276 |
+
if self._driver is None: raise DriverNotAvailableError("Driver not available")
|
|
|
|
|
|
|
277 |
return self._driver
|
278 |
|
279 |
async def _select_model(self, model_id: str) -> None:
|
|
|
280 |
driver = self.get_driver()
|
281 |
logger.info(f"Selecting model: {model_id}")
|
|
|
282 |
def _sync_select_model_logic(drv: Driver, m_id: str):
|
|
|
283 |
try:
|
|
|
|
|
284 |
dropdown_locator = (By.XPATH, "//button[@data-sentry-source-file='select-model.tsx' and @role='combobox']")
|
285 |
+
WebDriverWait(drv, config.driver_timeout).until(EC.element_to_be_clickable(dropdown_locator)).click()
|
|
|
|
|
|
|
|
|
|
|
|
|
286 |
search_locator = (By.XPATH, "//input[@placeholder='Search models' and @cmdk-input]")
|
287 |
+
search_element = WebDriverWait(drv, config.driver_timeout).until(EC.visibility_of_element_located(search_locator))
|
|
|
|
|
|
|
288 |
search_element.clear()
|
|
|
289 |
search_element.send_keys(m_id)
|
|
|
290 |
search_element.send_keys(Keys.ENTER)
|
291 |
logger.info(f"Selected model: {m_id}")
|
292 |
+
except (NoSuchElementException, TimeoutException) as e:
|
293 |
+
raise ModelSelectionError(f"Failed to select model {m_id}. Original error: {type(e).__name__}") from e
|
|
|
|
|
294 |
except Exception as e_sync:
|
|
|
295 |
raise ModelSelectionError(f"Failed to select model {m_id}") from e_sync
|
|
|
|
|
296 |
try:
|
297 |
+
await asyncio.get_event_loop().run_in_executor(None, _sync_select_model_logic, driver, model_id)
|
298 |
+
except ModelSelectionError: raise
|
|
|
299 |
except Exception as e_exec:
|
|
|
300 |
raise ModelSelectionError(f"Failed to select model {model_id} due to executor error: {e_exec}") from e_exec
|
301 |
|
302 |
async def _retry_with_reload(self, driver: Driver, model_id: str):
|
|
|
303 |
try:
|
|
|
304 |
pyautogui.press('f5')
|
305 |
time.sleep(0.5)
|
306 |
+
pyautogui.keyDown('ctrl'); time.sleep(0.3); pyautogui.press('f5'); time.sleep(0.3); pyautogui.keyUp('ctrl')
|
307 |
+
WebDriverWait(driver, config.page_load_wait_after_refresh_seconds).until(EC.presence_of_element_located((By.XPATH, "//input[@placeholder='Search models' and @cmdk-input]")))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
308 |
await self._select_model(model_id)
|
|
|
309 |
except Exception as reload_err:
|
310 |
logger.error(f"Reload and retry failed: {reload_err}", exc_info=True)
|
311 |
+
with mss.mss() as sct:
|
312 |
+
img = Image.frombytes("RGB", sct.grab(sct.monitors[1]).size, sct.grab(sct.monitors[1]).bgra, "raw", "BGRX")
|
313 |
+
img_bytes = io.BytesIO(); img.save(img_bytes, format="PNG")
|
314 |
+
b64 = base64.b64encode(img_bytes.getvalue()).decode('utf-8')
|
315 |
logger.error(f"Screenshot base64 after failed reload: {b64}")
|
316 |
raise ModelSelectionError(f"Failed after reload attempt: {reload_err}") from reload_err
|
317 |
|
318 |
def generate_reload_button_location(self, driver: Driver) -> str:
|
|
|
319 |
logger.info("Generating reload button location with Gemini")
|
320 |
try:
|
321 |
+
with mss.mss() as sct:
|
322 |
+
img = Image.frombytes("RGB", sct.grab(sct.monitors[1]).size, sct.grab(sct.monitors[1]).bgra, "raw", "BGRX")
|
323 |
+
img_bytes = io.BytesIO(); img.save(img_bytes, format="PNG"); img_bytes = img_bytes.getvalue()
|
324 |
+
|
325 |
+
# *** CORRECTED GEMINI API CALL ***
|
|
|
|
|
|
|
|
|
|
|
|
|
326 |
contents = [
|
327 |
+
types.Part.from_bytes(mime_type="image/png", data=img_bytes),
|
328 |
+
"""Find the reload button on the page. It might be labeled with words like "Reload", "Refresh", or have a circular arrow icon. Return the coordinates of the button in the following format:
|
|
|
|
|
|
|
329 |
``json
|
330 |
[
|
331 |
{"box_2d": [x1, y1, x2, y2], "label": "reload button"}
|
332 |
]
|
333 |
``
|
334 |
If you cannot find the reload button, respond with "No reload button found".
|
335 |
+
"""
|
|
|
|
|
336 |
]
|
337 |
+
|
338 |
generate_content_config = types.GenerateContentConfig(response_mime_type="text/plain")
|
339 |
+
response_stream = self._genai_client.models.generate_content_stream(model="gemini-2.0-flash", contents=contents, config=generate_content_config)
|
|
|
|
|
|
|
|
|
340 |
full_response_text = "".join(chunk.text for chunk in response_stream)
|
341 |
logger.info(f"Gemini response for reload button: {full_response_text}")
|
|
|
342 |
if "No reload button found" in full_response_text:
|
343 |
logger.info("AI did not find reload button, performing manual F5 and FN+F5 reloads.")
|
344 |
try:
|
345 |
+
pyautogui.press('f5'); time.sleep(0.5)
|
346 |
+
pyautogui.keyDown('ctrl'); time.sleep(0.3); pyautogui.press('f5'); time.sleep(0.3); pyautogui.keyUp('ctrl'); time.sleep(0.5)
|
347 |
+
with mss.mss() as sct:
|
348 |
+
img = Image.frombytes("RGB", sct.grab(sct.monitors[1]).size, sct.grab(sct.monitors[1]).bgra, "raw", "BGRX")
|
349 |
+
img_bytes = io.BytesIO(); img.save(img_bytes, format="PNG")
|
350 |
+
b64_2 = base64.b64encode(img_bytes.getvalue()).decode('utf-8')
|
|
|
|
|
|
|
|
|
|
|
351 |
logger.info(f"Screenshot base64 after manual reload: {b64_2}")
|
352 |
except Exception as manu_err:
|
353 |
logger.error(f"Manual reload simulation failed: {manu_err}", exc_info=True)
|
|
|
360 |
|
361 |
class ChatHandler:
|
362 |
@staticmethod
|
363 |
+
async def send_message_and_stream_response(prompt: str, model_id: str, stream_raw_html: bool = False, convert_html_to_markdown: bool = True):
|
364 |
driver = driver_manager.get_driver()
|
365 |
request_id = str(uuid.uuid4())
|
366 |
+
logger.info(f"[{request_id}] Starting chat. Model: '{model_id}', RawHTML: {stream_raw_html}, MarkdownMode: {convert_html_to_markdown}.")
|
367 |
try:
|
368 |
if model_id:
|
|
|
369 |
await driver_manager._select_model(model_id)
|
370 |
sanitized_prompt = ChatHandler._sanitize_for_bmp(prompt)
|
371 |
logger.info(f"[{request_id}] Sending prompt (first 50 chars): '{sanitized_prompt[:50]}...'")
|
372 |
await ChatHandler._send_prompt(driver, sanitized_prompt)
|
373 |
await ChatHandler._handle_agreement_dialog(driver)
|
374 |
logger.info(f"[{request_id}] Prompt sent. Streaming response...")
|
375 |
+
async for chunk in ChatHandler._stream_response(driver, stream_raw_html, convert_html_to_markdown):
|
376 |
yield chunk
|
377 |
logger.info(f"[{request_id}] Finished streaming response from browser.")
|
378 |
except Exception as e:
|
|
|
387 |
|
388 |
@staticmethod
|
389 |
def _sanitize_for_bmp(text: str) -> str:
|
|
|
390 |
return ''.join(c for c in text if ord(c) <= 0xFFFF)
|
391 |
|
392 |
@staticmethod
|
393 |
async def _send_prompt(driver: Driver, prompt: str):
|
394 |
logger.info("Typing prompt into textarea.")
|
395 |
+
await asyncio.get_event_loop().run_in_executor(None, lambda: driver.type('textarea', prompt + "\n"))
|
|
|
396 |
logger.info("Prompt submitted.")
|
397 |
|
398 |
@staticmethod
|
399 |
async def _handle_agreement_dialog(driver: Driver):
|
400 |
logger.info("Checking for 'Agree' button in dialog.")
|
401 |
+
if await asyncio.get_event_loop().run_in_executor(None, lambda: driver.click_if_visible("//button[normalize-space()='Agree']", timeout=1)):
|
|
|
|
|
402 |
logger.info("'Agree' button found and clicked.")
|
403 |
else:
|
404 |
logger.info("'Agree' button not visible, skipping.")
|
405 |
|
406 |
@staticmethod
|
407 |
+
async def _stream_response(driver: Driver, stream_raw_html: bool, convert_html_to_markdown: bool) -> AsyncGenerator[str, None]:
|
|
|
408 |
try:
|
409 |
+
# *** CORRECTED XPATH ***: Restored your original, more robust XPath selector.
|
410 |
content_container_locator = (By.XPATH, "(//ol[contains(@class, 'flex-col-reverse')]/div[.//h2[starts-with(@id, 'radix-')]])[1]//div[contains(@class, 'grid') and contains(@class, 'pt-4')]")
|
|
|
|
|
|
|
|
|
|
|
411 |
|
412 |
+
WebDriverWait(driver, config.response_timeout).until(EC.presence_of_element_located(content_container_locator))
|
413 |
+
|
414 |
+
stream_config = StreamConfig(
|
415 |
poll_interval=config.poll_interval,
|
416 |
response_timeout=config.response_timeout,
|
417 |
stabilization_timeout=config.stabilization_timeout,
|
418 |
+
max_inactivity=config.max_inactivity,
|
419 |
+
stream_raw_html=stream_raw_html,
|
420 |
+
convert_html_to_markdown=convert_html_to_markdown
|
421 |
+
)
|
422 |
+
stream_processor = StreamProcessor(config=stream_config)
|
423 |
|
424 |
+
processed_stream_iterator = stream_processor.get_processed_text_stream(
|
425 |
driver=driver,
|
426 |
element_locator=content_container_locator
|
427 |
)
|
428 |
|
429 |
+
async for chunk in ChatHandler._sync_to_async(processed_stream_iterator):
|
|
|
|
|
|
|
|
|
430 |
yield chunk
|
431 |
|
432 |
except TimeoutException:
|
433 |
+
logger.error("Streaming error: Timed out waiting for response container to appear.", exc_info=True)
|
434 |
+
yield "\n\nError: Timed out waiting for response from the page."
|
435 |
except Exception as e:
|
436 |
logger.error(f"Streaming error: {e}", exc_info=True)
|
437 |
yield f"\n\nError: {str(e)}"
|
438 |
|
439 |
@staticmethod
|
440 |
async def _sync_to_async(sync_iter: Iterator[str]) -> AsyncGenerator[str, None]:
|
|
|
441 |
for item in sync_iter:
|
442 |
yield item
|
443 |
await asyncio.sleep(0)
|
|
|
445 |
@staticmethod
|
446 |
async def _click_new_chat(driver: Driver, request_id: str):
|
447 |
logger.info(f"[{request_id}] Attempting to click 'New Chat' button.")
|
448 |
+
await asyncio.get_event_loop().run_in_executor(None, lambda: driver.click("//a[contains(@class, 'whitespace-nowrap') and .//h2[contains(text(), 'New Chat')]]"))
|
|
|
449 |
logger.info(f"[{request_id}] 'New Chat' button clicked successfully.")
|
450 |
|
451 |
async def get_available_models() -> List[str]:
|
|
|
452 |
driver = driver_manager.get_driver()
|
|
|
|
|
453 |
def _sync_scrape_models(drv: Driver) -> List[str]:
|
454 |
logger.info("Scraping available models...")
|
455 |
dropdown_locator = (By.XPATH, "//button[@data-sentry-source-file='select-model.tsx' and @role='combobox']")
|
456 |
model_item_locator = (By.XPATH, "//div[@cmdk-item and @data-value]")
|
|
|
457 |
try:
|
458 |
+
dropdown_button = WebDriverWait(drv, config.driver_timeout).until(EC.element_to_be_clickable(dropdown_locator))
|
459 |
+
dropdown_button.click(); logger.info("Model dropdown clicked.")
|
460 |
+
WebDriverWait(drv, config.driver_timeout).until(EC.presence_of_all_elements_located(model_item_locator)); time.sleep(0.5)
|
461 |
+
model_ids = [elem.get_attribute('data-value') for elem in drv.find_elements(*model_item_locator) if elem.get_attribute('data-value')]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
462 |
logger.info(f"Found {len(model_ids)} models.")
|
463 |
+
dropdown_button.click(); logger.info("Closed model dropdown.")
|
|
|
|
|
|
|
|
|
464 |
return model_ids
|
465 |
except (TimeoutException, NoSuchElementException) as e:
|
466 |
logger.error(f"Failed to scrape models: {e}", exc_info=True)
|
467 |
+
try: drv.find_element(*dropdown_locator).click()
|
468 |
+
except Exception as close_e: logger.warning(f"Could not close model dropdown after error: {close_e}")
|
|
|
|
|
|
|
469 |
raise LmArenaError(f"Could not find or interact with the model dropdown: {e}") from e
|
|
|
470 |
try:
|
471 |
+
return await asyncio.get_event_loop().run_in_executor(None, _sync_scrape_models, driver)
|
|
|
472 |
except Exception as e:
|
473 |
+
logger.error(f"Error executing model scraping in executor: {e}", exc_info=True); raise
|
|
|
474 |
|
475 |
@asynccontextmanager
|
476 |
async def lifespan(app: FastAPI):
|
|
|
480 |
logger.info("Application startup sequence completed successfully.")
|
481 |
except Exception as e:
|
482 |
logger.critical(f"A critical error occurred during application startup: {e}", exc_info=True)
|
483 |
+
await driver_manager.cleanup(); raise
|
|
|
|
|
484 |
yield
|
485 |
logger.info("Application shutdown sequence initiated.")
|
486 |
await driver_manager.cleanup()
|
|
|
488 |
|
489 |
app = FastAPI(lifespan=lifespan)
|
490 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
491 |
@app.get("/health")
|
492 |
async def health_check():
|
|
|
493 |
try:
|
494 |
driver_manager.get_driver()
|
|
|
495 |
return {"status": "healthy", "driver": "available"}
|
496 |
except DriverNotAvailableError:
|
|
|
497 |
return {"status": "unhealthy", "driver": "unavailable"}
|
498 |
|
499 |
@app.get("/models", response_model=ModelListResponse)
|
500 |
async def list_models():
|
|
|
501 |
logger.info("Received request for /models endpoint.")
|
502 |
try:
|
503 |
model_ids = await get_available_models()
|
504 |
+
return ModelListResponse(data=[ModelInfo(id=model_id) for model_id in model_ids])
|
|
|
505 |
except DriverNotAvailableError:
|
|
|
506 |
raise HTTPException(status_code=503, detail="Service unavailable: The backend driver is not ready.")
|
507 |
except Exception as e:
|
508 |
logger.error(f"An unexpected error occurred while fetching models: {e}", exc_info=True)
|
509 |
+
raise HTTPException(status_code=500, detail=f"An unexpected error occurred while fetching models: {str(e)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
510 |
|
511 |
@app.post("/chat/completions", response_model=ChatCompletionResponse)
|
512 |
async def chat_completions(request: ChatCompletionRequest):
|
513 |
+
completion_id, created_timestamp = f"chatcmpl-{uuid.uuid4().hex}", int(time.time())
|
514 |
+
logger.info(f"[{completion_id}] Received chat completion request: model='{request.model}', stream={request.stream}, md_convert={request.convert_html_to_markdown}")
|
|
|
515 |
full_prompt = "\n".join([msg.content for msg in request.messages if isinstance(msg.content, str)])
|
516 |
try:
|
517 |
driver_manager.get_driver()
|
518 |
+
send_message_func = lambda p, m: ChatHandler.send_message_and_stream_response(
|
519 |
+
prompt=p, model_id=m, stream_raw_html=request.stream_raw_html, convert_html_to_markdown=request.convert_html_to_markdown
|
520 |
+
)
|
521 |
if request.stream:
|
|
|
522 |
return StreamingResponse(
|
523 |
create_streaming_response(
|
524 |
+
completion_id=completion_id, created=created_timestamp, model=request.model, prompt=full_prompt, send_message_func=send_message_func
|
525 |
+
), media_type="text/event-stream"
|
|
|
|
|
|
|
|
|
|
|
526 |
)
|
527 |
else:
|
|
|
528 |
return await _create_non_streaming_response(
|
529 |
+
completion_id, created_timestamp, request.model, full_prompt, request.convert_html_to_markdown
|
530 |
)
|
531 |
except DriverNotAvailableError as e:
|
|
|
532 |
raise HTTPException(status_code=503, detail="Service unavailable: The backend driver is not ready.")
|
533 |
except APIError as e:
|
|
|
534 |
raise HTTPException(status_code=e.status_code, detail=e.message)
|
535 |
except Exception as e:
|
|
|
536 |
raise HTTPException(status_code=500, detail=f"An unexpected processing error occurred: {str(e)}")
|
537 |
|
538 |
+
async def _create_non_streaming_response(completion_id: str, created: int, model: str, prompt: str, convert_html_to_markdown: bool) -> ChatCompletionResponse:
|
|
|
539 |
try:
|
540 |
+
content_parts = [chunk async for chunk in ChatHandler.send_message_and_stream_response(prompt, model, stream_raw_html=False, convert_html_to_markdown=convert_html_to_markdown)]
|
541 |
final_content = "".join(content_parts)
|
|
|
542 |
return ChatCompletionResponse(
|
543 |
+
id=completion_id, object="chat.completion", created=created, model=model,
|
|
|
|
|
|
|
544 |
choices=[Choice(index=0, message={"role": "assistant", "content": final_content}, finish_reason="stop")],
|
545 |
usage=Usage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
|
546 |
)
|
|
|
|
|
|
|
547 |
except Exception as e:
|
548 |
logger.error(f"[{completion_id}] Exception during non-streaming response creation: {e}", exc_info=True)
|
549 |
raise HTTPException(status_code=500, detail="Error processing non-streaming request.")
|
550 |
|
551 |
if __name__ == "__main__":
|
552 |
import uvicorn
|
|
|
553 |
if not os.getenv("GEMINI_API_KEY"):
|
554 |
logger.error("FATAL: GEMINI_API_KEY environment variable not set. Captcha solving will be disabled.")
|
|
|
555 |
else:
|
556 |
logger.info("GEMINI_API_KEY is set.")
|
|
|
557 |
logger.info("Starting Uvicorn server on 0.0.0.0:8000.")
|
558 |
uvicorn.run(app, host="0.0.0.0", port=8000)
|
computer_control_helper.py
CHANGED
@@ -3,21 +3,9 @@
|
|
3 |
from PIL import Image, ImageDraw, ImageFont
|
4 |
import pyautogui
|
5 |
import mss # For screen capture
|
6 |
-
import io
|
7 |
import json
|
8 |
import os
|
9 |
import time, datetime
|
10 |
-
import sys
|
11 |
-
|
12 |
-
|
13 |
-
DEBUG_DIR = "debug_screenshots"
|
14 |
-
try:
|
15 |
-
os.makedirs(DEBUG_DIR, exist_ok=True)
|
16 |
-
print(f"Debug screenshots will be saved to: {os.path.abspath(DEBUG_DIR)}")
|
17 |
-
except OSError as e:
|
18 |
-
print(f"Error creating debug directory '{DEBUG_DIR}': {e}")
|
19 |
-
# Decide if you want to exit or just continue without saving debug images
|
20 |
-
DEBUG_DIR = None # Disable saving if directory creation fails
|
21 |
|
22 |
def parse_json_safely(json_string: str) -> dict:
|
23 |
"""
|
@@ -568,89 +556,6 @@ def perform_press_key(location_data: dict):
|
|
568 |
return False
|
569 |
|
570 |
|
571 |
-
def save_debug_screenshot(image: Image.Image, location_data: dict, task_desc: str, step: int, output_dir: str):
|
572 |
-
"""Saves the image with the detected bounding box drawn on it."""
|
573 |
-
if not output_dir:
|
574 |
-
print("Debug directory not configured. Skipping debug image save.")
|
575 |
-
return
|
576 |
-
if not image:
|
577 |
-
print("No image provided to save_debug_screenshot.")
|
578 |
-
return
|
579 |
-
if not location_data or "box_2d" not in location_data:
|
580 |
-
print("No bounding box data to draw for debug image.")
|
581 |
-
# Optionally save the plain screenshot anyway
|
582 |
-
# filename = f"debug_{datetime.datetime.now():%Y%m%d_%H%M%S}_step{step}_nobox.png"
|
583 |
-
# image.save(os.path.join(output_dir, filename))
|
584 |
-
return
|
585 |
-
|
586 |
-
try:
|
587 |
-
# Apply grid overlay first
|
588 |
-
img_copy = draw_grid_overlay(image)
|
589 |
-
|
590 |
-
draw = ImageDraw.Draw(img_copy)
|
591 |
-
box = location_data["box_2d"]
|
592 |
-
label = location_data.get("label", "Unknown")
|
593 |
-
|
594 |
-
# Use image dimensions for denormalization when drawing
|
595 |
-
width, height = img_copy.size
|
596 |
-
|
597 |
-
# Denormalize coordinates (Gemini uses y_min, x_min, y_max, x_max from 0-1000)
|
598 |
-
y_min_norm, x_min_norm, y_max_norm, x_max_norm = box
|
599 |
-
|
600 |
-
# Validation (optional but good)
|
601 |
-
if not all(isinstance(coord, (int, float)) and 0 <= coord <= 1000 for coord in box):
|
602 |
-
print(f"Warning: Invalid normalized coordinates in debug save: {box}")
|
603 |
-
return
|
604 |
-
if x_min_norm >= x_max_norm or y_min_norm >= y_max_norm:
|
605 |
-
print(f"Warning: Invalid box (min>=max) in debug save: {box}")
|
606 |
-
# Decide if you still want to draw/save
|
607 |
-
|
608 |
-
abs_x_min = int(x_min_norm / 1000 * width)
|
609 |
-
abs_y_min = int(y_min_norm / 1000 * height)
|
610 |
-
abs_x_max = int(x_max_norm / 1000 * width)
|
611 |
-
abs_y_max = int(y_max_norm / 1000 * height)
|
612 |
-
|
613 |
-
# Draw rectangle
|
614 |
-
outline_color = "red"
|
615 |
-
draw.rectangle(
|
616 |
-
((abs_x_min, abs_y_min), (abs_x_max, abs_y_max)),
|
617 |
-
outline=outline_color,
|
618 |
-
width=3 # Make it visible
|
619 |
-
)
|
620 |
-
|
621 |
-
# Draw label (optional, requires a font)
|
622 |
-
try:
|
623 |
-
# Try a common system font, adjust path if needed or remove if font hassle is too much
|
624 |
-
font_size = 16
|
625 |
-
font = ImageFont.truetype("arial.ttf", font_size) # Adjust path/name as needed
|
626 |
-
text_position = (abs_x_min + 5, abs_y_min - font_size - 2 if abs_y_min > font_size else abs_y_min + 5)
|
627 |
-
# Simple background for text readability
|
628 |
-
text_bbox = draw.textbbox(text_position, label, font=font)
|
629 |
-
draw.rectangle(text_bbox, fill="white")
|
630 |
-
draw.text(text_position, label, fill=outline_color, font=font)
|
631 |
-
except IOError:
|
632 |
-
print("Warning: Font for debug label not found. Skipping label.")
|
633 |
-
# Fallback: draw text without specific font (might be tiny/ugly)
|
634 |
-
# draw.text((abs_x_min + 5, abs_y_min + 5), label, fill=outline_color)
|
635 |
-
except Exception as e:
|
636 |
-
print(f"Warning: Error loading font or drawing text: {e}")
|
637 |
-
|
638 |
-
|
639 |
-
# Create filename
|
640 |
-
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")[:-3] # Added milliseconds
|
641 |
-
# Sanitize task description slightly for filename
|
642 |
-
safe_task_desc = "".join(c if c.isalnum() else "_" for c in task_desc[:5]) # Limit length
|
643 |
-
filename = f"debug_{timestamp}_step{step}_{safe_task_desc}.png"
|
644 |
-
filepath = os.path.join(output_dir, filename)
|
645 |
-
|
646 |
-
# Save the image
|
647 |
-
img_copy.save(filepath)
|
648 |
-
print(f"Debug image saved: {filepath}")
|
649 |
-
|
650 |
-
except Exception as e:
|
651 |
-
print(f"Error saving debug screenshot: {e}")
|
652 |
-
|
653 |
-
|
654 |
max_steps = 100 # Safety break to prevent infinite loops
|
655 |
|
656 |
|
@@ -669,10 +574,6 @@ def do_task(original_task, task_not_complted):
|
|
669 |
# Parse the json input
|
670 |
step_info = parse_json_safely(original_task)
|
671 |
|
672 |
-
# Save debug screenshot *with* action info if available
|
673 |
-
if DEBUG_DIR and screen_image:
|
674 |
-
save_debug_screenshot(screen_image, step_info, original_task, step_count, DEBUG_DIR)
|
675 |
-
|
676 |
# 3. Extract info from Gemini's response
|
677 |
action_type = step_info.get("action_type") # Get the action type
|
678 |
task_not_completed = step_info.get("task_not_completed", False) # Default to False if missing
|
|
|
3 |
from PIL import Image, ImageDraw, ImageFont
|
4 |
import pyautogui
|
5 |
import mss # For screen capture
|
|
|
6 |
import json
|
7 |
import os
|
8 |
import time, datetime
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
|
10 |
def parse_json_safely(json_string: str) -> dict:
|
11 |
"""
|
|
|
556 |
return False
|
557 |
|
558 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
559 |
max_steps = 100 # Safety break to prevent infinite loops
|
560 |
|
561 |
|
|
|
574 |
# Parse the json input
|
575 |
step_info = parse_json_safely(original_task)
|
576 |
|
|
|
|
|
|
|
|
|
577 |
# 3. Extract info from Gemini's response
|
578 |
action_type = step_info.get("action_type") # Get the action type
|
579 |
task_not_completed = step_info.get("task_not_completed", False) # Default to False if missing
|
downloaded_files/driver_fixing.lock
DELETED
File without changes
|
downloaded_files/pipfinding.lock
DELETED
File without changes
|
downloaded_files/pyautogui.lock
DELETED
File without changes
|
lmarena.log
ADDED
@@ -0,0 +1,790 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
2025-06-09 12:15:57,895 - __main__ - INFO - Configuration loaded.
|
2 |
+
2025-06-09 12:15:57,902 - __main__ - INFO - DriverManager instance created.
|
3 |
+
2025-06-09 12:15:58,475 - __main__ - INFO - Gemini client initialized successfully.
|
4 |
+
2025-06-09 12:15:58,532 - __main__ - INFO - GEMINI_API_KEY is set.
|
5 |
+
2025-06-09 12:15:58,532 - __main__ - INFO - Starting Uvicorn server on 0.0.0.0:8000.
|
6 |
+
2025-06-09 12:15:58,569 - __main__ - INFO - Application startup sequence initiated.
|
7 |
+
2025-06-09 12:15:58,570 - __main__ - INFO - Initializing Selenium driver...
|
8 |
+
2025-06-09 12:15:58,572 - __main__ - INFO - Executing synchronous driver initialization and enhanced readiness check.
|
9 |
+
2025-06-09 12:16:02,376 - __main__ - INFO - Driver instantiated. Opening URL...
|
10 |
+
2025-06-09 12:16:07,588 - __main__ - INFO - URL 'https://beta.lmarena.ai/?mode=direct' opened.
|
11 |
+
2025-06-09 12:16:07,589 - __main__ - INFO - Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...
|
12 |
+
2025-06-09 12:16:23,430 - __main__ - INFO - uc_gui_click_captcha() completed. Main site should be loading now.
|
13 |
+
2025-06-09 12:16:23,431 - __main__ - INFO - Checking for on-site ('Verify Human') captcha...
|
14 |
+
2025-06-09 12:16:27,502 - __main__ - WARNING - Unexpected error checking UI state for on-site captcha: Message: invalid session id: session deleted as the browser has closed the connection
|
15 |
+
from disconnected: not connected to DevTools
|
16 |
+
(Session info: chrome=137.0.7151.69)
|
17 |
+
Stacktrace:
|
18 |
+
GetHandleVerifier [0x0x7ff748e66f65+78965]
|
19 |
+
GetHandleVerifier [0x0x7ff748e66fc0+79056]
|
20 |
+
(No symbol) [0x0x7ff748bf9dda]
|
21 |
+
(No symbol) [0x0x7ff748be5bc5]
|
22 |
+
(No symbol) [0x0x7ff748c0ac04]
|
23 |
+
(No symbol) [0x0x7ff748c80195]
|
24 |
+
(No symbol) [0x0x7ff748ca06cd]
|
25 |
+
(No symbol) [0x0x7ff748c78443]
|
26 |
+
(No symbol) [0x0x7ff748c41311]
|
27 |
+
(No symbol) [0x0x7ff748c420a3]
|
28 |
+
GetHandleVerifier [0x0x7ff74911e26d+2926461]
|
29 |
+
GetHandleVerifier [0x0x7ff749118993+2903715]
|
30 |
+
GetHandleVerifier [0x0x7ff749136aed+3026941]
|
31 |
+
GetHandleVerifier [0x0x7ff748e816fe+187406]
|
32 |
+
GetHandleVerifier [0x0x7ff748e896ef+220159]
|
33 |
+
GetHandleVerifier [0x0x7ff748e6faf4+114692]
|
34 |
+
GetHandleVerifier [0x0x7ff748e6fca9+115129]
|
35 |
+
GetHandleVerifier [0x0x7ff748e564d8+10728]
|
36 |
+
BaseThreadInitThunk [0x0x7ffd6f06e8d7+23]
|
37 |
+
RtlUserThreadStart [0x0x7ffd6fafc5dc+44]
|
38 |
+
Traceback (most recent call last):
|
39 |
+
File "c:\Users\caree\Code\Lmarena\api.py", line 186, in _perform_sync_captcha_checks
|
40 |
+
textarea = WebDriverWait(driver, 5).until(EC.element_to_be_clickable(textarea_locator))
|
41 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\selenium\webdriver\support\wait.py", line 129, in until
|
42 |
+
value = method(self._driver)
|
43 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\selenium\webdriver\support\expected_conditions.py", line 622, in _predicate
|
44 |
+
target = driver.find_element(*target) # grab element at locator
|
45 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\seleniumbase\core\sb_driver.py", line 27, in find_element
|
46 |
+
return self.driver.default_find_element(by=by, value=value)
|
47 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^
|
48 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\selenium\webdriver\remote\webdriver.py", line 914, in find_element
|
49 |
+
return self.execute(Command.FIND_ELEMENT, {"using": by, "value": value})["value"]
|
50 |
+
~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
51 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\selenium\webdriver\remote\webdriver.py", line 447, in execute
|
52 |
+
self.error_handler.check_response(response)
|
53 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^
|
54 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\selenium\webdriver\remote\errorhandler.py", line 232, in check_response
|
55 |
+
raise exception_class(message, screen, stacktrace)
|
56 |
+
selenium.common.exceptions.InvalidSessionIdException: Message: invalid session id: session deleted as the browser has closed the connection
|
57 |
+
from disconnected: not connected to DevTools
|
58 |
+
(Session info: chrome=137.0.7151.69)
|
59 |
+
Stacktrace:
|
60 |
+
GetHandleVerifier [0x0x7ff748e66f65+78965]
|
61 |
+
GetHandleVerifier [0x0x7ff748e66fc0+79056]
|
62 |
+
(No symbol) [0x0x7ff748bf9dda]
|
63 |
+
(No symbol) [0x0x7ff748be5bc5]
|
64 |
+
(No symbol) [0x0x7ff748c0ac04]
|
65 |
+
(No symbol) [0x0x7ff748c80195]
|
66 |
+
(No symbol) [0x0x7ff748ca06cd]
|
67 |
+
(No symbol) [0x0x7ff748c78443]
|
68 |
+
(No symbol) [0x0x7ff748c41311]
|
69 |
+
(No symbol) [0x0x7ff748c420a3]
|
70 |
+
GetHandleVerifier [0x0x7ff74911e26d+2926461]
|
71 |
+
GetHandleVerifier [0x0x7ff749118993+2903715]
|
72 |
+
GetHandleVerifier [0x0x7ff749136aed+3026941]
|
73 |
+
GetHandleVerifier [0x0x7ff748e816fe+187406]
|
74 |
+
GetHandleVerifier [0x0x7ff748e896ef+220159]
|
75 |
+
GetHandleVerifier [0x0x7ff748e6faf4+114692]
|
76 |
+
GetHandleVerifier [0x0x7ff748e6fca9+115129]
|
77 |
+
GetHandleVerifier [0x0x7ff748e564d8+10728]
|
78 |
+
BaseThreadInitThunk [0x0x7ffd6f06e8d7+23]
|
79 |
+
RtlUserThreadStart [0x0x7ffd6fafc5dc+44]
|
80 |
+
|
81 |
+
2025-06-09 12:16:27,507 - __main__ - INFO - Starting visual AI check for on-site captcha.
|
82 |
+
2025-06-09 12:16:27,663 - __main__ - INFO - Sending screenshot to Gemini API for analysis.
|
83 |
+
2025-06-09 12:16:27,663 - google_genai.models - INFO - AFC is enabled with max remote calls: 10.
|
84 |
+
2025-06-09 12:16:27,664 - google_genai.models - INFO - AFC remote call 1 is done.
|
85 |
+
2025-06-09 12:16:30,777 - httpx - INFO - HTTP Request: POST https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse "HTTP/1.1 200 OK"
|
86 |
+
2025-06-09 12:16:30,849 - __main__ - INFO - Received Gemini response for on-site captcha check: ```json
|
87 |
+
[
|
88 |
+
{"box_2d": [495, 138, 527, 151], "label": "box"}
|
89 |
+
]
|
90 |
+
```
|
91 |
+
2025-06-09 12:16:30,849 - __main__ - INFO - On-site captcha checkbox found via Gemini. Clicking coordinates: {'box_2d': [495, 138, 527, 151], 'label': 'box'}
|
92 |
+
2025-06-09 12:16:42,537 - __main__ - INFO - Configuration loaded.
|
93 |
+
2025-06-09 12:16:42,542 - __main__ - INFO - DriverManager instance created.
|
94 |
+
2025-06-09 12:16:43,027 - __main__ - INFO - Gemini client initialized successfully.
|
95 |
+
2025-06-09 12:16:43,065 - __main__ - INFO - GEMINI_API_KEY is set.
|
96 |
+
2025-06-09 12:16:43,065 - __main__ - INFO - Starting Uvicorn server on 0.0.0.0:8000.
|
97 |
+
2025-06-09 12:16:43,090 - __main__ - INFO - Application startup sequence initiated.
|
98 |
+
2025-06-09 12:16:43,091 - __main__ - INFO - Initializing Selenium driver...
|
99 |
+
2025-06-09 12:16:43,092 - __main__ - INFO - Executing synchronous driver initialization and enhanced readiness check.
|
100 |
+
2025-06-09 12:16:46,172 - __main__ - INFO - Driver instantiated. Opening URL...
|
101 |
+
2025-06-09 12:16:53,140 - __main__ - INFO - URL 'https://beta.lmarena.ai/?mode=direct' opened.
|
102 |
+
2025-06-09 12:16:53,141 - __main__ - INFO - Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...
|
103 |
+
2025-06-09 12:16:53,588 - __main__ - INFO - uc_gui_click_captcha() completed. Main site should be loading now.
|
104 |
+
2025-06-09 12:16:53,589 - __main__ - INFO - Checking for on-site ('Verify Human') captcha...
|
105 |
+
2025-06-09 12:16:55,688 - __main__ - INFO - No on-site captcha detected. Main UI is ready.
|
106 |
+
2025-06-09 12:16:55,688 - __main__ - INFO - Selenium driver initialization process completed successfully.
|
107 |
+
2025-06-09 12:16:55,689 - __main__ - INFO - Application startup sequence completed successfully.
|
108 |
+
2025-06-09 12:17:39,593 - __main__ - INFO - Application shutdown sequence initiated.
|
109 |
+
2025-06-09 12:17:39,593 - __main__ - INFO - Cleaning up and quitting Selenium driver...
|
110 |
+
2025-06-09 12:17:41,909 - __main__ - INFO - Driver quit successfully.
|
111 |
+
2025-06-09 12:17:41,909 - __main__ - INFO - Application shutdown sequence completed.
|
112 |
+
2025-06-09 12:18:55,881 - __main__ - INFO - Configuration loaded.
|
113 |
+
2025-06-09 12:18:55,887 - __main__ - INFO - DriverManager instance created.
|
114 |
+
2025-06-09 12:18:56,388 - __main__ - INFO - Gemini client initialized successfully.
|
115 |
+
2025-06-09 12:18:56,424 - __main__ - INFO - GEMINI_API_KEY is set.
|
116 |
+
2025-06-09 12:18:56,425 - __main__ - INFO - Starting Uvicorn server on 0.0.0.0:8000.
|
117 |
+
2025-06-09 12:18:56,450 - __main__ - INFO - Application startup sequence initiated.
|
118 |
+
2025-06-09 12:18:56,451 - __main__ - INFO - Initializing Selenium driver...
|
119 |
+
2025-06-09 12:18:56,452 - __main__ - INFO - Executing synchronous driver initialization and enhanced readiness check.
|
120 |
+
2025-06-09 12:18:59,568 - __main__ - INFO - Driver instantiated. Opening URL...
|
121 |
+
2025-06-09 12:19:07,244 - __main__ - INFO - URL 'https://beta.lmarena.ai/?mode=direct' opened.
|
122 |
+
2025-06-09 12:19:07,244 - __main__ - INFO - Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...
|
123 |
+
2025-06-09 12:19:07,251 - __main__ - INFO - uc_gui_click_captcha() completed. Main site should be loading now.
|
124 |
+
2025-06-09 12:19:07,252 - __main__ - INFO - Checking for on-site ('Verify Human') captcha...
|
125 |
+
2025-06-09 12:19:12,372 - __main__ - INFO - Chat input textarea not interactable. Proceeding with AI captcha solver.
|
126 |
+
2025-06-09 12:19:12,374 - __main__ - INFO - Starting visual AI check for on-site captcha.
|
127 |
+
2025-06-09 12:19:12,539 - __main__ - INFO - Sending screenshot to Gemini API for analysis.
|
128 |
+
2025-06-09 12:19:12,539 - google_genai.models - INFO - AFC is enabled with max remote calls: 10.
|
129 |
+
2025-06-09 12:19:12,539 - google_genai.models - INFO - AFC remote call 1 is done.
|
130 |
+
2025-06-09 12:19:15,352 - httpx - INFO - HTTP Request: POST https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse "HTTP/1.1 200 OK"
|
131 |
+
2025-06-09 12:19:15,478 - __main__ - INFO - Received Gemini response for on-site captcha check: ```json
|
132 |
+
[
|
133 |
+
{"box_2d": [578, 166, 612, 185], "label": "box"}
|
134 |
+
]
|
135 |
+
```
|
136 |
+
2025-06-09 12:19:15,478 - __main__ - INFO - On-site captcha checkbox found via Gemini. Clicking coordinates: {'box_2d': [578, 166, 612, 185], 'label': 'box'}
|
137 |
+
2025-06-09 12:19:19,272 - __main__ - INFO - Click performed. Now reloading page as requested for post-AI solve.
|
138 |
+
2025-06-09 12:19:19,272 - __main__ - INFO - Performing human-like page reload
|
139 |
+
2025-06-09 12:19:19,273 - __main__ - INFO - Using FN+F5 key combination
|
140 |
+
2025-06-09 12:19:21,438 - __main__ - INFO - Page reloaded after 1.26s delay
|
141 |
+
2025-06-09 12:19:26,439 - __main__ - INFO - Selenium driver initialization process completed successfully.
|
142 |
+
2025-06-09 12:19:26,439 - __main__ - INFO - Application startup sequence completed successfully.
|
143 |
+
2025-06-09 12:19:44,775 - __main__ - INFO - [chatcmpl-1503f2e8146f468993892ee257265847] Received chat completion request: model='qwen no think', stream=True, md_convert=True
|
144 |
+
2025-06-09 12:19:44,788 - streaming - INFO - [chatcmpl-1503f2e8146f468993892ee257265847] Starting streaming response generation for model 'qwen no think'.
|
145 |
+
2025-06-09 12:19:44,788 - __main__ - INFO - [98806519-9b36-4d7a-9c31-66529703a72f] Starting chat. Model: 'qwen no think', RawHTML: False, MarkdownMode: True.
|
146 |
+
2025-06-09 12:19:44,788 - __main__ - INFO - Selecting model: qwen no think
|
147 |
+
2025-06-09 12:19:45,064 - __main__ - INFO - Selected model: qwen no think
|
148 |
+
2025-06-09 12:19:45,065 - __main__ - INFO - [98806519-9b36-4d7a-9c31-66529703a72f] Sending prompt (first 50 chars): 'python pascals tree gen small concise code no comm...'
|
149 |
+
2025-06-09 12:19:45,065 - __main__ - INFO - Typing prompt into textarea.
|
150 |
+
2025-06-09 12:19:45,465 - __main__ - INFO - Prompt submitted.
|
151 |
+
2025-06-09 12:19:45,466 - __main__ - INFO - Checking for 'Agree' button in dialog.
|
152 |
+
2025-06-09 12:19:45,559 - __main__ - INFO - 'Agree' button not visible, skipping.
|
153 |
+
2025-06-09 12:19:45,559 - __main__ - INFO - [98806519-9b36-4d7a-9c31-66529703a72f] Prompt sent. Streaming response...
|
154 |
+
2025-06-09 12:19:51,721 - streaming - INFO - [StreamProc/Process] Starting stream in Markdown conversion mode.
|
155 |
+
2025-06-09 12:20:05,631 - streaming - INFO - [StreamProc/PollStream] Content hasn't changed for 10.00s. Assuming stable.
|
156 |
+
2025-06-09 12:20:05,632 - __main__ - INFO - [98806519-9b36-4d7a-9c31-66529703a72f] Finished streaming response from browser.
|
157 |
+
2025-06-09 12:20:05,632 - __main__ - INFO - [98806519-9b36-4d7a-9c31-66529703a72f] Cleaning up chat session by clicking 'New Chat'.
|
158 |
+
2025-06-09 12:20:05,633 - __main__ - INFO - [98806519-9b36-4d7a-9c31-66529703a72f] Attempting to click 'New Chat' button.
|
159 |
+
2025-06-09 12:20:05,720 - __main__ - INFO - [98806519-9b36-4d7a-9c31-66529703a72f] 'New Chat' button clicked successfully.
|
160 |
+
2025-06-09 12:20:05,721 - streaming - INFO - [chatcmpl-1503f2e8146f468993892ee257265847] Yielding final chunk. Total content length: 356 chars.
|
161 |
+
2025-06-09 12:20:05,721 - streaming - INFO - [chatcmpl-1503f2e8146f468993892ee257265847] Yielding [DONE] signal.
|
162 |
+
2025-06-09 12:25:57,144 - __main__ - INFO - Application shutdown sequence initiated.
|
163 |
+
2025-06-09 12:25:57,145 - __main__ - INFO - Cleaning up and quitting Selenium driver...
|
164 |
+
2025-06-09 12:25:59,457 - __main__ - INFO - Driver quit successfully.
|
165 |
+
2025-06-09 12:25:59,458 - __main__ - INFO - Application shutdown sequence completed.
|
166 |
+
2025-06-09 12:26:36,462 - __main__ - INFO - Configuration loaded.
|
167 |
+
2025-06-09 12:26:36,472 - __main__ - INFO - DriverManager instance created.
|
168 |
+
2025-06-09 12:26:36,981 - __main__ - INFO - Gemini client initialized successfully.
|
169 |
+
2025-06-09 12:26:37,098 - __main__ - INFO - GEMINI_API_KEY is set.
|
170 |
+
2025-06-09 12:26:37,098 - __main__ - INFO - Starting Uvicorn server on 0.0.0.0:8000.
|
171 |
+
2025-06-09 12:26:37,172 - __main__ - INFO - Application startup sequence initiated.
|
172 |
+
2025-06-09 12:26:37,173 - __main__ - INFO - Initializing Selenium driver...
|
173 |
+
2025-06-09 12:26:37,175 - __main__ - INFO - Executing synchronous driver initialization and enhanced readiness check.
|
174 |
+
2025-06-09 12:26:41,365 - __main__ - INFO - Driver instantiated. Opening URL...
|
175 |
+
2025-06-09 12:26:46,688 - __main__ - INFO - URL 'https://beta.lmarena.ai/?mode=direct' opened.
|
176 |
+
2025-06-09 12:26:46,689 - __main__ - INFO - Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...
|
177 |
+
2025-06-09 12:26:58,535 - __main__ - INFO - uc_gui_click_captcha() completed. Main site should be loading now.
|
178 |
+
2025-06-09 12:26:58,535 - __main__ - INFO - Checking for on-site ('Verify Human') captcha...
|
179 |
+
2025-06-09 12:27:08,679 - __main__ - INFO - No on-site captcha detected. Main UI is ready.
|
180 |
+
2025-06-09 12:27:08,680 - __main__ - INFO - Selenium driver initialization process completed successfully.
|
181 |
+
2025-06-09 12:27:08,680 - __main__ - INFO - Application startup sequence completed successfully.
|
182 |
+
2025-06-09 12:27:08,776 - __main__ - INFO - [chatcmpl-41dd7f08cfdf4fbe82c61425ad970193] Received chat completion request: model='qwen no think', stream=True, md_convert=True
|
183 |
+
2025-06-09 12:27:08,782 - streaming - INFO - [chatcmpl-41dd7f08cfdf4fbe82c61425ad970193] Starting streaming response generation for model 'qwen no think'.
|
184 |
+
2025-06-09 12:27:08,782 - __main__ - INFO - [9a7a0c77-671d-451c-8839-28f19cc10692] Starting chat. Model: 'qwen no think', RawHTML: False, MarkdownMode: True.
|
185 |
+
2025-06-09 12:27:08,783 - __main__ - INFO - Selecting model: qwen no think
|
186 |
+
2025-06-09 12:27:09,091 - __main__ - INFO - Selected model: qwen no think
|
187 |
+
2025-06-09 12:27:09,092 - __main__ - INFO - [9a7a0c77-671d-451c-8839-28f19cc10692] Sending prompt (first 50 chars): 'python pascals tree gen small concise code no comm...'
|
188 |
+
2025-06-09 12:27:09,092 - __main__ - INFO - Typing prompt into textarea.
|
189 |
+
2025-06-09 12:27:09,593 - __main__ - INFO - Prompt submitted.
|
190 |
+
2025-06-09 12:27:09,593 - __main__ - INFO - Checking for 'Agree' button in dialog.
|
191 |
+
2025-06-09 12:27:09,719 - __main__ - INFO - 'Agree' button not visible, skipping.
|
192 |
+
2025-06-09 12:27:09,719 - __main__ - INFO - [9a7a0c77-671d-451c-8839-28f19cc10692] Prompt sent. Streaming response...
|
193 |
+
2025-06-09 12:27:13,322 - streaming - INFO - [StreamProc/Process] Starting stream in Markdown conversion mode.
|
194 |
+
2025-06-09 12:27:29,116 - streaming - INFO - [StreamProc/PollStream] Content hasn't changed for 10.00s. Assuming stable.
|
195 |
+
2025-06-09 12:27:29,116 - __main__ - INFO - [9a7a0c77-671d-451c-8839-28f19cc10692] Finished streaming response from browser.
|
196 |
+
2025-06-09 12:27:29,117 - __main__ - INFO - [9a7a0c77-671d-451c-8839-28f19cc10692] Cleaning up chat session by clicking 'New Chat'.
|
197 |
+
2025-06-09 12:27:29,117 - __main__ - INFO - [9a7a0c77-671d-451c-8839-28f19cc10692] Attempting to click 'New Chat' button.
|
198 |
+
2025-06-09 12:27:29,217 - __main__ - INFO - [9a7a0c77-671d-451c-8839-28f19cc10692] 'New Chat' button clicked successfully.
|
199 |
+
2025-06-09 12:27:29,218 - streaming - INFO - [chatcmpl-41dd7f08cfdf4fbe82c61425ad970193] Yielding final chunk. Total content length: 354 chars.
|
200 |
+
2025-06-09 12:27:29,218 - streaming - INFO - [chatcmpl-41dd7f08cfdf4fbe82c61425ad970193] Yielding [DONE] signal.
|
201 |
+
2025-06-09 12:40:32,330 - __main__ - INFO - Application shutdown sequence initiated.
|
202 |
+
2025-06-09 12:40:32,331 - __main__ - INFO - Cleaning up and quitting Selenium driver...
|
203 |
+
2025-06-09 12:40:34,681 - __main__ - INFO - Driver quit successfully.
|
204 |
+
2025-06-09 12:40:34,681 - __main__ - INFO - Application shutdown sequence completed.
|
205 |
+
2025-06-09 12:40:42,096 - __main__ - INFO - Configuration loaded.
|
206 |
+
2025-06-09 12:40:42,104 - __main__ - INFO - DriverManager instance created.
|
207 |
+
2025-06-09 12:40:42,595 - __main__ - INFO - Gemini client initialized successfully.
|
208 |
+
2025-06-09 12:40:42,628 - __main__ - INFO - GEMINI_API_KEY is set.
|
209 |
+
2025-06-09 12:40:42,629 - __main__ - INFO - Starting Uvicorn server on 0.0.0.0:8000.
|
210 |
+
2025-06-09 12:40:42,654 - __main__ - INFO - Application startup sequence initiated.
|
211 |
+
2025-06-09 12:40:42,654 - __main__ - INFO - Initializing Selenium driver...
|
212 |
+
2025-06-09 12:40:42,656 - __main__ - INFO - Executing synchronous driver initialization and enhanced readiness check.
|
213 |
+
2025-06-09 12:40:45,740 - __main__ - INFO - Driver instantiated. Opening URL...
|
214 |
+
2025-06-09 12:40:51,056 - __main__ - INFO - URL 'https://beta.lmarena.ai/?mode=direct' opened.
|
215 |
+
2025-06-09 12:40:51,056 - __main__ - INFO - Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...
|
216 |
+
2025-06-09 12:40:51,062 - __main__ - INFO - uc_gui_click_captcha() completed. Main site should be loading now.
|
217 |
+
2025-06-09 12:40:51,062 - __main__ - INFO - Checking for on-site ('Verify Human') captcha...
|
218 |
+
2025-06-09 12:41:03,731 - __main__ - INFO - No on-site captcha detected. Main UI is ready.
|
219 |
+
2025-06-09 12:41:03,732 - __main__ - INFO - Selenium driver initialization process completed successfully.
|
220 |
+
2025-06-09 12:41:03,732 - __main__ - INFO - Application startup sequence completed successfully.
|
221 |
+
2025-06-09 12:41:40,833 - __main__ - INFO - Configuration loaded.
|
222 |
+
2025-06-09 12:41:40,842 - __main__ - INFO - DriverManager instance created.
|
223 |
+
2025-06-09 12:41:41,356 - __main__ - INFO - Gemini client initialized successfully.
|
224 |
+
2025-06-09 12:41:41,389 - __main__ - INFO - GEMINI_API_KEY is set.
|
225 |
+
2025-06-09 12:41:41,390 - __main__ - INFO - Starting Uvicorn server on 0.0.0.0:8000.
|
226 |
+
2025-06-09 12:41:41,416 - __main__ - INFO - Application startup sequence initiated.
|
227 |
+
2025-06-09 12:41:41,417 - __main__ - INFO - Initializing Selenium driver...
|
228 |
+
2025-06-09 12:41:41,418 - __main__ - INFO - Executing synchronous driver initialization and enhanced readiness check.
|
229 |
+
2025-06-09 12:41:44,457 - __main__ - INFO - Driver instantiated. Opening URL...
|
230 |
+
2025-06-09 12:41:50,311 - __main__ - INFO - URL 'https://beta.lmarena.ai/?mode=direct' opened.
|
231 |
+
2025-06-09 12:41:50,311 - __main__ - INFO - Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...
|
232 |
+
2025-06-09 12:41:50,724 - __main__ - INFO - uc_gui_click_captcha() completed. Main site should be loading now.
|
233 |
+
2025-06-09 12:41:50,725 - __main__ - INFO - Checking for on-site ('Verify Human') captcha...
|
234 |
+
2025-06-09 12:42:00,824 - __main__ - INFO - Textarea not ready or an on-site captcha indicator was found. Proceeding with AI solver.
|
235 |
+
2025-06-09 12:42:00,824 - __main__ - INFO - Starting visual AI check for on-site captcha.
|
236 |
+
2025-06-09 12:42:01,009 - __main__ - INFO - Sending screenshot to Gemini API for analysis.
|
237 |
+
2025-06-09 12:42:01,009 - google_genai.models - INFO - AFC is enabled with max remote calls: 10.
|
238 |
+
2025-06-09 12:42:01,010 - google_genai.models - INFO - AFC remote call 1 is done.
|
239 |
+
2025-06-09 12:42:03,591 - httpx - INFO - HTTP Request: POST https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse "HTTP/1.1 200 OK"
|
240 |
+
2025-06-09 12:42:03,744 - __main__ - INFO - Received Gemini response for on-site captcha check: ```json
|
241 |
+
[
|
242 |
+
{"box_2d": [721, 412, 755, 431], "label": "box"}
|
243 |
+
]
|
244 |
+
```
|
245 |
+
2025-06-09 12:42:03,744 - __main__ - INFO - On-site captcha checkbox found via Gemini. Clicking coordinates: {'box_2d': [721, 412, 755, 431], 'label': 'box'}
|
246 |
+
2025-06-09 12:42:07,526 - __main__ - INFO - Click performed. Now reloading page as requested for post-AI solve.
|
247 |
+
2025-06-09 12:42:07,527 - __main__ - INFO - Performing human-like page reload
|
248 |
+
2025-06-09 12:42:07,527 - __main__ - INFO - Using FN+F5 key combination
|
249 |
+
2025-06-09 12:42:09,853 - __main__ - INFO - Page reloaded after 1.42s delay
|
250 |
+
2025-06-09 12:42:14,855 - __main__ - INFO - Selenium driver initialization process completed successfully.
|
251 |
+
2025-06-09 12:42:14,856 - __main__ - INFO - Application startup sequence completed successfully.
|
252 |
+
2025-06-09 12:42:38,986 - __main__ - INFO - [chatcmpl-0e4becfc8b714621adc3a8e28fa6c832] Received chat completion request: model='Gemini 2.0 Flash', stream=True, md_convert=True
|
253 |
+
2025-06-09 12:42:38,992 - streaming - INFO - [chatcmpl-0e4becfc8b714621adc3a8e28fa6c832] Starting streaming response generation for model 'Gemini 2.0 Flash'.
|
254 |
+
2025-06-09 12:42:38,992 - __main__ - INFO - [65ec22f0-f7c9-4c84-97ea-9d279cd1a901] Starting chat. Model: 'Gemini 2.0 Flash', RawHTML: False, MarkdownMode: True.
|
255 |
+
2025-06-09 12:42:38,992 - __main__ - INFO - Selecting model: Gemini 2.0 Flash
|
256 |
+
2025-06-09 12:42:39,305 - __main__ - INFO - Selected model: Gemini 2.0 Flash
|
257 |
+
2025-06-09 12:42:39,306 - __main__ - INFO - [65ec22f0-f7c9-4c84-97ea-9d279cd1a901] Sending prompt (first 50 chars): 'python pascals tree gen small concise code no comm...'
|
258 |
+
2025-06-09 12:42:39,306 - __main__ - INFO - Typing prompt into textarea.
|
259 |
+
2025-06-09 12:42:39,815 - __main__ - INFO - Prompt submitted.
|
260 |
+
2025-06-09 12:42:39,815 - __main__ - INFO - Checking for 'Agree' button in dialog.
|
261 |
+
2025-06-09 12:42:39,917 - __main__ - INFO - 'Agree' button not visible, skipping.
|
262 |
+
2025-06-09 12:42:39,917 - __main__ - INFO - [65ec22f0-f7c9-4c84-97ea-9d279cd1a901] Prompt sent. Streaming response...
|
263 |
+
2025-06-09 12:42:52,374 - streaming - INFO - [StreamProc/PollStream] Content stable for 10.00s. Ending poll.
|
264 |
+
2025-06-09 12:42:52,378 - __main__ - INFO - [65ec22f0-f7c9-4c84-97ea-9d279cd1a901] Finished streaming response from browser.
|
265 |
+
2025-06-09 12:42:52,378 - __main__ - INFO - [65ec22f0-f7c9-4c84-97ea-9d279cd1a901] Cleaning up chat session by clicking 'New Chat'.
|
266 |
+
2025-06-09 12:42:52,378 - __main__ - INFO - [65ec22f0-f7c9-4c84-97ea-9d279cd1a901] Attempting to click 'New Chat' button.
|
267 |
+
2025-06-09 12:42:52,462 - __main__ - INFO - [65ec22f0-f7c9-4c84-97ea-9d279cd1a901] 'New Chat' button clicked successfully.
|
268 |
+
2025-06-09 12:42:52,463 - streaming - INFO - [chatcmpl-0e4becfc8b714621adc3a8e28fa6c832] Yielding final chunk. Total content length: 147 chars.
|
269 |
+
2025-06-09 12:42:52,463 - streaming - INFO - [chatcmpl-0e4becfc8b714621adc3a8e28fa6c832] Yielding [DONE] signal.
|
270 |
+
2025-06-09 12:43:26,774 - __main__ - INFO - [chatcmpl-3e2d2d8cded1427495834f07c4c5758b] Received chat completion request: model='Qwen No Think', stream=True, md_convert=True
|
271 |
+
2025-06-09 12:43:26,775 - streaming - INFO - [chatcmpl-3e2d2d8cded1427495834f07c4c5758b] Starting streaming response generation for model 'Qwen No Think'.
|
272 |
+
2025-06-09 12:43:26,775 - __main__ - INFO - [2ea7dfd7-36df-44f6-b3c7-5b3b8e4c10b3] Starting chat. Model: 'Qwen No Think', RawHTML: False, MarkdownMode: True.
|
273 |
+
2025-06-09 12:43:26,776 - __main__ - INFO - Selecting model: Qwen No Think
|
274 |
+
2025-06-09 12:43:27,035 - __main__ - INFO - Selected model: Qwen No Think
|
275 |
+
2025-06-09 12:43:27,036 - __main__ - INFO - [2ea7dfd7-36df-44f6-b3c7-5b3b8e4c10b3] Sending prompt (first 50 chars): 'python pascals tree gen long code no comments...'
|
276 |
+
2025-06-09 12:43:27,036 - __main__ - INFO - Typing prompt into textarea.
|
277 |
+
2025-06-09 12:43:27,342 - __main__ - INFO - Prompt submitted.
|
278 |
+
2025-06-09 12:43:27,342 - __main__ - INFO - Checking for 'Agree' button in dialog.
|
279 |
+
2025-06-09 12:43:28,373 - __main__ - INFO - 'Agree' button not visible, skipping.
|
280 |
+
2025-06-09 12:43:28,374 - __main__ - INFO - [2ea7dfd7-36df-44f6-b3c7-5b3b8e4c10b3] Prompt sent. Streaming response...
|
281 |
+
2025-06-09 12:43:48,331 - streaming - INFO - [StreamProc/PollStream] Content stable for 10.00s. Ending poll.
|
282 |
+
2025-06-09 12:43:48,343 - __main__ - INFO - [2ea7dfd7-36df-44f6-b3c7-5b3b8e4c10b3] Finished streaming response from browser.
|
283 |
+
2025-06-09 12:43:48,343 - __main__ - INFO - [2ea7dfd7-36df-44f6-b3c7-5b3b8e4c10b3] Cleaning up chat session by clicking 'New Chat'.
|
284 |
+
2025-06-09 12:43:48,343 - __main__ - INFO - [2ea7dfd7-36df-44f6-b3c7-5b3b8e4c10b3] Attempting to click 'New Chat' button.
|
285 |
+
2025-06-09 12:43:48,425 - __main__ - INFO - [2ea7dfd7-36df-44f6-b3c7-5b3b8e4c10b3] 'New Chat' button clicked successfully.
|
286 |
+
2025-06-09 12:43:48,425 - streaming - INFO - [chatcmpl-3e2d2d8cded1427495834f07c4c5758b] Yielding final chunk. Total content length: 1323 chars.
|
287 |
+
2025-06-09 12:43:48,425 - streaming - INFO - [chatcmpl-3e2d2d8cded1427495834f07c4c5758b] Yielding [DONE] signal.
|
288 |
+
2025-06-09 12:45:57,240 - __main__ - INFO - Configuration loaded.
|
289 |
+
2025-06-09 12:45:57,248 - __main__ - INFO - DriverManager instance created.
|
290 |
+
2025-06-09 12:45:57,740 - __main__ - INFO - Gemini client initialized successfully.
|
291 |
+
2025-06-09 12:45:57,774 - __main__ - INFO - GEMINI_API_KEY is set.
|
292 |
+
2025-06-09 12:45:57,774 - __main__ - INFO - Starting Uvicorn server on 0.0.0.0:8000.
|
293 |
+
2025-06-09 12:45:57,799 - __main__ - INFO - Application startup sequence initiated.
|
294 |
+
2025-06-09 12:45:57,799 - __main__ - INFO - Initializing Selenium driver...
|
295 |
+
2025-06-09 12:45:57,801 - __main__ - INFO - Executing synchronous driver initialization and enhanced readiness check.
|
296 |
+
2025-06-09 12:46:00,849 - __main__ - INFO - Driver instantiated. Opening URL...
|
297 |
+
2025-06-09 12:46:07,223 - __main__ - INFO - URL 'https://beta.lmarena.ai/?mode=direct' opened.
|
298 |
+
2025-06-09 12:46:07,223 - __main__ - INFO - Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...
|
299 |
+
2025-06-09 12:46:07,632 - __main__ - INFO - uc_gui_click_captcha() completed. Main site should be loading now.
|
300 |
+
2025-06-09 12:46:07,632 - __main__ - INFO - Checking for on-site ('Verify Human') captcha...
|
301 |
+
2025-06-09 12:46:17,736 - __main__ - INFO - Textarea not ready or an on-site captcha indicator was found. Proceeding with AI solver.
|
302 |
+
2025-06-09 12:46:17,736 - __main__ - INFO - Starting visual AI check for on-site captcha.
|
303 |
+
2025-06-09 12:46:17,898 - __main__ - INFO - Sending screenshot to Gemini API for analysis.
|
304 |
+
2025-06-09 12:46:17,899 - google_genai.models - INFO - AFC is enabled with max remote calls: 10.
|
305 |
+
2025-06-09 12:46:17,899 - google_genai.models - INFO - AFC remote call 1 is done.
|
306 |
+
2025-06-09 12:46:20,718 - httpx - INFO - HTTP Request: POST https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse "HTTP/1.1 200 OK"
|
307 |
+
2025-06-09 12:46:20,809 - __main__ - INFO - Received Gemini response for on-site captcha check: ```json
|
308 |
+
[
|
309 |
+
{"box_2d": [728, 414, 747, 426], "label": "box"}
|
310 |
+
]
|
311 |
+
```
|
312 |
+
2025-06-09 12:46:20,809 - __main__ - INFO - On-site captcha checkbox found via Gemini. Clicking coordinates: {'box_2d': [728, 414, 747, 426], 'label': 'box'}
|
313 |
+
2025-06-09 12:46:24,589 - __main__ - INFO - Click performed. Now reloading page as requested for post-AI solve.
|
314 |
+
2025-06-09 12:46:24,590 - __main__ - INFO - Performing human-like page reload
|
315 |
+
2025-06-09 12:46:24,591 - __main__ - INFO - Using FN+F5 key combination
|
316 |
+
2025-06-09 12:46:26,398 - __main__ - INFO - Page reloaded after 0.90s delay
|
317 |
+
2025-06-09 12:46:31,400 - __main__ - INFO - Selenium driver initialization process completed successfully.
|
318 |
+
2025-06-09 12:46:31,401 - __main__ - INFO - Application startup sequence completed successfully.
|
319 |
+
2025-06-09 12:46:39,072 - __main__ - INFO - [chatcmpl-61168434d655486299ab964cfa879aaa] Received chat completion request: model='Qwen No Think', stream=True, md_convert=True
|
320 |
+
2025-06-09 12:46:39,078 - streaming - INFO - [chatcmpl-61168434d655486299ab964cfa879aaa] Starting streaming response generation for model 'Qwen No Think'.
|
321 |
+
2025-06-09 12:46:39,079 - __main__ - INFO - [876f2d65-bd15-445f-bf51-12d77336ca0c] Starting chat. Model: 'Qwen No Think', RawHTML: False, MarkdownMode: True.
|
322 |
+
2025-06-09 12:46:39,079 - __main__ - INFO - Selecting model: Qwen No Think
|
323 |
+
2025-06-09 12:46:39,440 - __main__ - INFO - Selected model: Qwen No Think
|
324 |
+
2025-06-09 12:46:39,441 - __main__ - INFO - [876f2d65-bd15-445f-bf51-12d77336ca0c] Sending prompt (first 50 chars): 'python pascals tree gen long code no comments...'
|
325 |
+
2025-06-09 12:46:39,441 - __main__ - INFO - Typing prompt into textarea.
|
326 |
+
2025-06-09 12:46:39,832 - __main__ - INFO - Prompt submitted.
|
327 |
+
2025-06-09 12:46:39,833 - __main__ - INFO - Checking for 'Agree' button in dialog.
|
328 |
+
2025-06-09 12:46:39,925 - __main__ - INFO - 'Agree' button not visible, skipping.
|
329 |
+
2025-06-09 12:46:39,926 - __main__ - INFO - [876f2d65-bd15-445f-bf51-12d77336ca0c] Prompt sent. Streaming response...
|
330 |
+
2025-06-09 12:46:43,010 - __main__ - ERROR - Streaming error: StreamConfig.__init__() got an unexpected keyword argument 'stream_raw_html'
|
331 |
+
Traceback (most recent call last):
|
332 |
+
File "c:\Users\caree\Code\Lmarena\api.py", line 414, in _stream_response
|
333 |
+
stream_config = StreamConfig(
|
334 |
+
poll_interval=config.poll_interval,
|
335 |
+
...<4 lines>...
|
336 |
+
convert_html_to_markdown=convert_html_to_markdown
|
337 |
+
)
|
338 |
+
TypeError: StreamConfig.__init__() got an unexpected keyword argument 'stream_raw_html'
|
339 |
+
2025-06-09 12:46:43,012 - __main__ - INFO - [876f2d65-bd15-445f-bf51-12d77336ca0c] Finished streaming response from browser.
|
340 |
+
2025-06-09 12:46:43,012 - __main__ - INFO - [876f2d65-bd15-445f-bf51-12d77336ca0c] Cleaning up chat session by clicking 'New Chat'.
|
341 |
+
2025-06-09 12:46:43,012 - __main__ - INFO - [876f2d65-bd15-445f-bf51-12d77336ca0c] Attempting to click 'New Chat' button.
|
342 |
+
2025-06-09 12:46:43,117 - __main__ - INFO - [876f2d65-bd15-445f-bf51-12d77336ca0c] 'New Chat' button clicked successfully.
|
343 |
+
2025-06-09 13:13:16,829 - __main__ - INFO - Application shutdown sequence initiated.
|
344 |
+
2025-06-09 13:13:16,830 - __main__ - INFO - Cleaning up and quitting Selenium driver...
|
345 |
+
2025-06-09 13:13:19,250 - __main__ - INFO - Driver quit successfully.
|
346 |
+
2025-06-09 13:13:19,251 - __main__ - INFO - Application shutdown sequence completed.
|
347 |
+
2025-06-09 13:13:22,817 - __main__ - INFO - Configuration loaded.
|
348 |
+
2025-06-09 13:13:22,825 - __main__ - INFO - DriverManager instance created.
|
349 |
+
2025-06-09 13:13:23,338 - __main__ - INFO - Gemini client initialized successfully.
|
350 |
+
2025-06-09 13:13:23,372 - __main__ - INFO - GEMINI_API_KEY is set.
|
351 |
+
2025-06-09 13:13:23,372 - __main__ - INFO - Starting Uvicorn server on 0.0.0.0:8000.
|
352 |
+
2025-06-09 13:13:23,398 - __main__ - INFO - Application startup sequence initiated.
|
353 |
+
2025-06-09 13:13:23,399 - __main__ - INFO - Initializing Selenium driver...
|
354 |
+
2025-06-09 13:13:23,400 - __main__ - INFO - Executing synchronous driver initialization and enhanced readiness check.
|
355 |
+
2025-06-09 13:13:26,459 - __main__ - INFO - Driver instantiated. Opening URL...
|
356 |
+
2025-06-09 13:13:31,687 - __main__ - INFO - URL 'https://beta.lmarena.ai/?mode=direct' opened.
|
357 |
+
2025-06-09 13:13:31,687 - __main__ - INFO - Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...
|
358 |
+
2025-06-09 13:13:42,971 - __main__ - INFO - uc_gui_click_captcha() completed. Main site should be loading now.
|
359 |
+
2025-06-09 13:13:42,971 - __main__ - INFO - Checking for on-site ('Verify Human') captcha...
|
360 |
+
2025-06-09 13:13:53,072 - __main__ - INFO - No on-site captcha detected. Main UI is ready.
|
361 |
+
2025-06-09 13:13:53,073 - __main__ - INFO - Selenium driver initialization process completed successfully.
|
362 |
+
2025-06-09 13:13:53,073 - __main__ - INFO - Application startup sequence completed successfully.
|
363 |
+
2025-06-09 13:14:00,232 - __main__ - INFO - [chatcmpl-a016b7ca118c4d9b94482a2fc53638ca] Received chat completion request: model='Qwen No Think', stream=True, md_convert=True
|
364 |
+
2025-06-09 13:14:00,238 - streaming - INFO - [chatcmpl-a016b7ca118c4d9b94482a2fc53638ca] Starting streaming response generation for model 'Qwen No Think'.
|
365 |
+
2025-06-09 13:14:00,238 - __main__ - INFO - [0e1927b9-8699-4b08-9338-c2962ebdc6d9] Starting chat. Model: 'Qwen No Think', RawHTML: False, MarkdownMode: True.
|
366 |
+
2025-06-09 13:14:00,239 - __main__ - INFO - Selecting model: Qwen No Think
|
367 |
+
2025-06-09 13:14:00,505 - __main__ - INFO - Selected model: Qwen No Think
|
368 |
+
2025-06-09 13:14:00,506 - __main__ - INFO - [0e1927b9-8699-4b08-9338-c2962ebdc6d9] Sending prompt (first 50 chars): 'python pascals tree gen long code no comments...'
|
369 |
+
2025-06-09 13:14:00,506 - __main__ - INFO - Typing prompt into textarea.
|
370 |
+
2025-06-09 13:14:00,862 - __main__ - INFO - Prompt submitted.
|
371 |
+
2025-06-09 13:14:00,862 - __main__ - INFO - Checking for 'Agree' button in dialog.
|
372 |
+
2025-06-09 13:14:01,106 - __main__ - INFO - 'Agree' button not visible, skipping.
|
373 |
+
2025-06-09 13:14:01,106 - __main__ - INFO - [0e1927b9-8699-4b08-9338-c2962ebdc6d9] Prompt sent. Streaming response...
|
374 |
+
2025-06-09 13:17:05,718 - streaming - ERROR - [StreamProc/PollStream] Unexpected error polling: Message: no such window: target window already closed
|
375 |
+
from unknown error: web view not found
|
376 |
+
(Session info: chrome=137.0.7151.69)
|
377 |
+
Stacktrace:
|
378 |
+
GetHandleVerifier [0x0x7ff748e66f65+78965]
|
379 |
+
GetHandleVerifier [0x0x7ff748e66fc0+79056]
|
380 |
+
(No symbol) [0x0x7ff748bf9dda]
|
381 |
+
(No symbol) [0x0x7ff748bd20d1]
|
382 |
+
(No symbol) [0x0x7ff748c7ff4e]
|
383 |
+
(No symbol) [0x0x7ff748ca06cd]
|
384 |
+
(No symbol) [0x0x7ff748c78443]
|
385 |
+
(No symbol) [0x0x7ff748c41311]
|
386 |
+
(No symbol) [0x0x7ff748c420a3]
|
387 |
+
GetHandleVerifier [0x0x7ff74911e26d+2926461]
|
388 |
+
GetHandleVerifier [0x0x7ff749118993+2903715]
|
389 |
+
GetHandleVerifier [0x0x7ff749136aed+3026941]
|
390 |
+
GetHandleVerifier [0x0x7ff748e816fe+187406]
|
391 |
+
GetHandleVerifier [0x0x7ff748e896ef+220159]
|
392 |
+
GetHandleVerifier [0x0x7ff748e6faf4+114692]
|
393 |
+
GetHandleVerifier [0x0x7ff748e6fca9+115129]
|
394 |
+
GetHandleVerifier [0x0x7ff748e564d8+10728]
|
395 |
+
BaseThreadInitThunk [0x0x7ffd6f06e8d7+23]
|
396 |
+
RtlUserThreadStart [0x0x7ffd6fafc5dc+44]
|
397 |
+
Traceback (most recent call last):
|
398 |
+
File "c:\Users\caree\Code\Lmarena\streaming.py", line 190, in _poll_element_content_stream
|
399 |
+
# Convert all finalized elements to markdown
|
400 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\selenium\webdriver\support\wait.py", line 129, in until
|
401 |
+
value = method(self._driver)
|
402 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\selenium\webdriver\support\expected_conditions.py", line 104, in _predicate
|
403 |
+
return driver.find_element(*locator)
|
404 |
+
~~~~~~~~~~~~~~~~~~~^^^^^^^^^^
|
405 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\seleniumbase\core\sb_driver.py", line 27, in find_element
|
406 |
+
return self.driver.default_find_element(by=by, value=value)
|
407 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^
|
408 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\selenium\webdriver\remote\webdriver.py", line 914, in find_element
|
409 |
+
return self.execute(Command.FIND_ELEMENT, {"using": by, "value": value})["value"]
|
410 |
+
~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
411 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\selenium\webdriver\remote\webdriver.py", line 447, in execute
|
412 |
+
self.error_handler.check_response(response)
|
413 |
+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^
|
414 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\selenium\webdriver\remote\errorhandler.py", line 232, in check_response
|
415 |
+
raise exception_class(message, screen, stacktrace)
|
416 |
+
selenium.common.exceptions.NoSuchWindowException: Message: no such window: target window already closed
|
417 |
+
from unknown error: web view not found
|
418 |
+
(Session info: chrome=137.0.7151.69)
|
419 |
+
Stacktrace:
|
420 |
+
GetHandleVerifier [0x0x7ff748e66f65+78965]
|
421 |
+
GetHandleVerifier [0x0x7ff748e66fc0+79056]
|
422 |
+
(No symbol) [0x0x7ff748bf9dda]
|
423 |
+
(No symbol) [0x0x7ff748bd20d1]
|
424 |
+
(No symbol) [0x0x7ff748c7ff4e]
|
425 |
+
(No symbol) [0x0x7ff748ca06cd]
|
426 |
+
(No symbol) [0x0x7ff748c78443]
|
427 |
+
(No symbol) [0x0x7ff748c41311]
|
428 |
+
(No symbol) [0x0x7ff748c420a3]
|
429 |
+
GetHandleVerifier [0x0x7ff74911e26d+2926461]
|
430 |
+
GetHandleVerifier [0x0x7ff749118993+2903715]
|
431 |
+
GetHandleVerifier [0x0x7ff749136aed+3026941]
|
432 |
+
GetHandleVerifier [0x0x7ff748e816fe+187406]
|
433 |
+
GetHandleVerifier [0x0x7ff748e896ef+220159]
|
434 |
+
GetHandleVerifier [0x0x7ff748e6faf4+114692]
|
435 |
+
GetHandleVerifier [0x0x7ff748e6fca9+115129]
|
436 |
+
GetHandleVerifier [0x0x7ff748e564d8+10728]
|
437 |
+
BaseThreadInitThunk [0x0x7ffd6f06e8d7+23]
|
438 |
+
RtlUserThreadStart [0x0x7ffd6fafc5dc+44]
|
439 |
+
|
440 |
+
2025-06-09 13:17:05,797 - __main__ - INFO - [0e1927b9-8699-4b08-9338-c2962ebdc6d9] Finished streaming response from browser.
|
441 |
+
2025-06-09 13:17:05,797 - __main__ - INFO - [0e1927b9-8699-4b08-9338-c2962ebdc6d9] Cleaning up chat session by clicking 'New Chat'.
|
442 |
+
2025-06-09 13:17:05,798 - __main__ - INFO - [0e1927b9-8699-4b08-9338-c2962ebdc6d9] Attempting to click 'New Chat' button.
|
443 |
+
2025-06-09 13:17:12,857 - __main__ - ERROR - [0e1927b9-8699-4b08-9338-c2962ebdc6d9] Error clicking 'New Chat' during cleanup: Message:
|
444 |
+
Element {//a[contains(@class, 'whitespace-nowrap') and .//h2[contains(text(), 'New Chat')]]} was not present after 7 seconds!
|
445 |
+
Traceback (most recent call last):
|
446 |
+
File "c:\Users\caree\Code\Lmarena\api.py", line 384, in send_message_and_stream_response
|
447 |
+
await ChatHandler._click_new_chat(driver, request_id)
|
448 |
+
File "c:\Users\caree\Code\Lmarena\api.py", line 442, in _click_new_chat
|
449 |
+
yield item
|
450 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\concurrent\futures\thread.py", line 59, in run
|
451 |
+
result = self.fn(*self.args, **self.kwargs)
|
452 |
+
File "c:\Users\caree\Code\Lmarena\api.py", line 442, in <lambda>
|
453 |
+
yield item
|
454 |
+
|
455 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\seleniumbase\core\sb_driver.py", line 82, in click
|
456 |
+
page_actions.click(self.driver, *args, **kwargs)
|
457 |
+
~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
458 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\seleniumbase\fixtures\page_actions.py", line 1595, in click
|
459 |
+
element = wait_for_element_clickable(
|
460 |
+
driver, selector, by=by, timeout=timeout
|
461 |
+
)
|
462 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\seleniumbase\fixtures\page_actions.py", line 910, in wait_for_element_clickable
|
463 |
+
timeout_exception(NoSuchElementException, message)
|
464 |
+
~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
465 |
+
File "C:\Users\caree\AppData\Local\Programs\Python\Python313\Lib\site-packages\seleniumbase\fixtures\page_actions.py", line 267, in timeout_exception
|
466 |
+
raise exc(msg)
|
467 |
+
seleniumbase.common.exceptions.NoSuchElementException: Message:
|
468 |
+
Element {//a[contains(@class, 'whitespace-nowrap') and .//h2[contains(text(), 'New Chat')]]} was not present after 7 seconds!
|
469 |
+
|
470 |
+
2025-06-09 13:17:16,355 - __main__ - INFO - Configuration loaded.
|
471 |
+
2025-06-09 13:17:16,363 - __main__ - INFO - DriverManager instance created.
|
472 |
+
2025-06-09 13:17:16,867 - __main__ - INFO - Gemini client initialized successfully.
|
473 |
+
2025-06-09 13:17:16,902 - __main__ - INFO - GEMINI_API_KEY is set.
|
474 |
+
2025-06-09 13:17:16,902 - __main__ - INFO - Starting Uvicorn server on 0.0.0.0:8000.
|
475 |
+
2025-06-09 13:17:16,929 - __main__ - INFO - Application startup sequence initiated.
|
476 |
+
2025-06-09 13:17:16,930 - __main__ - INFO - Initializing Selenium driver...
|
477 |
+
2025-06-09 13:17:16,931 - __main__ - INFO - Executing synchronous driver initialization and enhanced readiness check.
|
478 |
+
2025-06-09 13:17:19,968 - __main__ - INFO - Driver instantiated. Opening URL...
|
479 |
+
2025-06-09 13:17:25,172 - __main__ - INFO - URL 'https://beta.lmarena.ai/?mode=direct' opened.
|
480 |
+
2025-06-09 13:17:25,172 - __main__ - INFO - Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...
|
481 |
+
2025-06-09 13:17:36,533 - __main__ - INFO - uc_gui_click_captcha() completed. Main site should be loading now.
|
482 |
+
2025-06-09 13:17:36,534 - __main__ - INFO - Checking for on-site ('Verify Human') captcha...
|
483 |
+
2025-06-09 13:17:46,626 - __main__ - INFO - No on-site captcha detected. Main UI is ready.
|
484 |
+
2025-06-09 13:17:46,627 - __main__ - INFO - Selenium driver initialization process completed successfully.
|
485 |
+
2025-06-09 13:17:46,627 - __main__ - INFO - Application startup sequence completed successfully.
|
486 |
+
2025-06-09 13:17:47,719 - __main__ - INFO - [chatcmpl-dad296131df54a5d9fc92ff2820fc08f] Received chat completion request: model='Qwen No Think', stream=True, md_convert=True
|
487 |
+
2025-06-09 13:17:47,725 - streaming - INFO - [chatcmpl-dad296131df54a5d9fc92ff2820fc08f] Starting streaming response generation for model 'Qwen No Think'.
|
488 |
+
2025-06-09 13:17:47,725 - __main__ - INFO - [8382e611-556c-4cc7-a67a-2efc5d9f5ad3] Starting chat. Model: 'Qwen No Think', RawHTML: False, MarkdownMode: True.
|
489 |
+
2025-06-09 13:17:47,725 - __main__ - INFO - Selecting model: Qwen No Think
|
490 |
+
2025-06-09 13:17:48,003 - __main__ - INFO - Selected model: Qwen No Think
|
491 |
+
2025-06-09 13:17:48,004 - __main__ - INFO - [8382e611-556c-4cc7-a67a-2efc5d9f5ad3] Sending prompt (first 50 chars): 'python pascals tree gen long code no comments...'
|
492 |
+
2025-06-09 13:17:48,004 - __main__ - INFO - Typing prompt into textarea.
|
493 |
+
2025-06-09 13:17:48,357 - __main__ - INFO - Prompt submitted.
|
494 |
+
2025-06-09 13:17:48,357 - __main__ - INFO - Checking for 'Agree' button in dialog.
|
495 |
+
2025-06-09 13:17:48,587 - __main__ - INFO - 'Agree' button not visible, skipping.
|
496 |
+
2025-06-09 13:17:48,588 - __main__ - INFO - [8382e611-556c-4cc7-a67a-2efc5d9f5ad3] Prompt sent. Streaming response...
|
497 |
+
2025-06-09 13:18:11,478 - streaming - INFO - [StreamProc/PollStream] Content stable for 10.00s. Ending poll.
|
498 |
+
2025-06-09 13:18:11,489 - __main__ - INFO - [8382e611-556c-4cc7-a67a-2efc5d9f5ad3] Finished streaming response from browser.
|
499 |
+
2025-06-09 13:18:11,489 - __main__ - INFO - [8382e611-556c-4cc7-a67a-2efc5d9f5ad3] Cleaning up chat session by clicking 'New Chat'.
|
500 |
+
2025-06-09 13:18:11,490 - __main__ - INFO - [8382e611-556c-4cc7-a67a-2efc5d9f5ad3] Attempting to click 'New Chat' button.
|
501 |
+
2025-06-09 13:18:11,577 - __main__ - INFO - [8382e611-556c-4cc7-a67a-2efc5d9f5ad3] 'New Chat' button clicked successfully.
|
502 |
+
2025-06-09 13:18:11,578 - streaming - INFO - [chatcmpl-dad296131df54a5d9fc92ff2820fc08f] Yielding final chunk. Total content length: 1566 chars.
|
503 |
+
2025-06-09 13:18:11,578 - streaming - INFO - [chatcmpl-dad296131df54a5d9fc92ff2820fc08f] Yielding [DONE] signal.
|
504 |
+
2025-06-09 13:21:08,343 - __main__ - INFO - Configuration loaded.
|
505 |
+
2025-06-09 13:21:08,351 - __main__ - INFO - DriverManager instance created.
|
506 |
+
2025-06-09 13:21:08,842 - __main__ - INFO - Gemini client initialized successfully.
|
507 |
+
2025-06-09 13:21:08,875 - __main__ - INFO - GEMINI_API_KEY is set.
|
508 |
+
2025-06-09 13:21:08,876 - __main__ - INFO - Starting Uvicorn server on 0.0.0.0:8000.
|
509 |
+
2025-06-09 13:21:08,902 - __main__ - INFO - Application startup sequence initiated.
|
510 |
+
2025-06-09 13:21:08,902 - __main__ - INFO - Initializing Selenium driver...
|
511 |
+
2025-06-09 13:21:08,904 - __main__ - INFO - Executing synchronous driver initialization and enhanced readiness check.
|
512 |
+
2025-06-09 13:21:11,994 - __main__ - INFO - Driver instantiated. Opening URL...
|
513 |
+
2025-06-09 13:21:17,315 - __main__ - INFO - URL 'https://beta.lmarena.ai/?mode=direct' opened.
|
514 |
+
2025-06-09 13:21:17,316 - __main__ - INFO - Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...
|
515 |
+
2025-06-09 13:21:17,324 - __main__ - INFO - uc_gui_click_captcha() completed. Main site should be loading now.
|
516 |
+
2025-06-09 13:21:17,325 - __main__ - INFO - Checking for on-site ('Verify Human') captcha...
|
517 |
+
2025-06-09 13:21:22,605 - __main__ - INFO - Chat input textarea not interactable. Proceeding with AI captcha solver.
|
518 |
+
2025-06-09 13:21:22,605 - __main__ - INFO - Starting visual AI check for on-site captcha.
|
519 |
+
2025-06-09 13:21:22,740 - __main__ - INFO - Sending screenshot to Gemini API for analysis.
|
520 |
+
2025-06-09 13:21:22,740 - google_genai.models - INFO - AFC is enabled with max remote calls: 10.
|
521 |
+
2025-06-09 13:21:22,741 - google_genai.models - INFO - AFC remote call 1 is done.
|
522 |
+
2025-06-09 13:21:25,207 - httpx - INFO - HTTP Request: POST https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse "HTTP/1.1 200 OK"
|
523 |
+
2025-06-09 13:21:25,465 - __main__ - INFO - Received Gemini response for on-site captcha check: ```json
|
524 |
+
[
|
525 |
+
{"box_2d": [578, 164, 614, 186], "label": "box"}
|
526 |
+
]
|
527 |
+
```
|
528 |
+
2025-06-09 13:21:25,466 - __main__ - INFO - On-site captcha checkbox found via Gemini. Clicking coordinates: {'box_2d': [578, 164, 614, 186], 'label': 'box'}
|
529 |
+
2025-06-09 13:21:29,140 - __main__ - INFO - Click performed. Now reloading page as requested for post-AI solve.
|
530 |
+
2025-06-09 13:21:29,140 - __main__ - INFO - Performing human-like page reload
|
531 |
+
2025-06-09 13:21:29,141 - __main__ - INFO - Using FN+F5 key combination
|
532 |
+
2025-06-09 13:21:30,930 - __main__ - INFO - Page reloaded after 0.88s delay
|
533 |
+
2025-06-09 13:21:35,931 - __main__ - INFO - Selenium driver initialization process completed successfully.
|
534 |
+
2025-06-09 13:21:35,931 - __main__ - INFO - Application startup sequence completed successfully.
|
535 |
+
2025-06-09 13:21:49,966 - __main__ - INFO - [chatcmpl-d6223c77e0534551872ec565df4666fa] Received chat completion request: model='Qwen No Think', stream=True, md_convert=True
|
536 |
+
2025-06-09 13:21:49,972 - streaming - INFO - [chatcmpl-d6223c77e0534551872ec565df4666fa] Starting streaming response generation for model 'Qwen No Think'.
|
537 |
+
2025-06-09 13:21:49,972 - __main__ - INFO - [ffe50244-a76d-4a41-b938-5f943206d84f] Starting chat. Model: 'Qwen No Think', RawHTML: False, MarkdownMode: True.
|
538 |
+
2025-06-09 13:21:49,972 - __main__ - INFO - Selecting model: Qwen No Think
|
539 |
+
2025-06-09 13:21:50,279 - __main__ - INFO - Selected model: Qwen No Think
|
540 |
+
2025-06-09 13:21:50,279 - __main__ - INFO - [ffe50244-a76d-4a41-b938-5f943206d84f] Sending prompt (first 50 chars): 'python pascals tree gen long code no comments...'
|
541 |
+
2025-06-09 13:21:50,280 - __main__ - INFO - Typing prompt into textarea.
|
542 |
+
2025-06-09 13:21:50,640 - __main__ - INFO - Prompt submitted.
|
543 |
+
2025-06-09 13:21:50,641 - __main__ - INFO - Checking for 'Agree' button in dialog.
|
544 |
+
2025-06-09 13:21:50,772 - __main__ - INFO - 'Agree' button not visible, skipping.
|
545 |
+
2025-06-09 13:21:50,772 - __main__ - INFO - [ffe50244-a76d-4a41-b938-5f943206d84f] Prompt sent. Streaming response...
|
546 |
+
2025-06-09 13:22:11,608 - streaming - INFO - [StreamProc/PollStream] Content stable for 10.00s. Ending poll.
|
547 |
+
2025-06-09 13:22:11,616 - __main__ - INFO - [ffe50244-a76d-4a41-b938-5f943206d84f] Finished streaming response from browser.
|
548 |
+
2025-06-09 13:22:11,616 - __main__ - INFO - [ffe50244-a76d-4a41-b938-5f943206d84f] Cleaning up chat session by clicking 'New Chat'.
|
549 |
+
2025-06-09 13:22:11,617 - __main__ - INFO - [ffe50244-a76d-4a41-b938-5f943206d84f] Attempting to click 'New Chat' button.
|
550 |
+
2025-06-09 13:22:11,701 - __main__ - INFO - [ffe50244-a76d-4a41-b938-5f943206d84f] 'New Chat' button clicked successfully.
|
551 |
+
2025-06-09 13:22:11,701 - streaming - INFO - [chatcmpl-d6223c77e0534551872ec565df4666fa] Yielding final chunk. Total content length: 1249 chars.
|
552 |
+
2025-06-09 13:22:11,701 - streaming - INFO - [chatcmpl-d6223c77e0534551872ec565df4666fa] Yielding [DONE] signal.
|
553 |
+
2025-06-09 13:22:51,763 - __main__ - INFO - [chatcmpl-906b9b8c6d38464f8416cf58c56947bc] Received chat completion request: model='Claude Opus 4 Thinking', stream=True, md_convert=True
|
554 |
+
2025-06-09 13:22:51,764 - streaming - INFO - [chatcmpl-906b9b8c6d38464f8416cf58c56947bc] Starting streaming response generation for model 'Claude Opus 4 Thinking'.
|
555 |
+
2025-06-09 13:22:51,765 - __main__ - INFO - [b9ae0a31-1028-443b-a3dc-aa74a12fedef] Starting chat. Model: 'Claude Opus 4 Thinking', RawHTML: False, MarkdownMode: True.
|
556 |
+
2025-06-09 13:22:51,765 - __main__ - INFO - Selecting model: Claude Opus 4 Thinking
|
557 |
+
2025-06-09 13:22:52,143 - __main__ - INFO - Selected model: Claude Opus 4 Thinking
|
558 |
+
2025-06-09 13:22:52,143 - __main__ - INFO - [b9ae0a31-1028-443b-a3dc-aa74a12fedef] Sending prompt (first 50 chars): 'python pascals tree gen long code no comments...'
|
559 |
+
2025-06-09 13:22:52,144 - __main__ - INFO - Typing prompt into textarea.
|
560 |
+
2025-06-09 13:22:52,487 - __main__ - INFO - Prompt submitted.
|
561 |
+
2025-06-09 13:22:52,487 - __main__ - INFO - Checking for 'Agree' button in dialog.
|
562 |
+
2025-06-09 13:22:53,513 - __main__ - INFO - 'Agree' button not visible, skipping.
|
563 |
+
2025-06-09 13:22:53,513 - __main__ - INFO - [b9ae0a31-1028-443b-a3dc-aa74a12fedef] Prompt sent. Streaming response...
|
564 |
+
2025-06-09 13:24:08,306 - streaming - INFO - [StreamProc/PollStream] Content stable for 10.00s. Ending poll.
|
565 |
+
2025-06-09 13:24:08,404 - __main__ - INFO - [b9ae0a31-1028-443b-a3dc-aa74a12fedef] Finished streaming response from browser.
|
566 |
+
2025-06-09 13:24:08,404 - __main__ - INFO - [b9ae0a31-1028-443b-a3dc-aa74a12fedef] Cleaning up chat session by clicking 'New Chat'.
|
567 |
+
2025-06-09 13:24:08,404 - __main__ - INFO - [b9ae0a31-1028-443b-a3dc-aa74a12fedef] Attempting to click 'New Chat' button.
|
568 |
+
2025-06-09 13:24:09,131 - __main__ - INFO - [b9ae0a31-1028-443b-a3dc-aa74a12fedef] 'New Chat' button clicked successfully.
|
569 |
+
2025-06-09 13:24:09,132 - streaming - INFO - [chatcmpl-906b9b8c6d38464f8416cf58c56947bc] Yielding final chunk. Total content length: 12889 chars.
|
570 |
+
2025-06-09 13:24:09,132 - streaming - INFO - [chatcmpl-906b9b8c6d38464f8416cf58c56947bc] Yielding [DONE] signal.
|
571 |
+
2025-06-09 13:24:37,914 - __main__ - INFO - [chatcmpl-8397b786dfc94d6fa186bf302fdb5091] Received chat completion request: model='Claude Opus 4 Thinking', stream=True, md_convert=True
|
572 |
+
2025-06-09 13:24:37,915 - streaming - INFO - [chatcmpl-8397b786dfc94d6fa186bf302fdb5091] Starting streaming response generation for model 'Claude Opus 4 Thinking'.
|
573 |
+
2025-06-09 13:24:37,916 - __main__ - INFO - [ddd1b72b-d54f-46d0-a3c9-32760b378dce] Starting chat. Model: 'Claude Opus 4 Thinking', RawHTML: False, MarkdownMode: True.
|
574 |
+
2025-06-09 13:24:37,916 - __main__ - INFO - Selecting model: Claude Opus 4 Thinking
|
575 |
+
2025-06-09 13:24:38,290 - __main__ - INFO - Selected model: Claude Opus 4 Thinking
|
576 |
+
2025-06-09 13:24:38,291 - __main__ - INFO - [ddd1b72b-d54f-46d0-a3c9-32760b378dce] Sending prompt (first 50 chars): 'python pascals tree gen long code explain everythi...'
|
577 |
+
2025-06-09 13:24:38,291 - __main__ - INFO - Typing prompt into textarea.
|
578 |
+
2025-06-09 13:24:38,870 - __main__ - INFO - Prompt submitted.
|
579 |
+
2025-06-09 13:24:38,871 - __main__ - INFO - Checking for 'Agree' button in dialog.
|
580 |
+
2025-06-09 13:24:39,905 - __main__ - INFO - 'Agree' button not visible, skipping.
|
581 |
+
2025-06-09 13:24:39,906 - __main__ - INFO - [ddd1b72b-d54f-46d0-a3c9-32760b378dce] Prompt sent. Streaming response...
|
582 |
+
2025-06-09 13:27:21,691 - streaming - INFO - [StreamProc/PollStream] Content stable for 10.00s. Ending poll.
|
583 |
+
2025-06-09 13:27:22,031 - __main__ - INFO - [ddd1b72b-d54f-46d0-a3c9-32760b378dce] Finished streaming response from browser.
|
584 |
+
2025-06-09 13:27:22,031 - __main__ - INFO - [ddd1b72b-d54f-46d0-a3c9-32760b378dce] Cleaning up chat session by clicking 'New Chat'.
|
585 |
+
2025-06-09 13:27:22,031 - __main__ - INFO - [ddd1b72b-d54f-46d0-a3c9-32760b378dce] Attempting to click 'New Chat' button.
|
586 |
+
2025-06-09 13:27:22,120 - __main__ - INFO - [ddd1b72b-d54f-46d0-a3c9-32760b378dce] 'New Chat' button clicked successfully.
|
587 |
+
2025-06-09 13:27:22,120 - streaming - INFO - [chatcmpl-8397b786dfc94d6fa186bf302fdb5091] Yielding final chunk. Total content length: 26526 chars.
|
588 |
+
2025-06-09 13:27:22,121 - streaming - INFO - [chatcmpl-8397b786dfc94d6fa186bf302fdb5091] Yielding [DONE] signal.
|
589 |
+
2025-06-09 13:29:20,701 - __main__ - INFO - Application shutdown sequence initiated.
|
590 |
+
2025-06-09 13:29:20,702 - __main__ - INFO - Cleaning up and quitting Selenium driver...
|
591 |
+
2025-06-09 13:29:23,130 - __main__ - INFO - Driver quit successfully.
|
592 |
+
2025-06-09 13:29:23,130 - __main__ - INFO - Application shutdown sequence completed.
|
593 |
+
2025-06-09 13:29:42,779 - __main__ - INFO - Configuration loaded.
|
594 |
+
2025-06-09 13:29:42,787 - __main__ - INFO - DriverManager instance created.
|
595 |
+
2025-06-09 13:29:43,278 - __main__ - INFO - Gemini client initialized successfully.
|
596 |
+
2025-06-09 13:29:43,312 - __main__ - INFO - GEMINI_API_KEY is set.
|
597 |
+
2025-06-09 13:29:43,312 - __main__ - INFO - Starting Uvicorn server on 0.0.0.0:8000.
|
598 |
+
2025-06-09 13:29:43,338 - __main__ - INFO - Application startup sequence initiated.
|
599 |
+
2025-06-09 13:29:43,339 - __main__ - INFO - Initializing Selenium driver...
|
600 |
+
2025-06-09 13:29:43,340 - __main__ - INFO - Executing synchronous driver initialization and enhanced readiness check.
|
601 |
+
2025-06-09 13:29:46,450 - __main__ - INFO - Driver instantiated. Opening URL...
|
602 |
+
2025-06-09 13:29:51,744 - __main__ - INFO - URL 'https://beta.lmarena.ai/?mode=direct' opened.
|
603 |
+
2025-06-09 13:29:51,745 - __main__ - INFO - Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...
|
604 |
+
2025-06-09 13:30:03,036 - __main__ - INFO - uc_gui_click_captcha() completed. Main site should be loading now.
|
605 |
+
2025-06-09 13:30:03,036 - __main__ - INFO - Checking for on-site ('Verify Human') captcha...
|
606 |
+
2025-06-09 13:30:13,135 - __main__ - INFO - No on-site captcha detected. Main UI is ready.
|
607 |
+
2025-06-09 13:30:13,136 - __main__ - INFO - Selenium driver initialization process completed successfully.
|
608 |
+
2025-06-09 13:30:13,136 - __main__ - INFO - Application startup sequence completed successfully.
|
609 |
+
2025-06-09 13:30:42,946 - __main__ - INFO - [chatcmpl-0977bf5aff9b43f3889a3eee2ff0c0c8] Received chat completion request: model='Qwen no Think', stream=True, md_convert=True
|
610 |
+
2025-06-09 13:30:42,953 - streaming - INFO - [chatcmpl-0977bf5aff9b43f3889a3eee2ff0c0c8] Starting streaming response for model 'Qwen no Think'
|
611 |
+
2025-06-09 13:30:42,953 - __main__ - INFO - [8dca5736-454b-4a53-afc7-9a1356f5609a] Starting chat. Model: 'Qwen no Think', RawHTML: False, MarkdownMode: True.
|
612 |
+
2025-06-09 13:30:42,953 - __main__ - INFO - Selecting model: Qwen no Think
|
613 |
+
2025-06-09 13:30:43,303 - __main__ - INFO - Selected model: Qwen no Think
|
614 |
+
2025-06-09 13:30:43,304 - __main__ - INFO - [8dca5736-454b-4a53-afc7-9a1356f5609a] Sending prompt (first 50 chars): 'python pascals tree gen long code explain everythi...'
|
615 |
+
2025-06-09 13:30:43,304 - __main__ - INFO - Typing prompt into textarea.
|
616 |
+
2025-06-09 13:30:43,961 - __main__ - INFO - Prompt submitted.
|
617 |
+
2025-06-09 13:30:43,962 - __main__ - INFO - Checking for 'Agree' button in dialog.
|
618 |
+
2025-06-09 13:30:44,084 - __main__ - INFO - 'Agree' button not visible, skipping.
|
619 |
+
2025-06-09 13:30:44,084 - __main__ - INFO - [8dca5736-454b-4a53-afc7-9a1356f5609a] Prompt sent. Streaming response...
|
620 |
+
2025-06-09 13:30:47,172 - streaming - INFO - [StreamProc/Poll] Starting content polling
|
621 |
+
2025-06-09 13:31:42,729 - streaming - INFO - [StreamProc/Poll] Content stable for 11.1s, ending stream
|
622 |
+
2025-06-09 13:31:42,763 - streaming - INFO - Stream metrics: 1253 polls, 539 changes, 29,442,690 bytes in 55.59s
|
623 |
+
2025-06-09 13:31:42,763 - __main__ - INFO - [8dca5736-454b-4a53-afc7-9a1356f5609a] Finished streaming response from browser.
|
624 |
+
2025-06-09 13:31:42,763 - __main__ - INFO - [8dca5736-454b-4a53-afc7-9a1356f5609a] Cleaning up chat session by clicking 'New Chat'.
|
625 |
+
2025-06-09 13:31:42,764 - __main__ - INFO - [8dca5736-454b-4a53-afc7-9a1356f5609a] Attempting to click 'New Chat' button.
|
626 |
+
2025-06-09 13:31:42,872 - __main__ - INFO - [8dca5736-454b-4a53-afc7-9a1356f5609a] 'New Chat' button clicked successfully.
|
627 |
+
2025-06-09 13:31:42,873 - streaming - INFO - [chatcmpl-0977bf5aff9b43f3889a3eee2ff0c0c8] Streaming complete. Total content: 360 chars
|
628 |
+
2025-06-09 13:33:24,447 - __main__ - INFO - Application shutdown sequence initiated.
|
629 |
+
2025-06-09 13:33:24,447 - __main__ - INFO - Cleaning up and quitting Selenium driver...
|
630 |
+
2025-06-09 13:33:26,769 - __main__ - INFO - Driver quit successfully.
|
631 |
+
2025-06-09 13:33:26,769 - __main__ - INFO - Application shutdown sequence completed.
|
632 |
+
2025-06-09 13:34:01,581 - __main__ - INFO - Configuration loaded.
|
633 |
+
2025-06-09 13:34:01,589 - __main__ - INFO - DriverManager instance created.
|
634 |
+
2025-06-09 13:34:02,081 - __main__ - INFO - Gemini client initialized successfully.
|
635 |
+
2025-06-09 13:34:02,114 - __main__ - INFO - GEMINI_API_KEY is set.
|
636 |
+
2025-06-09 13:34:02,115 - __main__ - INFO - Starting Uvicorn server on 0.0.0.0:8000.
|
637 |
+
2025-06-09 13:34:02,141 - __main__ - INFO - Application startup sequence initiated.
|
638 |
+
2025-06-09 13:34:02,141 - __main__ - INFO - Initializing Selenium driver...
|
639 |
+
2025-06-09 13:34:02,143 - __main__ - INFO - Executing synchronous driver initialization and enhanced readiness check.
|
640 |
+
2025-06-09 13:34:05,208 - __main__ - INFO - Driver instantiated. Opening URL...
|
641 |
+
2025-06-09 13:34:10,548 - __main__ - INFO - URL 'https://beta.lmarena.ai/?mode=direct' opened.
|
642 |
+
2025-06-09 13:34:10,549 - __main__ - INFO - Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...
|
643 |
+
2025-06-09 13:34:10,922 - __main__ - INFO - uc_gui_click_captcha() completed. Main site should be loading now.
|
644 |
+
2025-06-09 13:34:10,922 - __main__ - INFO - Checking for on-site ('Verify Human') captcha...
|
645 |
+
2025-06-09 13:34:21,031 - __main__ - INFO - Textarea not ready or an on-site captcha indicator was found. Proceeding with AI solver.
|
646 |
+
2025-06-09 13:34:21,031 - __main__ - INFO - Starting visual AI check for on-site captcha.
|
647 |
+
2025-06-09 13:34:21,207 - __main__ - INFO - Sending screenshot to Gemini API for analysis.
|
648 |
+
2025-06-09 13:34:21,208 - google_genai.models - INFO - AFC is enabled with max remote calls: 10.
|
649 |
+
2025-06-09 13:34:21,208 - google_genai.models - INFO - AFC remote call 1 is done.
|
650 |
+
2025-06-09 13:34:23,882 - httpx - INFO - HTTP Request: POST https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse "HTTP/1.1 200 OK"
|
651 |
+
2025-06-09 13:34:24,045 - __main__ - INFO - Received Gemini response for on-site captcha check: ```json
|
652 |
+
[
|
653 |
+
{"box_2d": [720, 412, 757, 431], "label": "box"}
|
654 |
+
]
|
655 |
+
```
|
656 |
+
2025-06-09 13:34:24,045 - __main__ - INFO - On-site captcha checkbox found via Gemini. Clicking coordinates: {'box_2d': [720, 412, 757, 431], 'label': 'box'}
|
657 |
+
2025-06-09 13:34:27,822 - __main__ - INFO - Click performed. Now reloading page as requested for post-AI solve.
|
658 |
+
2025-06-09 13:34:27,823 - __main__ - INFO - Performing human-like page reload
|
659 |
+
2025-06-09 13:34:27,823 - __main__ - INFO - Using F5 key
|
660 |
+
2025-06-09 13:34:28,965 - __main__ - INFO - Page reloaded after 1.04s delay
|
661 |
+
2025-06-09 13:34:33,967 - __main__ - INFO - Selenium driver initialization process completed successfully.
|
662 |
+
2025-06-09 13:34:33,968 - __main__ - INFO - Application startup sequence completed successfully.
|
663 |
+
2025-06-09 13:34:42,463 - __main__ - INFO - [chatcmpl-7a355706384e4df3a77f1c6dc66ca4be] Received chat completion request: model='Qwen no Think', stream=True, md_convert=True
|
664 |
+
2025-06-09 13:34:42,470 - streaming - INFO - [chatcmpl-7a355706384e4df3a77f1c6dc66ca4be] Starting streaming response for model 'Qwen no Think'
|
665 |
+
2025-06-09 13:34:42,470 - __main__ - INFO - [fa42552d-73dc-4eee-bc0f-55a4ce19a39a] Starting chat. Model: 'Qwen no Think', RawHTML: False, MarkdownMode: True.
|
666 |
+
2025-06-09 13:34:42,471 - __main__ - INFO - Selecting model: Qwen no Think
|
667 |
+
2025-06-09 13:34:42,761 - __main__ - INFO - Selected model: Qwen no Think
|
668 |
+
2025-06-09 13:34:42,761 - __main__ - INFO - [fa42552d-73dc-4eee-bc0f-55a4ce19a39a] Sending prompt (first 50 chars): 'bionomial thoerem calc in python concise code but ...'
|
669 |
+
2025-06-09 13:34:42,762 - __main__ - INFO - Typing prompt into textarea.
|
670 |
+
2025-06-09 13:34:43,244 - __main__ - INFO - Prompt submitted.
|
671 |
+
2025-06-09 13:34:43,244 - __main__ - INFO - Checking for 'Agree' button in dialog.
|
672 |
+
2025-06-09 13:34:43,345 - __main__ - INFO - 'Agree' button not visible, skipping.
|
673 |
+
2025-06-09 13:34:43,345 - __main__ - INFO - [fa42552d-73dc-4eee-bc0f-55a4ce19a39a] Prompt sent. Streaming response...
|
674 |
+
2025-06-09 13:34:45,905 - __main__ - ERROR - Streaming error: StreamConfig.__init__() got an unexpected keyword argument 'stabilization_timeout'
|
675 |
+
Traceback (most recent call last):
|
676 |
+
File "c:\Users\caree\Code\Lmarena\api.py", line 414, in _stream_response
|
677 |
+
stream_config = StreamConfig(
|
678 |
+
poll_interval=config.poll_interval,
|
679 |
+
...<4 lines>...
|
680 |
+
convert_html_to_markdown=convert_html_to_markdown
|
681 |
+
)
|
682 |
+
TypeError: StreamConfig.__init__() got an unexpected keyword argument 'stabilization_timeout'
|
683 |
+
2025-06-09 13:34:45,907 - __main__ - INFO - [fa42552d-73dc-4eee-bc0f-55a4ce19a39a] Finished streaming response from browser.
|
684 |
+
2025-06-09 13:34:45,907 - __main__ - INFO - [fa42552d-73dc-4eee-bc0f-55a4ce19a39a] Cleaning up chat session by clicking 'New Chat'.
|
685 |
+
2025-06-09 13:34:45,907 - __main__ - INFO - [fa42552d-73dc-4eee-bc0f-55a4ce19a39a] Attempting to click 'New Chat' button.
|
686 |
+
2025-06-09 13:34:46,048 - __main__ - INFO - [fa42552d-73dc-4eee-bc0f-55a4ce19a39a] 'New Chat' button clicked successfully.
|
687 |
+
2025-06-09 13:34:46,049 - streaming - INFO - [chatcmpl-7a355706384e4df3a77f1c6dc66ca4be] Streaming complete. Sent 1 chunks, total: 91 chars
|
688 |
+
2025-06-09 13:36:40,674 - __main__ - INFO - Configuration loaded.
|
689 |
+
2025-06-09 13:36:40,682 - __main__ - INFO - DriverManager instance created.
|
690 |
+
2025-06-09 13:36:41,181 - __main__ - INFO - Gemini client initialized successfully.
|
691 |
+
2025-06-09 13:36:41,214 - __main__ - INFO - GEMINI_API_KEY is set.
|
692 |
+
2025-06-09 13:36:41,214 - __main__ - INFO - Starting Uvicorn server on 0.0.0.0:8000.
|
693 |
+
2025-06-09 13:36:41,240 - __main__ - INFO - Application startup sequence initiated.
|
694 |
+
2025-06-09 13:36:41,241 - __main__ - INFO - Initializing Selenium driver...
|
695 |
+
2025-06-09 13:36:41,243 - __main__ - INFO - Executing synchronous driver initialization and enhanced readiness check.
|
696 |
+
2025-06-09 13:36:44,311 - __main__ - INFO - Driver instantiated. Opening URL...
|
697 |
+
2025-06-09 13:36:50,037 - __main__ - INFO - URL 'https://beta.lmarena.ai/?mode=direct' opened.
|
698 |
+
2025-06-09 13:36:50,037 - __main__ - INFO - Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...
|
699 |
+
2025-06-09 13:36:50,534 - __main__ - INFO - uc_gui_click_captcha() completed. Main site should be loading now.
|
700 |
+
2025-06-09 13:36:50,534 - __main__ - INFO - Checking for on-site ('Verify Human') captcha...
|
701 |
+
2025-06-09 13:37:00,639 - __main__ - INFO - Textarea not ready or an on-site captcha indicator was found. Proceeding with AI solver.
|
702 |
+
2025-06-09 13:37:00,639 - __main__ - INFO - Starting visual AI check for on-site captcha.
|
703 |
+
2025-06-09 13:37:00,795 - __main__ - INFO - Sending screenshot to Gemini API for analysis.
|
704 |
+
2025-06-09 13:37:00,796 - google_genai.models - INFO - AFC is enabled with max remote calls: 10.
|
705 |
+
2025-06-09 13:37:00,796 - google_genai.models - INFO - AFC remote call 1 is done.
|
706 |
+
2025-06-09 13:37:03,575 - httpx - INFO - HTTP Request: POST https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse "HTTP/1.1 200 OK"
|
707 |
+
2025-06-09 13:37:03,688 - __main__ - INFO - Received Gemini response for on-site captcha check: ```json
|
708 |
+
[
|
709 |
+
{"box_2d": [722, 412, 754, 431], "label": "box"}
|
710 |
+
]
|
711 |
+
```
|
712 |
+
2025-06-09 13:37:03,689 - __main__ - INFO - On-site captcha checkbox found via Gemini. Clicking coordinates: {'box_2d': [722, 412, 754, 431], 'label': 'box'}
|
713 |
+
2025-06-09 13:37:07,369 - __main__ - INFO - Click performed. Now reloading page as requested for post-AI solve.
|
714 |
+
2025-06-09 13:37:07,370 - __main__ - INFO - Performing human-like page reload
|
715 |
+
2025-06-09 13:37:07,370 - __main__ - INFO - Using FN+F5 key combination
|
716 |
+
2025-06-09 13:37:08,832 - __main__ - INFO - Page reloaded after 0.56s delay
|
717 |
+
2025-06-09 13:37:13,834 - __main__ - INFO - Selenium driver initialization process completed successfully.
|
718 |
+
2025-06-09 13:37:13,834 - __main__ - INFO - Application startup sequence completed successfully.
|
719 |
+
2025-06-09 13:37:17,850 - __main__ - INFO - [chatcmpl-5b3f2aabac7849ddb66938c955880bf8] Received chat completion request: model='Gemini 2.0', stream=True, md_convert=True
|
720 |
+
2025-06-09 13:37:17,857 - streaming - INFO - [chatcmpl-5b3f2aabac7849ddb66938c955880bf8] Starting streaming response for model 'Gemini 2.0'
|
721 |
+
2025-06-09 13:37:17,857 - __main__ - INFO - [7060fd0f-7d80-4957-a475-8cd2ab15676c] Starting chat. Model: 'Gemini 2.0', RawHTML: False, MarkdownMode: True.
|
722 |
+
2025-06-09 13:37:17,857 - __main__ - INFO - Selecting model: Gemini 2.0
|
723 |
+
2025-06-09 13:37:18,193 - __main__ - INFO - Selected model: Gemini 2.0
|
724 |
+
2025-06-09 13:37:18,194 - __main__ - INFO - [7060fd0f-7d80-4957-a475-8cd2ab15676c] Sending prompt (first 50 chars): 'bionomial thoerem calc in python concise code but ...'
|
725 |
+
2025-06-09 13:37:18,194 - __main__ - INFO - Typing prompt into textarea.
|
726 |
+
2025-06-09 13:37:18,784 - __main__ - INFO - Prompt submitted.
|
727 |
+
2025-06-09 13:37:18,785 - __main__ - INFO - Checking for 'Agree' button in dialog.
|
728 |
+
2025-06-09 13:37:18,884 - __main__ - INFO - 'Agree' button not visible, skipping.
|
729 |
+
2025-06-09 13:37:18,884 - __main__ - INFO - [7060fd0f-7d80-4957-a475-8cd2ab15676c] Prompt sent. Streaming response...
|
730 |
+
2025-06-09 13:37:29,096 - streaming - INFO - [StreamProc/Poll] Starting content polling
|
731 |
+
2025-06-09 13:37:43,597 - streaming - INFO - [StreamProc/Poll] Content stable for 10.0s, ending stream
|
732 |
+
2025-06-09 13:37:43,598 - streaming - INFO - [StreamProc/Poll] Polling ended after 14.5s
|
733 |
+
2025-06-09 13:37:43,598 - __main__ - INFO - [7060fd0f-7d80-4957-a475-8cd2ab15676c] Finished streaming response from browser.
|
734 |
+
2025-06-09 13:37:43,599 - __main__ - INFO - [7060fd0f-7d80-4957-a475-8cd2ab15676c] Cleaning up chat session by clicking 'New Chat'.
|
735 |
+
2025-06-09 13:37:43,599 - __main__ - INFO - [7060fd0f-7d80-4957-a475-8cd2ab15676c] Attempting to click 'New Chat' button.
|
736 |
+
2025-06-09 13:37:43,686 - __main__ - INFO - [7060fd0f-7d80-4957-a475-8cd2ab15676c] 'New Chat' button clicked successfully.
|
737 |
+
2025-06-09 13:37:43,687 - streaming - INFO - [chatcmpl-5b3f2aabac7849ddb66938c955880bf8] Streaming complete. Sent 26 chunks, total: 30702 chars
|
738 |
+
2025-06-09 13:37:57,826 - __main__ - INFO - Application shutdown sequence initiated.
|
739 |
+
2025-06-09 13:37:57,827 - __main__ - INFO - Cleaning up and quitting Selenium driver...
|
740 |
+
2025-06-09 13:38:00,251 - __main__ - INFO - Driver quit successfully.
|
741 |
+
2025-06-09 13:38:00,252 - __main__ - INFO - Application shutdown sequence completed.
|
742 |
+
2025-06-09 13:38:05,462 - __main__ - INFO - Configuration loaded.
|
743 |
+
2025-06-09 13:38:05,476 - __main__ - INFO - DriverManager instance created.
|
744 |
+
2025-06-09 13:38:05,982 - __main__ - INFO - Gemini client initialized successfully.
|
745 |
+
2025-06-09 13:38:06,014 - __main__ - INFO - GEMINI_API_KEY is set.
|
746 |
+
2025-06-09 13:38:06,015 - __main__ - INFO - Starting Uvicorn server on 0.0.0.0:8000.
|
747 |
+
2025-06-09 13:38:06,040 - __main__ - INFO - Application startup sequence initiated.
|
748 |
+
2025-06-09 13:38:06,041 - __main__ - INFO - Initializing Selenium driver...
|
749 |
+
2025-06-09 13:38:06,042 - __main__ - INFO - Executing synchronous driver initialization and enhanced readiness check.
|
750 |
+
2025-06-09 13:38:09,109 - __main__ - INFO - Driver instantiated. Opening URL...
|
751 |
+
2025-06-09 13:38:16,487 - __main__ - INFO - URL 'https://beta.lmarena.ai/?mode=direct' opened.
|
752 |
+
2025-06-09 13:38:16,488 - __main__ - INFO - Attempting to solve initial (Cloudflare-style) captcha with uc_gui_click_captcha()...
|
753 |
+
2025-06-09 13:38:17,034 - __main__ - INFO - uc_gui_click_captcha() completed. Main site should be loading now.
|
754 |
+
2025-06-09 13:38:17,034 - __main__ - INFO - Checking for on-site ('Verify Human') captcha...
|
755 |
+
2025-06-09 13:38:27,140 - __main__ - INFO - Textarea not ready or an on-site captcha indicator was found. Proceeding with AI solver.
|
756 |
+
2025-06-09 13:38:27,141 - __main__ - INFO - Starting visual AI check for on-site captcha.
|
757 |
+
2025-06-09 13:38:27,292 - __main__ - INFO - Sending screenshot to Gemini API for analysis.
|
758 |
+
2025-06-09 13:38:27,293 - google_genai.models - INFO - AFC is enabled with max remote calls: 10.
|
759 |
+
2025-06-09 13:38:27,294 - google_genai.models - INFO - AFC remote call 1 is done.
|
760 |
+
2025-06-09 13:38:30,360 - httpx - INFO - HTTP Request: POST https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse "HTTP/1.1 200 OK"
|
761 |
+
2025-06-09 13:38:30,419 - __main__ - INFO - Received Gemini response for on-site captcha check: ```json
|
762 |
+
[
|
763 |
+
{"box_2d": [715, 412, 758, 432], "label": "box"}
|
764 |
+
]
|
765 |
+
```
|
766 |
+
2025-06-09 13:38:30,419 - __main__ - INFO - On-site captcha checkbox found via Gemini. Clicking coordinates: {'box_2d': [715, 412, 758, 432], 'label': 'box'}
|
767 |
+
2025-06-09 13:38:34,201 - __main__ - INFO - Click performed. Now reloading page as requested for post-AI solve.
|
768 |
+
2025-06-09 13:38:34,202 - __main__ - INFO - Performing human-like page reload
|
769 |
+
2025-06-09 13:38:34,202 - __main__ - INFO - Using FN+F5 key combination
|
770 |
+
2025-06-09 13:38:36,047 - __main__ - INFO - Page reloaded after 0.94s delay
|
771 |
+
2025-06-09 13:38:41,049 - __main__ - INFO - Selenium driver initialization process completed successfully.
|
772 |
+
2025-06-09 13:38:41,050 - __main__ - INFO - Application startup sequence completed successfully.
|
773 |
+
2025-06-09 13:38:44,622 - __main__ - INFO - [chatcmpl-28b698e36b09432383823c19da3c0335] Received chat completion request: model='Gemini 2.0 Flash', stream=True, md_convert=True
|
774 |
+
2025-06-09 13:38:44,629 - streaming - INFO - [chatcmpl-28b698e36b09432383823c19da3c0335] Starting streaming response generation for model 'Gemini 2.0 Flash'.
|
775 |
+
2025-06-09 13:38:44,629 - __main__ - INFO - [0c17e458-0e06-47bf-ba50-a1bc3da3d518] Starting chat. Model: 'Gemini 2.0 Flash', RawHTML: False, MarkdownMode: True.
|
776 |
+
2025-06-09 13:38:44,630 - __main__ - INFO - Selecting model: Gemini 2.0 Flash
|
777 |
+
2025-06-09 13:38:44,985 - __main__ - INFO - Selected model: Gemini 2.0 Flash
|
778 |
+
2025-06-09 13:38:44,986 - __main__ - INFO - [0c17e458-0e06-47bf-ba50-a1bc3da3d518] Sending prompt (first 50 chars): 'bionomial thoerem calc in python concise code but ...'
|
779 |
+
2025-06-09 13:38:44,986 - __main__ - INFO - Typing prompt into textarea.
|
780 |
+
2025-06-09 13:38:45,522 - __main__ - INFO - Prompt submitted.
|
781 |
+
2025-06-09 13:38:45,523 - __main__ - INFO - Checking for 'Agree' button in dialog.
|
782 |
+
2025-06-09 13:38:45,612 - __main__ - INFO - 'Agree' button not visible, skipping.
|
783 |
+
2025-06-09 13:38:45,612 - __main__ - INFO - [0c17e458-0e06-47bf-ba50-a1bc3da3d518] Prompt sent. Streaming response...
|
784 |
+
2025-06-09 13:39:06,675 - streaming - INFO - [StreamProc/PollStream] Content stable for 10.00s. Ending poll.
|
785 |
+
2025-06-09 13:39:06,688 - __main__ - INFO - [0c17e458-0e06-47bf-ba50-a1bc3da3d518] Finished streaming response from browser.
|
786 |
+
2025-06-09 13:39:06,689 - __main__ - INFO - [0c17e458-0e06-47bf-ba50-a1bc3da3d518] Cleaning up chat session by clicking 'New Chat'.
|
787 |
+
2025-06-09 13:39:06,689 - __main__ - INFO - [0c17e458-0e06-47bf-ba50-a1bc3da3d518] Attempting to click 'New Chat' button.
|
788 |
+
2025-06-09 13:39:06,765 - __main__ - INFO - [0c17e458-0e06-47bf-ba50-a1bc3da3d518] 'New Chat' button clicked successfully.
|
789 |
+
2025-06-09 13:39:06,766 - streaming - INFO - [chatcmpl-28b698e36b09432383823c19da3c0335] Yielding final chunk. Total content length: 3937 chars.
|
790 |
+
2025-06-09 13:39:06,766 - streaming - INFO - [chatcmpl-28b698e36b09432383823c19da3c0335] Yielding [DONE] signal.
|
requirements.txt
CHANGED
@@ -10,4 +10,5 @@ starlette
|
|
10 |
openai
|
11 |
pyvirtualdisplay
|
12 |
mss
|
|
|
13 |
PyAutoGUI
|
|
|
10 |
openai
|
11 |
pyvirtualdisplay
|
12 |
mss
|
13 |
+
beautifulsoup4
|
14 |
PyAutoGUI
|
sdfhsdf.py
ADDED
@@ -0,0 +1,676 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Pascal's Tree Generator - A Comprehensive Implementation
|
3 |
+
======================================================
|
4 |
+
|
5 |
+
This module implements multiple interpretations of Pascal's tree:
|
6 |
+
1. Binary tree where each node contains Pascal's triangle values
|
7 |
+
2. Tree representation showing the recursive nature of Pascal's triangle
|
8 |
+
3. Path-based tree showing how Pascal values are constructed
|
9 |
+
"""
|
10 |
+
|
11 |
+
import math
|
12 |
+
from typing import List, Optional, Tuple, Dict, Any
|
13 |
+
from collections import deque
|
14 |
+
import matplotlib.pyplot as plt
|
15 |
+
import networkx as nx
|
16 |
+
from dataclasses import dataclass
|
17 |
+
import numpy as np
|
18 |
+
|
19 |
+
|
20 |
+
@dataclass
class TreeNode:
    """
    Represents a node in Pascal's tree.

    Attributes:
        value: The numeric value at this node (from Pascal's triangle)
        row: The row in Pascal's triangle (0-indexed)
        col: The column in Pascal's triangle (0-indexed)
        left: Left child node
        right: Right child node
        parent: Parent node reference
        coefficient: The binomial coefficient C(row, col); computed in
            __post_init__ when not supplied by the caller
    """
    value: int
    row: int
    col: int
    left: Optional['TreeNode'] = None
    right: Optional['TreeNode'] = None
    parent: Optional['TreeNode'] = None
    coefficient: Optional[int] = None

    def __post_init__(self):
        # Fill in the binomial coefficient unless the caller provided one.
        if self.coefficient is None:
            self.coefficient = self._calculate_binomial_coefficient()

    def _calculate_binomial_coefficient(self) -> int:
        """
        Return C(row, col), or 0 for positions outside the triangle.

        Delegates to math.comb (Python 3.8+, already used elsewhere in this
        module) instead of re-implementing the multiplicative formula.
        math.comb raises for negative arguments, so the out-of-triangle
        guard must stay in front of it.
        """
        if self.col > self.row or self.col < 0:
            return 0
        return math.comb(self.row, self.col)
|
65 |
+
|
66 |
+
|
67 |
+
class PascalTreeGenerator:
|
68 |
+
"""
|
69 |
+
Main class for generating various forms of Pascal's tree.
|
70 |
+
|
71 |
+
This class implements multiple algorithms for constructing Pascal's tree,
|
72 |
+
each highlighting different mathematical properties and relationships.
|
73 |
+
"""
|
74 |
+
|
75 |
+
def __init__(self):
|
76 |
+
self.root = None
|
77 |
+
self.nodes_by_position = {} # (row, col) -> TreeNode
|
78 |
+
self.level_nodes = {} # row -> List[TreeNode]
|
79 |
+
|
80 |
+
def generate_recursive_tree(self, max_depth: int) -> TreeNode:
|
81 |
+
"""
|
82 |
+
Generate Pascal's tree using recursive construction.
|
83 |
+
|
84 |
+
This method creates a binary tree where:
|
85 |
+
- Each node (n, k) has value C(n, k)
|
86 |
+
- Left child is (n+1, k)
|
87 |
+
- Right child is (n+1, k+1)
|
88 |
+
|
89 |
+
This represents the recursive property:
|
90 |
+
C(n+1, k) + C(n+1, k+1) = C(n+2, k+1)
|
91 |
+
|
92 |
+
Args:
|
93 |
+
max_depth: Maximum depth of the tree
|
94 |
+
|
95 |
+
Returns:
|
96 |
+
Root node of the tree
|
97 |
+
"""
|
98 |
+
self.root = self._build_recursive_node(0, 0, max_depth)
|
99 |
+
return self.root
|
100 |
+
|
101 |
+
def _build_recursive_node(self, row: int, col: int, max_depth: int,
|
102 |
+
parent: Optional[TreeNode] = None) -> Optional[TreeNode]:
|
103 |
+
"""
|
104 |
+
Recursively build tree nodes.
|
105 |
+
|
106 |
+
The mathematical insight here is that Pascal's triangle can be
|
107 |
+
constructed by the recurrence relation:
|
108 |
+
C(n, k) = C(n-1, k-1) + C(n-1, k)
|
109 |
+
|
110 |
+
We're building the tree in the forward direction, showing how
|
111 |
+
each value contributes to values in subsequent rows.
|
112 |
+
"""
|
113 |
+
if row > max_depth or col > row or col < 0:
|
114 |
+
return None
|
115 |
+
|
116 |
+
# Create node with binomial coefficient value
|
117 |
+
node = TreeNode(
|
118 |
+
value=math.comb(row, col), # Python 3.8+ built-in
|
119 |
+
row=row,
|
120 |
+
col=col,
|
121 |
+
parent=parent
|
122 |
+
)
|
123 |
+
|
124 |
+
# Store node reference for later access
|
125 |
+
self.nodes_by_position[(row, col)] = node
|
126 |
+
|
127 |
+
if row not in self.level_nodes:
|
128 |
+
self.level_nodes[row] = []
|
129 |
+
self.level_nodes[row].append(node)
|
130 |
+
|
131 |
+
# Build children representing contributions to next row
|
132 |
+
node.left = self._build_recursive_node(row + 1, col, max_depth, node)
|
133 |
+
node.right = self._build_recursive_node(row + 1, col + 1, max_depth, node)
|
134 |
+
|
135 |
+
return node
|
136 |
+
|
137 |
+
def generate_path_sum_tree(self, max_depth: int) -> TreeNode:
|
138 |
+
"""
|
139 |
+
Generate a tree where paths from root represent the construction
|
140 |
+
of Pascal's triangle values through path counting.
|
141 |
+
|
142 |
+
Mathematical principle: C(n, k) equals the number of paths from
|
143 |
+
(0, 0) to (n, k) moving only right or down in a grid.
|
144 |
+
|
145 |
+
In tree form:
|
146 |
+
- Left branch represents "choosing" an element
|
147 |
+
- Right branch represents "not choosing" an element
|
148 |
+
"""
|
149 |
+
self.root = TreeNode(value=1, row=0, col=0)
|
150 |
+
self._build_path_tree(self.root, max_depth)
|
151 |
+
return self.root
|
152 |
+
|
153 |
+
def _build_path_tree(self, node: TreeNode, remaining_depth: int):
|
154 |
+
"""
|
155 |
+
Build tree based on path counting interpretation.
|
156 |
+
|
157 |
+
Each path from root to a node at depth n with k left turns
|
158 |
+
represents one of the C(n, k) ways to choose k items from n items.
|
159 |
+
"""
|
160 |
+
if remaining_depth <= 0:
|
161 |
+
return
|
162 |
+
|
163 |
+
# Left child: "choose" (increment both row and col)
|
164 |
+
node.left = TreeNode(
|
165 |
+
value=node.value, # Path count remains same along each path
|
166 |
+
row=node.row + 1,
|
167 |
+
col=node.col + 1,
|
168 |
+
parent=node
|
169 |
+
)
|
170 |
+
|
171 |
+
# Right child: "don't choose" (increment only row)
|
172 |
+
node.right = TreeNode(
|
173 |
+
value=node.value,
|
174 |
+
row=node.row + 1,
|
175 |
+
col=node.col,
|
176 |
+
parent=node
|
177 |
+
)
|
178 |
+
|
179 |
+
# Recursively build subtrees
|
180 |
+
self._build_path_tree(node.left, remaining_depth - 1)
|
181 |
+
self._build_path_tree(node.right, remaining_depth - 1)
|
182 |
+
|
183 |
+
def generate_sierpinski_tree(self, max_depth: int) -> TreeNode:
|
184 |
+
"""
|
185 |
+
Generate Pascal's tree with Sierpinski triangle properties.
|
186 |
+
|
187 |
+
Mathematical insight: When Pascal's triangle values are taken
|
188 |
+
modulo 2, the resulting pattern is the Sierpinski triangle.
|
189 |
+
|
190 |
+
This tree structure highlights the fractal nature of Pascal's
|
191 |
+
triangle and its connection to cellular automata.
|
192 |
+
"""
|
193 |
+
self.root = TreeNode(value=1, row=0, col=0)
|
194 |
+
self._build_sierpinski_node(self.root, max_depth)
|
195 |
+
return self.root
|
196 |
+
|
197 |
+
def _build_sierpinski_node(self, node: TreeNode, remaining_depth: int):
|
198 |
+
"""
|
199 |
+
Build tree with Sierpinski triangle properties.
|
200 |
+
|
201 |
+
Odd values in Pascal's triangle form the Sierpinski triangle
|
202 |
+
pattern. This is related to Lucas' theorem about binomial
|
203 |
+
coefficients modulo primes.
|
204 |
+
"""
|
205 |
+
if remaining_depth <= 0:
|
206 |
+
return
|
207 |
+
|
208 |
+
# Calculate children values using the recurrence relation
|
209 |
+
left_val = self._get_pascal_value(node.row + 1, node.col)
|
210 |
+
right_val = self._get_pascal_value(node.row + 1, node.col + 1)
|
211 |
+
|
212 |
+
# Create children with modulo 2 coloring for Sierpinski
|
213 |
+
if left_val > 0: # Valid position in triangle
|
214 |
+
node.left = TreeNode(
|
215 |
+
value=left_val,
|
216 |
+
row=node.row + 1,
|
217 |
+
col=node.col,
|
218 |
+
parent=node
|
219 |
+
)
|
220 |
+
node.left.sierpinski_bit = left_val % 2
|
221 |
+
self._build_sierpinski_node(node.left, remaining_depth - 1)
|
222 |
+
|
223 |
+
if right_val > 0: # Valid position in triangle
|
224 |
+
node.right = TreeNode(
|
225 |
+
value=right_val,
|
226 |
+
row=node.row + 1,
|
227 |
+
col=node.col + 1,
|
228 |
+
parent=node
|
229 |
+
)
|
230 |
+
node.right.sierpinski_bit = right_val % 2
|
231 |
+
self._build_sierpinski_node(node.right, remaining_depth - 1)
|
232 |
+
|
233 |
+
def _get_pascal_value(self, row: int, col: int) -> int:
|
234 |
+
"""
|
235 |
+
Get Pascal's triangle value at (row, col).
|
236 |
+
|
237 |
+
Uses the efficient multiplicative formula rather than
|
238 |
+
factorial calculation for better performance.
|
239 |
+
"""
|
240 |
+
if col > row or col < 0:
|
241 |
+
return 0
|
242 |
+
if col == 0 or col == row:
|
243 |
+
return 1
|
244 |
+
|
245 |
+
# Use dynamic programming if we've already calculated it
|
246 |
+
if (row, col) in self.nodes_by_position:
|
247 |
+
return self.nodes_by_position[(row, col)].value
|
248 |
+
|
249 |
+
# Calculate using the multiplicative formula
|
250 |
+
k = min(col, row - col) # Use symmetry
|
251 |
+
result = 1
|
252 |
+
for i in range(k):
|
253 |
+
result = result * (row - i) // (i + 1)
|
254 |
+
|
255 |
+
return result
|
256 |
+
|
257 |
+
def calculate_tree_properties(self, root: TreeNode) -> Dict[str, Any]:
|
258 |
+
"""
|
259 |
+
Calculate various mathematical properties of the generated tree.
|
260 |
+
|
261 |
+
This includes:
|
262 |
+
- Sum of values at each level (should equal 2^n)
|
263 |
+
- Maximum value at each level
|
264 |
+
- Number of odd values (related to Sierpinski)
|
265 |
+
- Tree balance metrics
|
266 |
+
"""
|
267 |
+
properties = {
|
268 |
+
'level_sums': {},
|
269 |
+
'level_max_values': {},
|
270 |
+
'odd_count_by_level': {},
|
271 |
+
'total_nodes': 0,
|
272 |
+
'leaf_nodes': 0,
|
273 |
+
'internal_nodes': 0,
|
274 |
+
'max_depth': 0,
|
275 |
+
'perfectly_balanced': True
|
276 |
+
}
|
277 |
+
|
278 |
+
# BFS traversal to calculate properties
|
279 |
+
queue = deque([(root, 0)])
|
280 |
+
level_nodes_count = {}
|
281 |
+
|
282 |
+
while queue:
|
283 |
+
node, depth = queue.popleft()
|
284 |
+
properties['total_nodes'] += 1
|
285 |
+
properties['max_depth'] = max(properties['max_depth'], depth)
|
286 |
+
|
287 |
+
# Update level-based statistics
|
288 |
+
if depth not in properties['level_sums']:
|
289 |
+
properties['level_sums'][depth] = 0
|
290 |
+
properties['level_max_values'][depth] = 0
|
291 |
+
properties['odd_count_by_level'][depth] = 0
|
292 |
+
level_nodes_count[depth] = 0
|
293 |
+
|
294 |
+
properties['level_sums'][depth] += node.value
|
295 |
+
properties['level_max_values'][depth] = max(
|
296 |
+
properties['level_max_values'][depth],
|
297 |
+
node.value
|
298 |
+
)
|
299 |
+
|
300 |
+
if node.value % 2 == 1:
|
301 |
+
properties['odd_count_by_level'][depth] += 1
|
302 |
+
|
303 |
+
level_nodes_count[depth] += 1
|
304 |
+
|
305 |
+
# Check if leaf or internal node
|
306 |
+
if node.left is None and node.right is None:
|
307 |
+
properties['leaf_nodes'] += 1
|
308 |
+
else:
|
309 |
+
properties['internal_nodes'] += 1
|
310 |
+
if node.left:
|
311 |
+
queue.append((node.left, depth + 1))
|
312 |
+
if node.right:
|
313 |
+
queue.append((node.right, depth + 1))
|
314 |
+
|
315 |
+
# Verify mathematical properties
|
316 |
+
properties['level_sum_verification'] = {}
|
317 |
+
for level, sum_val in properties['level_sums'].items():
|
318 |
+
expected = 2 ** level # Sum of row n in Pascal's triangle
|
319 |
+
properties['level_sum_verification'][level] = {
|
320 |
+
'actual': sum_val,
|
321 |
+
'expected': expected,
|
322 |
+
'correct': sum_val == expected
|
323 |
+
}
|
324 |
+
|
325 |
+
# Check if tree is perfectly balanced
|
326 |
+
for level in range(properties['max_depth']):
|
327 |
+
expected_nodes = 2 ** level
|
328 |
+
if level_nodes_count[level] != expected_nodes:
|
329 |
+
properties['perfectly_balanced'] = False
|
330 |
+
break
|
331 |
+
|
332 |
+
return properties
|
333 |
+
|
334 |
+
def visualize_tree(self, root: TreeNode, filename: str = 'pascal_tree.png',
                   show_values: bool = True, show_sierpinski: bool = False):
    """
    Render Pascal's tree to a PNG image.

    Builds a directed graph with networkx via a breadth-first walk,
    lays nodes out hierarchically by depth, and saves the resulting
    matplotlib figure to `filename`.
    """
    graph = nx.DiGraph()
    positions = {}
    node_labels = {}
    node_colors = []

    initial_spread = 2 ** 6  # horizontal spacing budget at the root
    pending = deque([(root, 0, 0)])  # (node, x coordinate, depth)

    while pending:
        current, x_pos, level = pending.popleft()
        key = f"{current.row},{current.col}"

        graph.add_node(key)
        positions[key] = (x_pos, -level)
        node_labels[key] = str(current.value) if show_values else f"({current.row},{current.col})"

        # Sierpinski mode colors odd values red; otherwise color by value magnitude.
        if show_sierpinski:
            node_colors.append('red' if current.value % 2 == 1 else 'lightblue')
        else:
            node_colors.append(current.value)

        # Children get half the remaining horizontal spread on each side.
        half_spread = initial_spread / (2 ** (level + 1))
        for child, child_x in ((current.left, x_pos - half_spread),
                               (current.right, x_pos + half_spread)):
            if child:
                graph.add_edge(key, f"{child.row},{child.col}")
                pending.append((child, child_x, level + 1))

    plt.figure(figsize=(15, 10))

    draw_kwargs = dict(labels=node_labels, node_color=node_colors,
                       node_size=500, font_size=10, font_weight='bold',
                       with_labels=True, arrows=False)
    if not show_sierpinski:
        # Continuous colormap only makes sense for numeric colors.
        draw_kwargs['cmap'] = 'YlOrRd'
    nx.draw(graph, positions, **draw_kwargs)

    plt.title("Pascal's Tree Visualization", fontsize=16)
    plt.axis('off')
    plt.tight_layout()
    plt.savefig(filename, dpi=300, bbox_inches='tight')
    plt.close()
|
400 |
+
|
401 |
+
def print_tree_text(self, root: TreeNode, max_depth: int = 5):
    """
    Print an indented text rendering of Pascal's tree.

    Only the banner is emitted here; the actual traversal is
    delegated to _print_node_recursive.
    """
    banner = "=" * 50
    for line in ("Pascal's Tree - Text Representation",
                 banner,
                 "Format: value [C(row,col)] (row,col)",
                 banner):
        print(line)

    self._print_node_recursive(root, "", True, 0, max_depth)
|
414 |
+
|
415 |
+
def _print_node_recursive(self, node: Optional[TreeNode], prefix: str,
                          is_tail: bool, depth: int, max_depth: int):
    """
    Emit one node and recurse into its children with box-drawing prefixes.

    Stops silently past `max_depth` or on a missing node.
    """
    if node is None or depth > max_depth:
        return

    # "└── " closes a branch; "├── " continues one.
    branch = "└── " if is_tail else "├── "
    print(f"{prefix}{branch}{node.value} [C({node.row},{node.col})] "
          f"({node.row},{node.col})")

    next_prefix = prefix + ("    " if is_tail else "│   ")

    children = [child for child in (node.left, node.right) if child]
    last_index = len(children) - 1
    for index, child in enumerate(children):
        self._print_node_recursive(child, next_prefix, index == last_index,
                                   depth + 1, max_depth)
|
444 |
+
|
445 |
+
def generate_fibonacci_connection_tree(self, max_depth: int) -> TreeNode:
    """
    Build a Pascal tree annotated with its Fibonacci-diagonal structure.

    Mathematical insight: the sum of the nth shallow diagonal of
    Pascal's triangle equals the nth Fibonacci number,
    i.e. F(n) = Σ C(k, n-k-1) for k from 0 to floor(n/2).
    """
    # Reuse the standard construction, then attach the diagonal annotations.
    self.root = self.generate_recursive_tree(max_depth)
    self._annotate_fibonacci_diagonals(self.root)
    return self.root
|
462 |
+
|
463 |
+
def _annotate_fibonacci_diagonals(self, root: TreeNode):
    """
    Tag every node with its diagonal index/contribution and print each
    diagonal's sum next to the Fibonacci sequence for comparison.

    Each diagonal (indexed d = row - col) of Pascal's triangle
    sums to a Fibonacci number.
    """
    diagonal_sums = {}

    to_visit = deque([root])
    while to_visit:
        current = to_visit.popleft()

        # Diagonal index: d = row - col
        d = current.row - current.col
        diagonal_sums[d] = diagonal_sums.get(d, 0) + current.value

        # Record the connection directly on the node.
        current.fibonacci_diagonal = d
        current.fibonacci_contribution = current.value

        for child in (current.left, current.right):
            if child:
                to_visit.append(child)

    # Report the diagonal sums alongside the Fibonacci sequence.
    print("\nFibonacci-Pascal Connection:")
    print("Diagonal Index -> Sum = Fibonacci Number")
    fib_a, fib_b = 1, 1
    for d in sorted(diagonal_sums):
        if d >= 0:
            print(f"Diagonal {d}: Sum = {diagonal_sums[d]}, "
                  f"Fibonacci F({d+1}) = {fib_a}")
            fib_a, fib_b = fib_b, fib_a + fib_b
503 |
+
|
504 |
+
def analyze_combinatorial_interpretations(self, max_rows: int):
    """
    Print several combinatorial readings of Pascal's tree values.

    Covered interpretations:
      1. Coefficients of the binomial expansion (x + y)^n
      2. Monotone lattice-path counts in a grid
      3. Subset counts of an n-element set
      4. Central binomial coefficients vs. Catalan numbers
    """
    print("\nCombinatorial Interpretations of Pascal's Tree")
    print("=" * 60)

    for n in range(max_rows + 1):
        print(f"\nRow {n}:")

        # 1. Coefficients of (x + y)^n.
        print(f"  Binomial expansion of (x + y)^{n}:")
        expansion = []
        for k in range(n + 1):
            coeff = math.comb(n, k)
            if k == 0:
                term = f"{coeff}x^{n}"
            elif k == n:
                term = f"{coeff}y^{n}"
            else:
                term = f"{coeff}x^{n-k}y^{k}"
            expansion.append(term)
        print(f"    {' + '.join(expansion)}")

        # 2. Monotone lattice paths through an n-step grid.
        print(f"  Grid paths from (0,0) to ({n},k):")
        for k in range(n + 1):
            paths = math.comb(n, k)
            print(f"    To ({n},{k}): {paths} paths "
                  f"(choose {k} 'right' moves from {n} total moves)")

        # 3. Subsets of an n-element set, split by size.
        total_subsets = 2 ** n
        print(f"  Subsets of a set with {n} elements: {total_subsets} total")
        for k in range(n + 1):
            print(f"    Subsets of size {k}: {math.comb(n, k)}")

    # 4. Catalan(n) = C(2n, n) / (n + 1); capped at 10 rows of output.
    print("\n\nCatalan Numbers Connection:")
    print("Central binomial coefficients relate to Catalan numbers")
    for n in range(min(max_rows + 1, 10)):
        central = math.comb(2 * n, n)
        catalan = central // (n + 1)
        print(f"  C({2*n},{n}) = {central}, "
              f"Catalan({n}) = {catalan} = C({2*n},{n})/{n+1}")
|
555 |
+
|
556 |
+
|
557 |
+
# Example usage and demonstrations
|
558 |
+
def demonstrate_pascal_trees():
    """
    Comprehensive demonstration of Pascal's tree implementations.

    Walks through every generator mode (recursive, path-sum, Sierpinski,
    Fibonacci-connection), prints property checks and combinatorial
    identities, and writes two PNG visualizations to the working
    directory.

    Returns:
        PascalTreeGenerator: the generator that built the standard tree,
        so callers can reuse the constructed structure.
    """
    print("PASCAL'S TREE GENERATOR - COMPREHENSIVE DEMONSTRATION")
    print("=" * 70)

    # Create generator instance
    generator = PascalTreeGenerator()

    # 1. Generate recursive tree
    print("\n1. RECURSIVE PASCAL'S TREE")
    print("-" * 40)
    recursive_tree = generator.generate_recursive_tree(max_depth=5)
    generator.print_tree_text(recursive_tree, max_depth=4)

    # Calculate and display properties (node counts, balance, level sums)
    properties = generator.calculate_tree_properties(recursive_tree)
    print("\nTree Properties:")
    print(f"  Total nodes: {properties['total_nodes']}")
    print(f"  Max depth: {properties['max_depth']}")
    print(f"  Leaf nodes: {properties['leaf_nodes']}")
    print(f"  Perfectly balanced: {properties['perfectly_balanced']}")

    # Each level of Pascal's triangle sums to 2^n; show the verification.
    print("\nLevel sums (should equal 2^n):")
    for level, verification in properties['level_sum_verification'].items():
        status = "✓" if verification['correct'] else "✗"
        print(f"  Level {level}: {verification['actual']} "
              f"(expected {verification['expected']}) {status}")

    # 2. Path sum tree
    print("\n\n2. PATH SUM TREE")
    print("-" * 40)
    generator2 = PascalTreeGenerator()
    path_tree = generator2.generate_path_sum_tree(max_depth=4)
    print("Path sum tree shows how Pascal values can be computed")
    print("by counting paths in the tree structure.")

    # 3. Sierpinski tree (Pascal's triangle mod 2 forms the fractal)
    print("\n\n3. SIERPINSKI TREE (Pascal's Triangle mod 2)")
    print("-" * 40)
    generator3 = PascalTreeGenerator()
    sierpinski_tree = generator3.generate_sierpinski_tree(max_depth=6)

    # ● marks odd entries, ○ even — together they draw the Sierpinski triangle.
    print("Odd values in first 7 rows (forms Sierpinski triangle):")
    for level in range(7):
        row_values = []
        for col in range(level + 1):
            val = generator3._get_pascal_value(level, col)
            row_values.append("●" if val % 2 == 1 else "○")
        print(f"  Row {level}: {' '.join(row_values)}")

    # 4. Fibonacci connection (shallow diagonals sum to Fibonacci numbers)
    print("\n\n4. FIBONACCI CONNECTION TREE")
    print("-" * 40)
    generator4 = PascalTreeGenerator()
    fib_tree = generator4.generate_fibonacci_connection_tree(max_depth=7)

    # 5. Combinatorial analysis
    print("\n\n5. COMBINATORIAL INTERPRETATIONS")
    print("-" * 40)
    generator5 = PascalTreeGenerator()
    generator5.analyze_combinatorial_interpretations(max_rows=4)

    # 6. Mathematical relationships
    print("\n\n6. MATHEMATICAL RELATIONSHIPS IN PASCAL'S TREE")
    print("-" * 40)

    # Hockey stick: a column's partial sum telescopes to one entry below it.
    print("\nHockey Stick Identity:")
    print("Sum of a column: Σ C(k,r) for k=r to n equals C(n+1,r+1)")
    n, r = 6, 2
    column_sum = sum(math.comb(k, r) for k in range(r, n+1))
    print(f"  Example: Σ C(k,{r}) for k={r} to {n} = {column_sum}")
    print(f"  This equals C({n+1},{r+1}) = {math.comb(n+1, r+1)} ✓")

    # Vandermonde: choosing r from m+n splits over the two groups.
    print("\nVandermonde's Identity:")
    print("C(m+n,r) = Σ C(m,k) * C(n,r-k) for k=0 to r")
    m, n, r = 3, 4, 3
    left_side = math.comb(m+n, r)
    right_side = sum(math.comb(m, k) * math.comb(n, r-k) for k in range(r+1))
    print(f"  Example: C({m+n},{r}) = {left_side}")
    print(f"  Sum of products = {right_side} ✓")

    # 7. Generate visualizations (writes PNGs to the current directory)
    print("\n\n7. GENERATING VISUALIZATIONS")
    print("-" * 40)

    # Standard visualization
    print("Creating standard Pascal's tree visualization...")
    generator.visualize_tree(recursive_tree, 'pascal_tree_standard.png',
                             show_values=True, show_sierpinski=False)

    # Sierpinski visualization
    print("Creating Sierpinski pattern visualization...")
    generator3.visualize_tree(sierpinski_tree, 'pascal_tree_sierpinski.png',
                              show_values=True, show_sierpinski=True)

    print("\nVisualizations saved as:")
    print("  - pascal_tree_standard.png")
    print("  - pascal_tree_sierpinski.png")

    return generator
|
660 |
+
|
661 |
+
|
662 |
+
if __name__ == "__main__":
    # Run the full demonstration, then print a closing summary.
    generator = demonstrate_pascal_trees()

    print("\n" + "=" * 70)
    closing_lines = (
        "DEMONSTRATION COMPLETE",
        "\nPascal's tree reveals deep mathematical connections between:",
        "- Combinatorics (counting and choosing)",
        "- Number theory (divisibility patterns)",
        "- Fractals (Sierpinski triangle)",
        "- Fibonacci sequences",
        "- Probability theory",
        "- Algebraic expansions",
        "\nThe tree structure provides insights into the recursive",
        "and self-similar nature of Pascal's triangle.",
    )
    for line in closing_lines:
        print(line)
|
streaming.py
CHANGED
@@ -1,40 +1,90 @@
|
|
1 |
-
# streaming.py -
|
2 |
import json
|
3 |
import re
|
4 |
-
import logging
|
5 |
from typing import AsyncGenerator, Callable, Optional, Iterator, Tuple
|
6 |
from dataclasses import dataclass
|
7 |
-
import sys
|
8 |
import time
|
9 |
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException, TimeoutException
|
10 |
from selenium.webdriver.common.by import By
|
11 |
from selenium.webdriver.remote.webelement import WebElement
|
12 |
from selenium.webdriver.support.ui import WebDriverWait
|
13 |
from selenium.webdriver.support import expected_conditions as EC
|
14 |
-
|
15 |
|
16 |
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
17 |
logger = logging.getLogger(__name__)
|
18 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
19 |
|
20 |
@dataclass
|
21 |
class StreamConfig:
|
22 |
"""Configuration for streaming behavior"""
|
23 |
-
timeout: float = 300.0
|
24 |
retry_on_error: bool = True
|
25 |
max_retries: int = 3
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
|
|
31 |
|
32 |
class StreamingResponseGenerator:
|
33 |
-
"""
|
34 |
-
Generates Server-Sent Events (SSE) for streaming chat completions.
|
35 |
-
It takes an async generator producing text deltas and formats them into SSE chunks.
|
36 |
-
"""
|
37 |
-
|
38 |
def __init__(self, config: Optional[StreamConfig] = None):
|
39 |
self.config = config or StreamConfig()
|
40 |
|
@@ -43,220 +93,163 @@ class StreamingResponseGenerator:
|
|
43 |
completion_id: str,
|
44 |
created: int,
|
45 |
model: str,
|
46 |
-
prompt: str,
|
47 |
-
send_message_func: Callable[
|
48 |
) -> AsyncGenerator[str, None]:
|
49 |
-
"""
|
50 |
-
Creates an SSE stream from the text deltas generated by send_message_func.
|
51 |
-
"""
|
52 |
-
|
53 |
logger.info(f"[{completion_id}] Starting streaming response generation for model '{model}'.")
|
54 |
-
|
55 |
first_chunk_sent = False
|
56 |
accumulated_content_for_logging = ""
|
57 |
-
|
58 |
try:
|
59 |
-
# `send_message_func` is `ChatHandler.send_message_and_stream_response`
|
60 |
-
# It takes (prompt, model_id) and yields text deltas.
|
61 |
async for content_delta in send_message_func(prompt, model):
|
62 |
-
if not content_delta:
|
63 |
-
continue
|
64 |
-
|
65 |
accumulated_content_for_logging += content_delta
|
66 |
-
# logger.debug(f"[{completion_id}] Received content delta: '{content_delta[:50].replace(chr(10), ' ')}...' (Total: {len(accumulated_content_for_logging)})")
|
67 |
-
|
68 |
delta_payload = {"content": content_delta}
|
69 |
if not first_chunk_sent:
|
70 |
-
# The first contentful chunk should also carry the role.
|
71 |
delta_payload["role"] = "assistant"
|
72 |
first_chunk_sent = True
|
73 |
-
|
74 |
chunk_data = {
|
75 |
-
"id": completion_id,
|
76 |
-
"
|
77 |
-
"created": created,
|
78 |
-
"model": model,
|
79 |
-
"choices": [{
|
80 |
-
"index": 0,
|
81 |
-
"delta": delta_payload,
|
82 |
-
"finish_reason": None
|
83 |
-
}]
|
84 |
}
|
85 |
-
|
86 |
-
chunk_json = json.dumps(chunk_data)
|
87 |
-
# logger.debug(f"[{completion_id}] Yielding delta chunk: data: {chunk_json}")
|
88 |
-
yield f"data: {chunk_json}\n\n"
|
89 |
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
if not first_chunk_sent: # If stream ended without any content
|
94 |
-
logger.warning(f"[{completion_id}] Stream ended without any content. Sending empty assistant chunk before finish.")
|
95 |
-
empty_assistant_chunk = {
|
96 |
"id": completion_id, "object": "chat.completion.chunk", "created": created, "model": model,
|
97 |
"choices": [{"index": 0, "delta": {"role": "assistant", "content": ""}, "finish_reason": None}]
|
98 |
}
|
99 |
-
yield f"data: {json.dumps(
|
100 |
-
|
101 |
|
102 |
-
# Send the final chunk with finish_reason
|
103 |
final_chunk_data = {
|
104 |
-
"id": completion_id,
|
105 |
-
"
|
106 |
-
"created": created,
|
107 |
-
"model": model,
|
108 |
-
"choices": [{
|
109 |
-
"index": 0,
|
110 |
-
"delta": {}, # Empty delta
|
111 |
-
"finish_reason": "stop"
|
112 |
-
}]
|
113 |
}
|
114 |
-
final_chunk_json = json.dumps(final_chunk_data)
|
115 |
logger.info(f"[{completion_id}] Yielding final chunk. Total content length: {len(accumulated_content_for_logging)} chars.")
|
116 |
-
yield f"data: {
|
117 |
-
|
118 |
logger.info(f"[{completion_id}] Yielding [DONE] signal.")
|
119 |
yield "data: [DONE]\n\n"
|
120 |
-
|
121 |
except Exception as e:
|
122 |
logger.error(f"[{completion_id}] Error during streaming response generation: {e}", exc_info=True)
|
123 |
error_payload = {"content": f"\n\nError in stream: {str(e)}"}
|
124 |
-
if not first_chunk_sent:
|
125 |
-
|
126 |
-
|
127 |
-
error_chunk_data = {
|
128 |
"id": completion_id, "object": "chat.completion.chunk", "created": created, "model": model,
|
129 |
"choices": [{"index": 0, "delta": error_payload, "finish_reason": "error" }]
|
130 |
}
|
131 |
-
yield f"data: {json.dumps(
|
132 |
-
yield "data: [DONE]\n\n"
|
|
|
|
|
133 |
|
134 |
class StreamProcessor:
|
135 |
-
"""Processes streaming content with stabilization
|
136 |
-
|
137 |
def __init__(self, config: Optional[StreamConfig] = None):
|
138 |
-
|
139 |
-
|
140 |
-
self.
|
141 |
-
self.
|
142 |
-
self.
|
143 |
-
self.
|
144 |
-
self.
|
145 |
|
146 |
def set_request_id(self, request_id: str):
|
147 |
self.request_id_for_log = request_id
|
148 |
|
149 |
-
def
|
|
|
|
|
|
|
|
|
150 |
"""
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
"""
|
155 |
-
|
156 |
-
|
157 |
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
|
163 |
-
|
|
|
|
|
|
|
164 |
|
165 |
-
|
166 |
-
yield chunk_delta
|
167 |
-
last_successful_yield_time = current_time
|
168 |
-
# No specific handling for empty chunk here as poll_element_text_stream filters them.
|
169 |
|
170 |
-
|
171 |
-
#
|
172 |
-
|
173 |
-
if
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
|
178 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
179 |
|
180 |
-
|
181 |
-
|
182 |
-
|
|
|
183 |
|
184 |
-
|
185 |
-
|
186 |
-
Polls a web element's text content and yields incremental changes (deltas).
|
187 |
-
Stops if the element disappears, or if `self.response_timeout` is reached and text has not changed.
|
188 |
-
"""
|
189 |
log_prefix = f"[{self.request_id_for_log}/PollStream]"
|
190 |
-
|
191 |
-
|
192 |
-
element_previously_found = False
|
193 |
last_change_time = time.time()
|
194 |
|
195 |
-
|
196 |
-
|
197 |
-
while time.time() - start_time_for_stream < self.response_timeout:
|
198 |
loop_start_time = time.time()
|
199 |
try:
|
200 |
-
|
201 |
-
|
202 |
-
|
203 |
-
|
204 |
-
|
205 |
-
|
206 |
-
|
207 |
-
|
208 |
-
if current_text != last_text:
|
209 |
-
new_text_delta = current_text[len(last_text):]
|
210 |
-
if new_text_delta:
|
211 |
-
# logger.debug(f"{log_prefix} Yielding delta (len {len(new_text_delta)}): '{new_text_delta[:30].replace(chr(10),' ')}...'")
|
212 |
-
yield new_text_delta
|
213 |
-
last_change_time = time.time() # Update time of last actual text change
|
214 |
-
last_text = current_text
|
215 |
-
else:
|
216 |
-
# Text has not changed. If no change for `max_inactivity` (handled by read_stream_with_stabilization)
|
217 |
-
# or if `response_timeout` (overall) is hit, it will stop.
|
218 |
-
# This specific condition checks if text has stabilized for a duration longer than max_inactivity
|
219 |
-
# If this poller's `response_timeout` is very long, it relies on the outer wrapper's max_inactivity.
|
220 |
-
if time.time() - last_change_time > self.max_inactivity : # Check inactivity from *this* poller's perspective
|
221 |
-
logger.info(f"{log_prefix} Text has not changed for {self.max_inactivity:.2f}s. Assuming stable.")
|
222 |
-
return
|
223 |
-
|
224 |
-
except TimeoutException: # From WebDriverWait if element not present within its short timeout
|
225 |
-
if element_previously_found:
|
226 |
-
logger.info(f"{log_prefix} Element {element_locator} became non-present after being found. Assuming stream ended.")
|
227 |
return
|
228 |
-
|
229 |
-
logger.debug(f"{log_prefix} Element
|
230 |
-
except StaleElementReferenceException:
|
231 |
-
logger.warning(f"{log_prefix} StaleElementReferenceException for {element_locator}. Resetting and retrying find.")
|
232 |
-
last_text = ""
|
233 |
-
element_previously_found = False
|
234 |
-
except NoSuchElementException: # Should be caught by WebDriverWait's TimeoutException mostly
|
235 |
-
logger.warning(f"{log_prefix} NoSuchElementException for {element_locator} (should be rare with WebDriverWait).")
|
236 |
-
if element_previously_found: return
|
237 |
except Exception as e:
|
238 |
-
logger.error(f"{log_prefix} Unexpected error polling
|
239 |
return
|
|
|
|
|
|
|
|
|
240 |
|
241 |
-
elapsed_in_loop = time.time() - loop_start_time
|
242 |
-
sleep_duration = self.poll_interval - elapsed_in_loop
|
243 |
-
if sleep_duration > 0:
|
244 |
-
time.sleep(sleep_duration)
|
245 |
-
|
246 |
-
logger.info(f"{log_prefix} Max_wait ({self.response_timeout:.2f}s) reached for polling {element_locator} or stream naturally ended.")
|
247 |
-
|
248 |
-
|
249 |
-
# Wrapper for creating the generator instance easily
|
250 |
async def create_streaming_response(
|
251 |
completion_id: str,
|
252 |
created: int,
|
253 |
model: str,
|
254 |
prompt: str,
|
255 |
-
send_message_func: Callable[
|
256 |
-
stream_config: Optional[StreamConfig] = None
|
257 |
) -> AsyncGenerator[str, None]:
|
258 |
generator = StreamingResponseGenerator(config=stream_config)
|
259 |
async for chunk in generator.create_response(completion_id, created, model, prompt, send_message_func):
|
260 |
yield chunk
|
261 |
|
262 |
-
__all__ = ['create_streaming_response', 'StreamingResponseGenerator', 'StreamConfig', 'StreamProcessor']
|
|
|
1 |
+
# streaming.py - FINAL CORRECTED version with robust, state-aware processing.
|
2 |
import json
|
3 |
import re
|
4 |
+
import logging
|
5 |
from typing import AsyncGenerator, Callable, Optional, Iterator, Tuple
|
6 |
from dataclasses import dataclass
|
7 |
+
import sys, re, html
|
8 |
import time
|
9 |
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException, TimeoutException
|
10 |
from selenium.webdriver.common.by import By
|
11 |
from selenium.webdriver.remote.webelement import WebElement
|
12 |
from selenium.webdriver.support.ui import WebDriverWait
|
13 |
from selenium.webdriver.support import expected_conditions as EC
|
14 |
+
from bs4 import BeautifulSoup, Tag
|
15 |
|
16 |
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
17 |
logger = logging.getLogger(__name__)
|
18 |
|
19 |
+
# --- Helper Classes (largely unchanged, but included for completeness) ---
|
20 |
+
|
21 |
+
class HtmlToMarkdownConverter:
    """
    Converts a BeautifulSoup Tag element into a Markdown string.

    Intended for finalized, complete HTML elements only — partially
    streamed fragments should be held back by the caller.
    """

    _HEADINGS = ('h1', 'h2', 'h3', 'h4', 'h5', 'h6')

    def convert_element(self, el: Tag) -> str:
        """Convert one block-level element to its markdown equivalent."""
        if not isinstance(el, Tag):
            return ""

        tag_name = el.name
        if tag_name == 'pre':
            # Pull the language out of a class="language-xyz" attribute.
            match = re.search(r'class="language-(.*?)"', str(el), re.IGNORECASE)
            language = match.group(1).strip() if match else ''
            # get_text() keeps the newlines from the rendered code structure.
            body = el.get_text().strip()
            return f'```{language}\n{body}\n```'
        if tag_name == 'p':
            # Fold inline children (<strong>, <code>, ...) into markdown.
            return ''.join(map(self.convert_inline, el.contents))
        if tag_name in ('ul', 'ol'):
            # The site's HTML already embeds the bullet/number in the text.
            return '\n'.join(item.get_text().strip()
                             for item in el.find_all('li', recursive=False))
        if tag_name in self._HEADINGS:
            return f"{'#' * int(tag_name[1])} {el.get_text().strip()}"

        return el.get_text()

    def convert_inline(self, el):
        """Recursively convert an inline element (or bare string) to markdown."""
        if isinstance(el, str):
            return html.unescape(el)
        if not isinstance(el, Tag):
            return ""

        inner = ''.join(map(self.convert_inline, el.contents))

        # Symmetric wrappers for emphasis and inline code.
        wrappers = {'strong': '**', 'b': '**', 'em': '*', 'i': '*', 'code': '`'}
        marker = wrappers.get(el.name)
        if marker is not None:
            return f'{marker}{inner}{marker}'
        if el.name == 'a':
            return f'[{inner}]({el.get("href", "")})'

        return inner
|
71 |
+
|
72 |
|
73 |
@dataclass
class StreamConfig:
    """Configuration for streaming behavior"""
    timeout: float = 300.0  # overall request timeout, seconds — TODO confirm where enforced
    retry_on_error: bool = True  # presumably enables retry of failed streams; verify against caller
    max_retries: int = 3  # maximum retry attempts when retry_on_error is set
    poll_interval: float = 0.05  # seconds between DOM polls in StreamProcessor
    response_timeout: float = 900  # hard cap on one streamed response, seconds
    stabilization_timeout: float = 1.0  # quiet period before text counts as stable
    max_inactivity: float = 10.0  # abort polling if text is unchanged this long
    stream_raw_html: bool = False  # NOTE(review): not read in visible code — confirm use
    convert_html_to_markdown: bool = True  # NOTE(review): not read in visible code — confirm use
|
86 |
class StreamingResponseGenerator:
|
87 |
+
"""Generates Server-Sent Events (SSE) for streaming chat completions."""
|
|
|
|
|
|
|
|
|
88 |
def __init__(self, config: Optional[StreamConfig] = None):
    # Fall back to default streaming settings when no config is supplied.
    self.config = config or StreamConfig()
|
90 |
|
|
|
93 |
completion_id: str,
|
94 |
created: int,
|
95 |
model: str,
|
96 |
+
prompt: str,
|
97 |
+
send_message_func: Callable[..., AsyncGenerator[str, None]]
|
98 |
) -> AsyncGenerator[str, None]:
|
|
|
|
|
|
|
|
|
99 |
logger.info(f"[{completion_id}] Starting streaming response generation for model '{model}'.")
|
|
|
100 |
first_chunk_sent = False
|
101 |
accumulated_content_for_logging = ""
|
|
|
102 |
try:
|
|
|
|
|
103 |
async for content_delta in send_message_func(prompt, model):
|
104 |
+
if not content_delta: continue
|
|
|
|
|
105 |
accumulated_content_for_logging += content_delta
|
|
|
|
|
106 |
delta_payload = {"content": content_delta}
|
107 |
if not first_chunk_sent:
|
|
|
108 |
delta_payload["role"] = "assistant"
|
109 |
first_chunk_sent = True
|
|
|
110 |
chunk_data = {
|
111 |
+
"id": completion_id, "object": "chat.completion.chunk", "created": created, "model": model,
|
112 |
+
"choices": [{"index": 0, "delta": delta_payload, "finish_reason": None}]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
113 |
}
|
114 |
+
yield f"data: {json.dumps(chunk_data)}\n\n"
|
|
|
|
|
|
|
115 |
|
116 |
+
if not first_chunk_sent:
|
117 |
+
logger.warning(f"[{completion_id}] Stream ended without content. Sending empty assistant chunk.")
|
118 |
+
empty_chunk = {
|
|
|
|
|
|
|
119 |
"id": completion_id, "object": "chat.completion.chunk", "created": created, "model": model,
|
120 |
"choices": [{"index": 0, "delta": {"role": "assistant", "content": ""}, "finish_reason": None}]
|
121 |
}
|
122 |
+
yield f"data: {json.dumps(empty_chunk)}\n\n"
|
|
|
123 |
|
|
|
124 |
final_chunk_data = {
|
125 |
+
"id": completion_id, "object": "chat.completion.chunk", "created": created, "model": model,
|
126 |
+
"choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
127 |
}
|
|
|
128 |
logger.info(f"[{completion_id}] Yielding final chunk. Total content length: {len(accumulated_content_for_logging)} chars.")
|
129 |
+
yield f"data: {json.dumps(final_chunk_data)}\n\n"
|
|
|
130 |
logger.info(f"[{completion_id}] Yielding [DONE] signal.")
|
131 |
yield "data: [DONE]\n\n"
|
|
|
132 |
except Exception as e:
|
133 |
logger.error(f"[{completion_id}] Error during streaming response generation: {e}", exc_info=True)
|
134 |
error_payload = {"content": f"\n\nError in stream: {str(e)}"}
|
135 |
+
if not first_chunk_sent: error_payload["role"] = "assistant"
|
136 |
+
error_chunk = {
|
|
|
|
|
137 |
"id": completion_id, "object": "chat.completion.chunk", "created": created, "model": model,
|
138 |
"choices": [{"index": 0, "delta": error_payload, "finish_reason": "error" }]
|
139 |
}
|
140 |
+
yield f"data: {json.dumps(error_chunk)}\n\n"
|
141 |
+
yield "data: [DONE]\n\n"
|
142 |
+
|
143 |
+
# --- Main Processor with Corrected Logic ---
|
144 |
|
145 |
class StreamProcessor:
|
146 |
+
"""Processes streaming content from Selenium, with stabilization and content conversion."""
|
|
|
147 |
def __init__(self, config: Optional[StreamConfig] = None):
    """Initialize the processor, copying timing knobs from the config."""
    settings = config or StreamConfig()
    self.poll_interval = settings.poll_interval
    self.response_timeout = settings.response_timeout
    self.stabilization_timeout = settings.stabilization_timeout
    self.max_inactivity = settings.max_inactivity
    # Replaced via set_request_id() once a specific request is served.
    self.request_id_for_log = "StreamProc"
    self.markdown_converter = HtmlToMarkdownConverter()
|
155 |
|
156 |
def set_request_id(self, request_id: str):
    # Tag subsequent log lines with the originating request id.
    self.request_id_for_log = request_id
|
158 |
|
159 |
+
def get_processed_text_stream(self, driver, element_locator: Tuple[str, str]) -> Iterator[str]:
|
160 |
+
html_stream = self._poll_element_content_stream(driver, element_locator)
|
161 |
+
return self._convert_html_stream_to_markdown_deltas(html_stream)
|
162 |
+
|
163 |
+
def _convert_html_stream_to_markdown_deltas(self, html_iterator: Iterator[str]) -> Iterator[str]:
|
164 |
"""
|
165 |
+
Statefully converts a stream of HTML snapshots to Markdown deltas.
|
166 |
+
It identifies "finalized" vs "active" blocks to avoid streaming
|
167 |
+
volatile, incomplete code blocks, preventing corruption.
|
168 |
"""
|
169 |
+
last_yielded_markdown = ""
|
170 |
+
last_html_snapshot = ""
|
171 |
|
172 |
+
for full_html_snapshot in html_iterator:
|
173 |
+
last_html_snapshot = full_html_snapshot
|
174 |
+
soup = BeautifulSoup(f"<ol>{full_html_snapshot}</ol>", 'lxml')
|
175 |
+
|
176 |
+
# Find all content blocks in the last message bubble.
|
177 |
+
# The structure is <ol> -> <div> (bubble) -> ... -> <div class="prose"> -> <p/pre/ul>
|
178 |
+
all_prose_divs = soup.select('div.prose')
|
179 |
+
if not all_prose_divs:
|
180 |
+
continue
|
181 |
|
182 |
+
content_elements = [div.contents[0] for div in all_prose_divs if div.contents]
|
|
|
|
|
|
|
183 |
|
184 |
+
finalized_elements = content_elements
|
185 |
+
# Check if the very last element is an incomplete code block.
|
186 |
+
# If so, we don't process it in this pass, we wait for it to be finalized.
|
187 |
+
if content_elements and content_elements[-1].name == 'pre':
|
188 |
+
finalized_elements = content_elements[:-1]
|
189 |
+
|
190 |
+
# Convert all finalized elements to markdown
|
191 |
+
md_pieces = [self.markdown_converter.convert_element(el) for el in finalized_elements]
|
192 |
+
current_safe_markdown = "\n\n".join(md_pieces)
|
193 |
+
|
194 |
+
# Yield the delta if it's a simple append
|
195 |
+
if current_safe_markdown != last_yielded_markdown:
|
196 |
+
if current_safe_markdown.startswith(last_yielded_markdown):
|
197 |
+
delta = current_safe_markdown[len(last_yielded_markdown):]
|
198 |
+
if delta:
|
199 |
+
yield delta.lstrip('\n')
|
200 |
+
last_yielded_markdown = current_safe_markdown
|
201 |
+
|
202 |
+
# After the loop, the stream has finished. Process the very last snapshot in full.
|
203 |
+
final_soup = BeautifulSoup(f"<ol>{last_html_snapshot}</ol>", 'lxml')
|
204 |
+
all_prose_divs = final_soup.select('div.prose')
|
205 |
+
content_elements = [div.contents[0] for div in all_prose_divs if div.contents]
|
206 |
+
final_md_pieces = [self.markdown_converter.convert_element(el) for el in content_elements]
|
207 |
+
final_markdown = "\n\n".join(final_md_pieces)
|
208 |
|
209 |
+
if final_markdown.startswith(last_yielded_markdown):
|
210 |
+
final_delta = final_markdown[len(last_yielded_markdown):]
|
211 |
+
if final_delta:
|
212 |
+
yield final_delta.lstrip('\n')
|
213 |
|
214 |
+
|
215 |
+
def _poll_element_content_stream(self, driver, element_locator: Tuple[str, str]) -> Iterator[str]:
|
|
|
|
|
|
|
216 |
log_prefix = f"[{self.request_id_for_log}/PollStream]"
|
217 |
+
start_time = time.time()
|
218 |
+
last_content = ""
|
|
|
219 |
last_change_time = time.time()
|
220 |
|
221 |
+
while time.time() - start_time < self.response_timeout:
|
|
|
|
|
222 |
loop_start_time = time.time()
|
223 |
try:
|
224 |
+
element = WebDriverWait(driver, self.poll_interval * 2).until(EC.presence_of_element_located(element_locator))
|
225 |
+
current_content = element.get_attribute('innerHTML')
|
226 |
+
if current_content != last_content:
|
227 |
+
yield current_content
|
228 |
+
last_content = current_content
|
229 |
+
last_change_time = time.time()
|
230 |
+
elif time.time() - last_change_time > self.max_inactivity:
|
231 |
+
logger.info(f"{log_prefix} Content stable for {self.max_inactivity:.2f}s. Ending poll.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
232 |
return
|
233 |
+
except (TimeoutException, StaleElementReferenceException, NoSuchElementException):
|
234 |
+
logger.debug(f"{log_prefix} Element not present or stale. Continuing poll.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
235 |
except Exception as e:
|
236 |
+
logger.error(f"{log_prefix} Unexpected error polling: {e}", exc_info=True)
|
237 |
return
|
238 |
+
|
239 |
+
sleep_duration = self.poll_interval - (time.time() - loop_start_time)
|
240 |
+
if sleep_duration > 0: time.sleep(sleep_duration)
|
241 |
+
logger.warning(f"{log_prefix} Polling finished due to max wait ({self.response_timeout:.2f}s).")
|
242 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
243 |
async def create_streaming_response(
|
244 |
completion_id: str,
|
245 |
created: int,
|
246 |
model: str,
|
247 |
prompt: str,
|
248 |
+
send_message_func: Callable[..., AsyncGenerator[str, None]],
|
249 |
+
stream_config: Optional[StreamConfig] = None
|
250 |
) -> AsyncGenerator[str, None]:
|
251 |
generator = StreamingResponseGenerator(config=stream_config)
|
252 |
async for chunk in generator.create_response(completion_id, created, model, prompt, send_message_func):
|
253 |
yield chunk
|
254 |
|
255 |
+
__all__ = ['create_streaming_response', 'StreamingResponseGenerator', 'StreamConfig', 'StreamProcessor', 'HtmlToMarkdownConverter']
|
test_api.py
CHANGED
@@ -8,31 +8,10 @@ import random
|
|
8 |
# Configure the OpenAI client to point to the local server.
|
9 |
# The API key is required by the library but not used by the local server.
|
10 |
client = OpenAI(
|
11 |
-
base_url="
|
12 |
api_key=os.environ.get("OPENAI_API_KEY", "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
|
13 |
)
|
14 |
|
15 |
-
@pytest.mark.asyncio
|
16 |
-
async def test_list_models_openai():
|
17 |
-
"""Test the /models endpoint to get a list of available models."""
|
18 |
-
print("\nTesting /models endpoint...")
|
19 |
-
try:
|
20 |
-
start_time = time.time()
|
21 |
-
models_response = client.models.list()
|
22 |
-
end_time = time.time()
|
23 |
-
|
24 |
-
print(f"Received response from /models in {end_time - start_time:.2f}s")
|
25 |
-
assert models_response is not None
|
26 |
-
assert len(models_response.data) > 0, "No models were returned from the /models endpoint."
|
27 |
-
|
28 |
-
print(f"Found {len(models_response.data)} models:")
|
29 |
-
for model in models_response.data:
|
30 |
-
print(f" - Model ID: {model.id}")
|
31 |
-
assert isinstance(model.id, str)
|
32 |
-
assert model.object == "model"
|
33 |
-
|
34 |
-
except Exception as e:
|
35 |
-
pytest.fail(f"/models endpoint test failed using openai lib: {e}")
|
36 |
|
37 |
@pytest.mark.asyncio
|
38 |
async def test_chat_completion_streaming_openai():
|
@@ -40,29 +19,16 @@ async def test_chat_completion_streaming_openai():
|
|
40 |
Test the /chat/completions endpoint for streaming requests.
|
41 |
This test first fetches an available model from the /models endpoint.
|
42 |
"""
|
|
|
43 |
|
44 |
-
|
45 |
-
print("\nFetching available models before chat completion test...")
|
46 |
-
try:
|
47 |
-
models_response = client.models.list()
|
48 |
-
assert len(models_response.data) > 0, "Cannot run chat completion test: no models available."
|
49 |
-
# Use the first model from the list for the test.
|
50 |
-
model_to_test = models_response.data[random.randint(0, len(models_response.data) - 1)].id
|
51 |
-
print(f"Will use model '{model_to_test}' for the test.")
|
52 |
-
except Exception as e:
|
53 |
-
pytest.fail(f"Failed to fetch models before running chat completion test: {e}")
|
54 |
-
|
55 |
-
# Step 2: Use the fetched model to run the chat completion test.
|
56 |
-
messages = [{"role": "user", "content": "squirrel pet simulator in python with emojis"}]
|
57 |
-
|
58 |
-
print(f"\nTesting streaming chat completion with model '{model_to_test}'...")
|
59 |
accumulated_content = ""
|
60 |
deltas_received = 0
|
61 |
start_time = time.time()
|
62 |
|
63 |
try:
|
64 |
stream = client.chat.completions.create(
|
65 |
-
model=
|
66 |
messages=messages,
|
67 |
stream=True
|
68 |
)
|
@@ -100,7 +66,6 @@ async def test_chat_completion_streaming_openai():
|
|
100 |
|
101 |
assert deltas_received > 0, "No content deltas were received."
|
102 |
assert len(accumulated_content) > 0, "Accumulated content is empty."
|
103 |
-
|
104 |
-
|
105 |
except Exception as e:
|
106 |
pytest.fail(f"Streaming chat completion failed: {e}")
|
|
|
8 |
# Configure the OpenAI client to point to the local server.
|
9 |
# The API key is required by the library but not used by the local server.
|
10 |
client = OpenAI(
|
11 |
+
base_url="http://localhost:8000/",
|
12 |
api_key=os.environ.get("OPENAI_API_KEY", "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
|
13 |
)
|
14 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
15 |
|
16 |
@pytest.mark.asyncio
|
17 |
async def test_chat_completion_streaming_openai():
|
|
|
19 |
Test the /chat/completions endpoint for streaming requests.
|
20 |
This test first fetches an available model from the /models endpoint.
|
21 |
"""
|
22 |
+
messages = [{"role": "user", "content": "bionomial thoerem calc in python concise code but explain before the code block"}]
|
23 |
|
24 |
+
print(f"\nTesting streaming chat completion with model 'Gemini 2.0 Flash'...")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
accumulated_content = ""
|
26 |
deltas_received = 0
|
27 |
start_time = time.time()
|
28 |
|
29 |
try:
|
30 |
stream = client.chat.completions.create(
|
31 |
+
model="Gemini 2.0 Flash",
|
32 |
messages=messages,
|
33 |
stream=True
|
34 |
)
|
|
|
66 |
|
67 |
assert deltas_received > 0, "No content deltas were received."
|
68 |
assert len(accumulated_content) > 0, "Accumulated content is empty."
|
69 |
+
|
|
|
70 |
except Exception as e:
|
71 |
pytest.fail(f"Streaming chat completion failed: {e}")
|