diff --git "a/app.py" "b/app.py" --- "a/app.py" +++ "b/app.py" @@ -77,12 +77,12 @@ class OTCodeEditor: def __init__(self, initial_value: Dict[str, str]): self.files: Dict[str, str] = initial_value.copy() self.revision = 0 # Basic revision counter, not used for OT logic - logger.debug(f"OTCodeEditor initialized with files: {{list(self.files.keys())}}") + logger.debug(f"OTCodeEditor initialized with files: {list(self.files.keys())}") def apply_delta(self, delta: Dict[str, Any]): # VERY basic placeholder: This logs the delta but does NOT perform OT. # It does NOT handle concurrent edits safely. - logger.warning(f"Placeholder apply_delta called. Delta: {{str(delta)[:200]}}. " + logger.warning(f"Placeholder apply_delta called. Delta: {str(delta)[:200]}. " "WARNING: Full Operational Transformation is NOT implemented. Concurrent edits are UNSAFE.") # Increment revision regardless for basic tracking self.revision += 1 @@ -108,7 +108,7 @@ try: block_shadow="*shadow_drop_lg", ) except AttributeError as e: - logger.warning(f"Could not apply all theme settings (might be Gradio version difference): {{e}}. Using default Soft theme.") + logger.warning(f"Could not apply all theme settings (might be Gradio version difference): {e}. 
Using default Soft theme.") theme = gr.themes.Soft() @@ -123,25 +123,25 @@ class WebhookHandler(BaseHTTPRequestHandler): payload_bytes = self.rfile.read(content_length) payload = json.loads(payload_bytes.decode('utf-8')) except json.JSONDecodeError: - logger.error(f"Invalid JSON payload received: {payload_bytes[:500]}}}") + logger.error(f"Invalid JSON payload received: {payload_bytes[:500]}") self.send_response(400) self.send_header("Content-type", "text/plain") self.end_headers() self.wfile.write(b"Invalid JSON payload") return except Exception as e: - logger.error(f"Error reading webhook payload: {e}}}") + logger.error(f"Error reading webhook payload: {e}") self.send_response(500) self.end_headers() return event = self.headers.get('X-GitHub-Event') delivery_id = self.headers.get('X-GitHub-Delivery') - logger.info(f"Received GitHub webhook event: {event}}} (Delivery ID: {delivery_id}}})") + logger.info(f"Received GitHub webhook event: {event} (Delivery ID: {delivery_id})") if event == 'issues' and WebhookHandler.manager_instance and WebhookHandler.main_loop: action = payload.get('action') - logger.info(f"Issue action: {{action}}") + logger.info(f"Issue action: {action}") # Handle common actions that affect issue state or content if action in ['opened', 'reopened', 'closed', 'assigned', 'unassigned', 'edited', 'labeled', 'unlabeled', 'milestoned', 'demilestoned']: if WebhookHandler.main_loop.is_running(): @@ -158,7 +158,7 @@ class WebhookHandler(BaseHTTPRequestHandler): elif event == 'ping': logger.info("Received GitHub webhook ping.") else: - logger.warning(f"Unhandled event type: {event}}} or manager/loop not initialized.") + logger.warning(f"Unhandled event type: {event} or manager/loop not initialized.") self.send_response(200) self.send_header("Content-type", "text/plain") @@ -253,15 +253,15 @@ class IssueManager: return "Error: Issue data for this suggestion request (hash) not found in current state. The issue might have been updated or closed. 
Please re-select the issue." if model_key not in HF_MODELS: - logger.error(f"Invalid model key '{model_key}' requested for suggestion.") - return f"Error: Invalid model key: {model_key}}" + logger.error(f"Invalid model key '{model_key}' requested for suggestion.") + return f"Error: Invalid model key: {model_key}" # Keep str return type (callers expect a string, like the path above) - logger.info(f"Cache miss or first request for issue hash {issue_hash}}. Requesting suggestion from {model_key}}.") + + logger.info(f"Cache miss or first request for issue hash {issue_hash}. Requesting suggestion from {model_key}.") return await self.suggest_resolution(found_issue, model_key) async def handle_webhook_event(self, event: str, action: str, payload: dict): """Processes incoming webhook events to update the issue state.""" - logger.info(f"Processing webhook event: {event}}, action: {action}}") + logger.info(f"Processing webhook event: {event}, action: {action}") issue_data = payload.get('issue') repo_data = payload.get('repository') @@ -271,7 +271,7 @@ class IssueManager: event_repo_url = repo_data.get('html_url') if event_repo_url != self.repo_url: - logger.info(f"Ignoring webhook event for different repository: {event_repo_url}}") + logger.info(f"Ignoring webhook event for different repository: {event_repo_url}") return issue_number = issue_data.get('number') @@ -283,7 +283,7 @@ class IssueManager: significant_change = False # Flag for changes affecting clustering/content/AI caches if action == 'closed': - logger.info(f"Webhook: Removing closed issue {issue_number}} from active list.") + logger.info(f"Webhook: Removing closed issue {issue_number} from active list.") if self.issues.pop(issue_number, None): needs_ui_update = True significant_change = True @@ -298,7 +298,7 @@ class IssueManager: self.code_editors.pop(issue_number, None) elif action in ['opened', 'reopened', 'edited', 'assigned', 'unassigned', 'labeled', 'unlabeled', 'milestoned', 'demilestoned']: - logger.info(f"Webhook: Adding/Updating issue {issue_number}} (action: {action}}).") + logger.info(f"Webhook: Adding/Updating 
issue {issue_number} (action: {action}).") processed_data = self._process_issue_data(issue_data) old_issue = self.issues.get(issue_number) @@ -307,7 +307,7 @@ class IssueManager: old_issue.get('title') != processed_data.get('title') or \ set(old_issue.get('labels', [])) != set(processed_data.get('labels', [])): significant_change = True - logger.info(f"Significant change detected for issue {issue_number}} (content/labels).") + logger.info(f"Significant change detected for issue {issue_number} (content/labels).") # Invalidate ALL precomputed AI state on significant edit self.precomputed_context.pop(issue_number, None) self.precomputed_summaries.pop(issue_number, None) @@ -319,7 +319,7 @@ class IssueManager: old_issue.get('updated_at') != processed_data.get('updated_at') or \ old_issue.get('assignee') != processed_data.get('assignee') or \ set(old_issue.get('labels', [])) != set(processed_data.get('labels', [])): - logger.debug(f"State-related change detected for issue {issue_number}} (update time, assignee, labels). Idle loop will re-evaluate.") + logger.debug(f"State-related change detected for issue {issue_number} (update time, assignee, labels). Idle loop will re-evaluate.") self.issues[issue_number] = processed_data needs_ui_update = True @@ -339,10 +339,10 @@ class IssueManager: def _increment_change_counter(self): """Increments change counter and sets recluster flag if threshold reached.""" self._webhook_change_count += 1 - logger.debug(f"Significant change detected. Change count: {self._webhook_change_count}}/{self.recluster_threshold}}") + logger.debug(f"Significant change detected. Change count: {self._webhook_change_count}/{self.recluster_threshold}") if self._webhook_change_count >= self.recluster_threshold: self.needs_recluster = True - logger.info(f"Change threshold ({self.recluster_threshold}}) reached. Flagging for re-clustering.") + logger.info(f"Change threshold ({self.recluster_threshold}) reached. 
Flagging for re-clustering.") def _process_issue_data(self, issue_data: dict) -> dict: """Helper to structure issue data consistently.""" @@ -372,7 +372,7 @@ class IssueManager: plot_bgcolor='rgba(0,0,0,0)', paper_bgcolor='rgba(0,0,0,0)') return [], empty_fig, "Error: Repository URL and Hugging Face Token are required.", empty_fig - logger.info(f"Starting new issue crawl and setup for {repo_url}}") + logger.info(f"Starting new issue crawl and setup for {repo_url}") # --- Reset Manager State --- self.stop_idle_processing() @@ -402,16 +402,16 @@ class IssueManager: # --- Repository Cloning/Updating --- match = re.match(r"https?://github\.com/([^/]+)/([^/]+)", self.repo_url) if not match: - logger.error(f"Invalid GitHub URL format: {self.repo_url}}") + logger.error(f"Invalid GitHub URL format: {self.repo_url}") empty_fig = go.Figure() empty_fig.update_layout(title="Issue Severity Distribution", annotations=[{"text": "Invalid URL.", "xref": "paper", "yref": "paper", "showarrow": False}], plot_bgcolor='rgba(0,0,0,0)', paper_bgcolor='rgba(0,0,0,0)') return [], empty_fig, "Error: Invalid GitHub URL format. Use https://github.com/owner/repo", empty_fig self.repo_owner, self.repo_name = match.groups() - self.repo_local_path = WORKSPACE / f"{self.repo_owner}}_{self.repo_name}}" + self.repo_local_path = WORKSPACE / f"{self.repo_owner}_{self.repo_name}" try: if self.repo_local_path.exists(): - logger.info(f"Attempting to update existing repository clone at {self.repo_local_path}}") + logger.info(f"Attempting to update existing repository clone at {self.repo_local_path}") try: self.repo = Repo(self.repo_local_path) if self.repo.remotes: @@ -420,70 +420,70 @@ class IssueManager: if remote_url not in expected_urls: logger.warning(f"Existing repo path {self.repo_local_path} has different remote URL ('{remote_url}' vs '{self.repo_url}'). 
Re-cloning.") shutil.rmtree(self.repo_local_path) - self.repo = Repo.clone_from(self.repo_url, self.repo_local_path, progress=lambda op, cur, tot, msg: logger.debug(f"Clone progress: {msg}}")) + self.repo = Repo.clone_from(self.repo_url, self.repo_local_path, progress=lambda op, cur, tot, msg: logger.debug(f"Clone progress: {msg}")) else: logger.info("Pulling latest changes...") - self.repo.remotes.origin.fetch(progress=lambda op, cur, tot, msg: logger.debug(f"Fetch progress: {msg}}")) - self.repo.remotes.origin.pull(progress=lambda op, cur, tot, msg: logger.debug(f"Pull progress: {msg}}")) + self.repo.remotes.origin.fetch(progress=lambda op, cur, tot, msg: logger.debug(f"Fetch progress: {msg}")) + self.repo.remotes.origin.pull(progress=lambda op, cur, tot, msg: logger.debug(f"Pull progress: {msg}")) if self.repo.git.rev_parse('--is-shallow-repository').strip() == 'true': logger.info("Repository is shallow, unshallowing...") self.repo.git.fetch('--unshallow') else: - logger.warning(f"Existing repo at {self.repo_local_path}} has no remotes defined. Re-cloning.") + logger.warning(f"Existing repo at {self.repo_local_path} has no remotes defined. Re-cloning.") shutil.rmtree(self.repo_local_path) - self.repo = Repo.clone_from(self.repo_url, self.repo_local_path, progress=lambda op, cur, tot, msg: logger.debug(f"Clone progress: {msg}}")) + self.repo = Repo.clone_from(self.repo_url, self.repo_local_path, progress=lambda op, cur, tot, msg: logger.debug(f"Clone progress: {msg}")) except (InvalidGitRepositoryError, NoSuchPathError): - logger.warning(f"Invalid or missing Git repository at {self.repo_local_path}}. Re-cloning.") + logger.warning(f"Invalid or missing Git repository at {self.repo_local_path}. 
Re-cloning.") if self.repo_local_path.exists(): shutil.rmtree(self.repo_local_path) - self.repo = Repo.clone_from(self.repo_url, self.repo_local_path, progress=lambda op, cur, tot, msg: logger.debug(f"Clone progress: {msg}}")) + self.repo = Repo.clone_from(self.repo_url, self.repo_local_path, progress=lambda op, cur, tot, msg: logger.debug(f"Clone progress: {msg}")) except GitCommandError as git_err: - logger.error(f"Git pull/update error: {git_err}}. Trying to proceed with existing copy, but it might be stale.") + logger.error(f"Git pull/update error: {git_err}. Trying to proceed with existing copy, but it might be stale.") if not self.repo: try: self.repo = Repo(self.repo_local_path) except Exception: logger.error("Failed to even load existing repo after pull error.") else: - logger.info(f"Cloning repository {self.repo_url}} to {self.repo_local_path}}") - self.repo = Repo.clone_from(self.repo_url, self.repo_local_path, progress=lambda op, cur, tot, msg: logger.debug(f"Clone progress: {msg}}")) + logger.info(f"Cloning repository {self.repo_url} to {self.repo_local_path}") + self.repo = Repo.clone_from(self.repo_url, self.repo_local_path, progress=lambda op, cur, tot, msg: logger.debug(f"Clone progress: {msg}")) logger.info("Repository clone/update process finished.") if not self.repo: raise Exception("Repository object could not be initialized.") except GitCommandError as e: - logger.error(f"Failed to clone/update repository: {e}}") + logger.error(f"Failed to clone/update repository: {e}") empty_fig = go.Figure() empty_fig.update_layout(title="Issue Severity Distribution", annotations=[{"text": "Repo Error.", "xref": "paper", "yref": "paper", "showarrow": False}], plot_bgcolor='rgba(0,0,0,0)', paper_bgcolor='rgba(0,0,0,0)') - return [], empty_fig, f"Error cloning/updating repository: {e}}. Check URL, permissions, and network.", empty_fig + return [], empty_fig, f"Error cloning/updating repository: {e}. 
Check URL, permissions, and network.", empty_fig except Exception as e: - logger.exception(f"An unexpected error occurred during repository handling: {e}}") + logger.exception(f"An unexpected error occurred during repository handling: {e}") empty_fig = go.Figure() empty_fig.update_layout(title="Issue Severity Distribution", annotations=[{"text": "Repo Error.", "xref": "paper", "yref": "paper", "showarrow": False}], plot_bgcolor='rgba(0,0,0,0)', paper_bgcolor='rgba(0,0,0,0)') - return [], empty_fig, f"An unexpected error occurred during repo setup: {e}}", empty_fig + return [], empty_fig, f"An unexpected error occurred during repo setup: {e}", empty_fig # --- Issue Fetching --- - api_url = f"{GITHUB_API}}/{self.repo_owner}}/{self.repo_name}}/issues?state=open&per_page=100" + api_url = f"{GITHUB_API}/{self.repo_owner}/{self.repo_name}/issues?state=open&per_page=100" headers = {"Accept": "application/vnd.github.v3+json"} if github_token: - headers["Authorization"] = f"token {github_token}}" + headers["Authorization"] = f"token {github_token}" try: all_issues_data = [] page = 1 - logger.info(f"Fetching open issues from GitHub API (repo: {self.repo_owner}}/{self.repo_name}})...") + logger.info(f"Fetching open issues from GitHub API (repo: {self.repo_owner}/{self.repo_name})...") async with aiohttp.ClientSession(headers=headers) as session: while True: - paginated_url = f"{api_url}}&page={page}}" - logger.debug(f"Fetching URL: {paginated_url}}") + paginated_url = f"{api_url}&page={page}" + logger.debug(f"Fetching URL: {paginated_url}") async with session.get(paginated_url) as response: rate_limit_remaining = response.headers.get('X-RateLimit-Remaining') - logger.debug(f"GitHub API Response Status: {response.status}}, RateLimit Remaining: {rate_limit_remaining}}") + logger.debug(f"GitHub API Response Status: {response.status}, RateLimit Remaining: {rate_limit_remaining}") response.raise_for_status() issues_page_data = await response.json() if not issues_page_data: break - 
logger.info(f"Fetched page {page}} with {len(issues_page_data)}} items.") + logger.info(f"Fetched page {page} with {len(issues_page_data)} items.") all_issues_data.extend(issues_page_data) link_header = response.headers.get('Link') @@ -493,14 +493,14 @@ class IssueManager: page += 1 await asyncio.sleep(0.1) - logger.info(f"Total items fetched (including potential PRs): {len(all_issues_data)}}") + logger.info(f"Total items fetched (including potential PRs): {len(all_issues_data)}") self.issues = { issue_data['number']: self._process_issue_data(issue_data) for issue_data in all_issues_data if 'pull_request' not in issue_data } - logger.info(f"Filtered out pull requests, {len(self.issues)}} actual open issues remaining.") + logger.info(f"Filtered out pull requests, {len(self.issues)} actual open issues remaining.") empty_fig = go.Figure() empty_fig.update_layout(title="Issue Severity Distribution", xaxis={"visible": False}, yaxis={"visible": False}, @@ -533,7 +533,7 @@ class IssueManager: if 0 <= index < len(self.issue_list_for_clustering): index_to_cluster_id[index] = cluster_id else: - logger.warning(f"Clustering returned invalid index {index}} for list of length {len(self.issue_list_for_clustering)}}") + logger.warning(f"Clustering returned invalid index {index} for list of length {len(self.issue_list_for_clustering)}") for i, issue in enumerate(self.issue_list_for_clustering): severity = self._determine_severity(issue['labels']) @@ -553,15 +553,15 @@ class IssueManager: self.start_broadcast_loop() self.start_idle_processing() - success_msg = f"Found {len(self.issues)}} open issues. Clustered into {len(self.issue_clusters)}} groups. Repo ready. Background analysis started." + success_msg = f"Found {len(self.issues)} open issues. Clustered into {len(self.issue_clusters)} groups. Repo ready. Background analysis started." 
logger.info(success_msg) # Return both plots return dataframe_data, stats_fig, success_msg, stats_fig # Mypy may complain about return type mismatch if not explicitly handled except aiohttp.ClientResponseError as e: logger.error(f"GitHub API request failed: Status={e.status}, Message='{e.message}', URL='{e.request_info.url}'") - error_msg = f"Error fetching issues: {e.status}} - {e.message}}. Check token/URL." - if e.status == 404: error_msg = f"Error: Repository not found at {self.repo_url}}." + error_msg = f"Error fetching issues: {e.status} - {e.message}. Check token/URL." + if e.status == 404: error_msg = f"Error: Repository not found at {self.repo_url}." elif e.status == 401: error_msg = "Error: Invalid GitHub token or insufficient permissions for this repository." elif e.status == 403: rate_limit_reset = e.headers.get('X-RateLimit-Reset') @@ -569,7 +569,7 @@ class IssueManager: if rate_limit_reset: try: reset_time_str = datetime.fromtimestamp(int(rate_limit_reset), timezone.utc).strftime('%Y-%m-%d %H:%M:%S %Z') except ValueError: pass - error_msg = f"Error: GitHub API rate limit likely exceeded or access forbidden (Remaining: {rate_limit_remaining}}). Reset time: {reset_time_str}}. Check token or wait." + error_msg = f"Error: GitHub API rate limit likely exceeded or access forbidden (Remaining: {rate_limit_remaining}). Reset time: {reset_time_str}. Check token or wait." 
self.stop_idle_processing() self.stop_broadcast_loop() empty_fig = go.Figure() @@ -578,10 +578,10 @@ except Exception as e: self.stop_idle_processing() self.stop_broadcast_loop() - logger.exception(f"An unexpected error occurred during issue crawl: {e}}") + logger.exception(f"An unexpected error occurred during issue crawl: {e}") empty_fig = go.Figure() empty_fig.update_layout(title="Issue Severity Distribution", annotations=[{"text": "Unexpected Error.", "xref": "paper", "yref": "paper", "showarrow": False}], plot_bgcolor='rgba(0,0,0,0)', paper_bgcolor='rgba(0,0,0,0)') - return [], empty_fig, f"An unexpected error occurred: {e}}", empty_fig + return [], empty_fig, f"An unexpected error occurred: {e}", empty_fig def _determine_severity(self, labels: List[str]) -> str: """Determines issue severity based on labels using predefined rules.""" @@ -597,7 +597,7 @@ class IssueManager: if not filtered_counts: fig = go.Figure() fig.update_layout(title="Issue Severity Distribution", xaxis={"visible": False}, yaxis={"visible": False}, - annotations=[{"text": "No issues to display.", "xref": "paper", "yref": "paper", "showarrow": False, "font": {"size": 16}}], + annotations=[{"text": "No issues to display.", "xref": "paper", "yref": "paper", "showarrow": False, "font": {"size": 16}}], plot_bgcolor='rgba(0,0,0,0)', paper_bgcolor='rgba(0,0,0,0)') return fig @@ -629,7 +629,7 @@ class IssueManager: return num_issues = len(self.issue_list_for_clustering) - logger.info(f"Generating embeddings for {num_issues}} issues for clustering...") + logger.info(f"Generating embeddings for {num_issues} issues for clustering...") try: texts_to_embed = [ f"Title: {i.get('title','')} Body: {i.get('body','')[:1500]}" @@ -642,7 +642,7 @@ class IssueManager: self.issue_clusters = {} return - logger.info(f"Generated {len(embeddings)}} embeddings. Running HDBSCAN clustering...") + logger.info(f"Generated {len(embeddings)} embeddings. 
Running HDBSCAN clustering...") clusterer = HDBSCAN(min_cluster_size=2, metric='cosine', allow_single_cluster=True, gen_min_span_tree=True) clusters = clusterer.fit_predict(embeddings) @@ -658,7 +658,7 @@ class IssueManager: new_issue_clusters[cluster_id_int].append(i) self.issue_clusters = new_issue_clusters - logger.info(f"Clustering complete. Found {len(self.issue_clusters)}} clusters (min size 2) with {noise_count}} noise points.") + logger.info(f"Clustering complete. Found {len(self.issue_clusters)} clusters (min size 2) with {noise_count} noise points.") # Reset the change counter and flag after successful clustering self._webhook_change_count = 0 @@ -666,7 +666,7 @@ class IssueManager: logger.debug("Reset webhook change counter and recluster flag after clustering.") except Exception as e: - logger.exception(f"Error during issue clustering: {e}}") + logger.exception(f"Error during issue clustering: {e}") self.issue_clusters = {} def _identify_potential_duplicates(self): @@ -681,11 +681,11 @@ class IssueManager: for i, issue in enumerate(self.issue_list_for_clustering): issue_id = issue.get('id') if issue_id is None: - logger.warning(f"Issue at index {i}} in clustering list is missing an ID.") + logger.warning(f"Issue at index {i} in clustering list is missing an ID.") continue index_to_id[i] = issue_id except Exception as e: - logger.error(f"Error creating index-to-ID map for duplicate check: {e}}. Issue list might be inconsistent.") + logger.error(f"Error creating index-to-ID map for duplicate check: {e}. 
Issue list might be inconsistent.") return for cluster_id, indices in self.issue_clusters.items(): @@ -695,7 +695,7 @@ class IssueManager: for issue_id in cluster_issue_ids: self.potential_duplicates[issue_id] = [other_id for other_id in cluster_issue_ids if other_id != issue_id] - logger.info(f"Identified potential duplicates for {len(self.potential_duplicates)}} issues based on clustering.") + logger.info(f"Identified potential duplicates for {len(self.potential_duplicates)} issues based on clustering.") async def _generate_embeddings(self, texts: List[str]): """Generates sentence embeddings using Hugging Face Inference API.""" @@ -707,54 +707,54 @@ class IssueManager: return [] model_id = "sentence-transformers/all-mpnet-base-v2" - api_url = f"{HF_INFERENCE_API}}/{model_id}}" - headers = {"Authorization": f"Bearer {self.hf_token}}"} + api_url = f"{HF_INFERENCE_API}/{model_id}" + headers = {"Authorization": f"Bearer {self.hf_token}"} timeout = aiohttp.ClientTimeout(total=180) - logger.info(f"Requesting embeddings from {api_url}} for {len(texts)}} texts.") + logger.info(f"Requesting embeddings from {api_url} for {len(texts)} texts.") async with aiohttp.ClientSession(headers=headers, timeout=timeout) as session: try: - payload = {"inputs": texts, "options": {"wait_for_model": True}} + payload = {"inputs": texts, "options": {"wait_for_model": True}} async with session.post(api_url, json=payload) as response: rate_limit_remaining = response.headers.get('X-Ratelimit-Remaining') - logger.debug(f"HF Embedding API Response Status: {response.status}}, RateLimit Remaining: {rate_limit_remaining}}") + logger.debug(f"HF Embedding API Response Status: {response.status}, RateLimit Remaining: {rate_limit_remaining}") response.raise_for_status() result = await response.json() if isinstance(result, list) and all(isinstance(emb, list) and all(isinstance(f, float) for f in emb) for emb in result): if len(result) == len(texts): - logger.info(f"Successfully received {len(result)}} 
embeddings of expected dimension.") + logger.info(f"Successfully received {len(result)} embeddings of expected dimension.") return result else: - logger.error(f"HF Embedding API returned wrong number of embeddings: Got {len(result)}}, expected {len(texts)}}.") + logger.error(f"HF Embedding API returned wrong number of embeddings: Got {len(result)}, expected {len(texts)}.") return None elif isinstance(result, dict) and 'error' in result: error_msg = result['error'] estimated_time = result.get('estimated_time') - logger.error(f"HF Inference API embedding error: {error_msg}}" + (f" (Estimated time: {estimated_time}}s)" if estimated_time else "")) + logger.error(f"HF Inference API embedding error: {error_msg}" + (f" (Estimated time: {estimated_time}s)" if estimated_time else "")) return None else: - logger.error(f"Unexpected embedding format received: Type={type(result)}}. Response: {str(result)[:500]}}") + logger.error(f"Unexpected embedding format received: Type={type(result)}. Response: {str(result)[:500]}") return None except aiohttp.ClientResponseError as e: error_body = await e.text() logger.error(f"HF Inference API embedding request failed: Status={e.status}, Message='{e.message}'. 
Body: {error_body[:500]}") return None except asyncio.TimeoutError: - logger.error(f"HF Inference API embedding request timed out after {timeout.total}} seconds.") + logger.error(f"HF Inference API embedding request timed out after {timeout.total} seconds.") return None except Exception as e: - logger.exception(f"Unexpected error during embedding generation: {e}}") + logger.exception(f"Unexpected error during embedding generation: {e}") return None async def generate_code_patch(self, issue_number: int, model_key: str) -> dict: """Generates a code patch suggestion using a selected AI model.""" if issue_number not in self.issues: - return {"error": f"Issue {issue_number}} not found."} + return {"error": f"Issue {issue_number} not found."} if not self.hf_token: return {"error": "Hugging Face token not set."} if model_key not in HF_MODELS: - return {"error": f"Invalid model key: {model_key}}"} + return {"error": f"Invalid model key: {model_key}"} if not self.repo_local_path or not self.repo: return {"error": "Repository not cloned/available locally. Please scan the repository first."} @@ -771,17 +771,17 @@ class IssueManager: timestamp_str = datetime.fromtimestamp(context_data.get('timestamp', 0)).strftime('%Y-%m-%d %H:%M:%S') if context_data.get("error"): context_str = f"Pre-computed context retrieval failed: {context_data['error']}" - context_source = f"Pre-computed (Failed @ {timestamp_str}})" + context_source = f"Pre-computed (Failed @ {timestamp_str})" elif context_data.get("content"): context_str = context_data["content"] num_files = len(context_data.get('files',[])) - context_source = f"Pre-computed ({num_files}} files @ {timestamp_str}})" + context_source = f"Pre-computed ({num_files} files @ {timestamp_str})" else: context_str = "Pre-computed context was empty or unavailable." 
- context_source = f"Pre-computed (Empty @ {timestamp_str}})" - logger.info(f"Using pre-computed context for issue {issue_number}} (Source: {context_source}})") + context_source = f"Pre-computed (Empty @ {timestamp_str})" + logger.info(f"Using pre-computed context for issue {issue_number} (Source: {context_source})") else: - logger.info(f"No pre-computed context found for issue {issue_number}}, computing now.") + logger.info(f"No pre-computed context found for issue {issue_number}, computing now.") context_source = "Computed On-Demand" context_result = await self._get_code_context(issue) if "error" in context_result and context_result["error"]: @@ -797,7 +797,7 @@ class IssueManager: "timestamp": time.time() } context_duration = time.time() - start_time_context - logger.info(f"Computed context on-demand in {context_duration:.2f}}s. Source: {context_source}}") + logger.info(f"Computed context on-demand in {context_duration:.2f}s. Source: {context_source}") # --- Get Pre-computed Info --- summary_text = self._get_precomputed_text(issue_number, self.precomputed_summaries, "summary", "Summary") @@ -829,8 +829,8 @@ class IssueManager: 5. **Constraints:** If context is insufficient, state "Insufficient context to generate patch." and explain why. 
**Do not generate a diff block.** Do not invent code/paths # # Patch Suggestion: """ # --- Call Inference API --- - api_url = f"{HF_INFERENCE_API}}/{model_id}}" - headers = {"Authorization": f"Bearer {self.hf_token}}"} + api_url = f"{HF_INFERENCE_API}/{model_id}" + headers = {"Authorization": f"Bearer {self.hf_token}"} payload = { "inputs": prompt, "parameters": {"max_new_tokens": 2048, "temperature": 0.1, "return_full_text": False, "do_sample": False}, @@ -844,13 +844,13 @@ class IssueManager: async with session.post(api_url, json=payload) as response: api_duration = time.time() - start_time_api rate_limit_remaining = response.headers.get('X-Ratelimit-Remaining') - logger.debug(f"HF Patch API Response Status: {response.status}}, Duration: {api_duration:.2f}}s, RateLimit Remaining: {rate_limit_remaining}}") + logger.debug(f"HF Patch API Response Status: {response.status}, Duration: {api_duration:.2f}s, RateLimit Remaining: {rate_limit_remaining}") response.raise_for_status() result = await response.json() if result and isinstance(result, list) and 'generated_text' in result[0]: generated_text = result[0].get('generated_text', '').strip() - logger.info(f"Received patch suggestion from {model_id}} ({len(generated_text)}} chars).") + logger.info(f"Received patch suggestion from {model_id} ({len(generated_text)} chars).") diff_match = re.search(r"```diff\n(.*?)```", generated_text, re.DOTALL | re.IGNORECASE) explanation = generated_text.split("```diff")[0].strip() if diff_match else generated_text @@ -859,34 +859,34 @@ class IssueManager: if diff_match: patch_content = diff_match.group(1).strip() if not re.search(r'^(--- |\+\+\+ |@@ )', patch_content, re.MULTILINE): - logger.warning(f"Generated patch for issue {issue_number}} might lack standard diff headers or spacing.") + logger.warning(f"Generated patch for issue {issue_number} might lack standard diff headers or spacing.") return {"explanation": explanation, "patch": patch_content, "model_used": model_id} else: if 
re.search(r"(insufficient context|cannot generate|unable to create patch|context required)", explanation, re.IGNORECASE): - logger.warning(f"AI indicated insufficient context for issue {issue_number}}.") + logger.warning(f"AI indicated insufficient context for issue {issue_number}.") return {"explanation": explanation, "patch": None, "model_used": model_id} else: - logger.warning(f"No diff block found in patch suggestion response for issue {issue_number}}.") - return {"explanation": f"(AI response did not contain a ```diff block. Full response below)\n---\n{generated_text}}", "patch": None, "model_used": model_id} + logger.warning(f"No diff block found in patch suggestion response for issue {issue_number}.") + return {"explanation": f"(AI response did not contain a ```diff block. Full response below)\n---\n{generated_text}", "patch": None, "model_used": model_id} elif isinstance(result, dict) and 'error' in result: error_msg = result['error'] estimated_time = result.get('estimated_time') - logger.error(f"HF Inference API patch error for issue {issue_number}}: {error_msg}}" + (f" (Est: {estimated_time}}s)" if estimated_time else "")) - return {"error": f"AI model error: {error_msg}}"} + logger.error(f"HF Inference API patch error for issue {issue_number}: {error_msg}" + (f" (Est: {estimated_time}s)" if estimated_time else "")) + return {"error": f"AI model error: {error_msg}"} else: - logger.error(f"Unexpected patch response format from {model_id}} for issue {issue_number}}: {str(result)[:500]}}") + logger.error(f"Unexpected patch response format from {model_id} for issue {issue_number}: {str(result)[:500]}") return {"error": "Unexpected response format from AI model."} except aiohttp.ClientResponseError as e: error_body = await e.response.text() logger.error(f"HF Inference API patch request failed for issue {issue_number}: Status={e.status}, Message='{e.message}'. Body: {error_body[:500]}") - return {"error": f"AI model request failed ({e.status}}). 
Check model/token/API status. {error_body[:100]}}"} + return {"error": f"AI model request failed ({e.status}). Check model/token/API status. {error_body[:100]}"} except asyncio.TimeoutError: - logger.error(f"HF Inference API patch request timed out ({timeout.total}}s) for issue {issue_number}}.") + logger.error(f"HF Inference API patch request timed out ({timeout.total}s) for issue {issue_number}.") return {"error": "AI model request timed out. The model might be overloaded or the request too complex."} except Exception as e: - logger.exception(f"Error generating code patch for issue {issue_number}}: {e}}") - return {"error": f"An unexpected error occurred during patch generation: {e}}"} + logger.exception(f"Error generating code patch for issue {issue_number}: {e}") + return {"error": f"An unexpected error occurred during patch generation: {e}"} async def _get_code_context(self, issue: dict) -> dict: """Retrieves relevant code context based on file paths mentioned in the issue.""" @@ -896,7 +896,7 @@ class IssueManager: issue_id = issue.get('id', 'N/A') issue_body = issue.get('body', '') or "" issue_title = issue.get('title', '') or "" - text_to_search = f"{issue_title}}\n{issue_body}}" + text_to_search = f"{issue_title}\n{issue_body}" if not text_to_search.strip(): return {"content": "No issue title or body provided to search for file paths.", "files": [], "error": None} @@ -920,7 +920,7 @@ class IssueManager: if not potential_files: return {"content": "No file paths matching common patterns found in the issue title or body.", "files": [], "error": None} - logger.info(f"Found {len(potential_files)}} potential file references in issue {issue_id}}: {potential_files}}") + logger.info(f"Found {len(potential_files)} potential file references in issue {issue_id}: {potential_files}") context_content = "" max_context_length = 6000 files_included = [] @@ -943,13 +943,13 @@ class IssueManager: context_content += content_snippet files_included.append(str(relative_path)) 
else: - logger.warning(f"Skipping file {relative_path}} for context in issue {issue_id}} due to total length limit ({max_context_length}} chars).") + logger.warning(f"Skipping file {relative_path} for context in issue {issue_id} due to total length limit ({max_context_length} chars).") files_skipped_length.append(str(relative_path)) except OSError as e: - logger.warning(f"Could not read file {full_path}} for issue {issue_id}}: {e}}") + logger.warning(f"Could not read file {full_path} for issue {issue_id}: {e}") files_read_error.append(str(relative_path)) except Exception as e: - logger.warning(f"Unexpected error reading file {full_path}} for issue {issue_id}}: {e}}") + logger.warning(f"Unexpected error reading file {full_path} for issue {issue_id}: {e}") files_read_error.append(str(relative_path)) else: logger.info(f"Potential path '{relative_path}' (from '{file_path_str}') not found or not a file in local repo for issue {issue_id}.") @@ -959,23 +959,23 @@ class IssueManager: error_status = None if files_included: final_content = context_content.strip() - logger.info(f"Included context from {len(files_included)}} files for issue {issue_id}}: {files_included}}") + logger.info(f"Included context from {len(files_included)} files for issue {issue_id}: {files_included}") else: final_content = "No content could be retrieved from the potential file paths found." - logger.warning(f"Context generation for issue {issue_id}} resulted in no included files.") + logger.warning(f"Context generation for issue {issue_id} resulted in no included files.") if potential_files: # If paths were found but none included error_status = "No readable or found files among potential paths." 
status_notes = [] if files_not_found: - status_notes.append(f"Files mentioned but not found: {files_not_found}}") - logger.info(f"Files mentioned but not found for issue {issue_id}}: {files_not_found}}") + status_notes.append(f"Files mentioned but not found: {files_not_found}") + logger.info(f"Files mentioned but not found for issue {issue_id}: {files_not_found}") if files_read_error: - status_notes.append(f"Files failed to read: {files_read_error}}") - logger.warning(f"Files mentioned but failed to read for issue {issue_id}}: {files_read_error}}") + status_notes.append(f"Files failed to read: {files_read_error}") + logger.warning(f"Files mentioned but failed to read for issue {issue_id}: {files_read_error}") if files_skipped_length: - status_notes.append(f"File content skipped due to length limit: {files_skipped_length}}") - logger.warning(f"File content skipped due to length limit for issue {issue_id}}: {files_skipped_length}}") + status_notes.append(f"File content skipped due to length limit: {files_skipped_length}") + logger.warning(f"File content skipped due to length limit for issue {issue_id}: {files_skipped_length}") if status_notes: final_content += "\n\n--- Context Notes ---\n" + "\n".join(status_notes) @@ -991,7 +991,7 @@ class IssueManager: if not self.hf_token: return "Error: Hugging Face token not set." if model_key not in HF_MODELS: - return f"Error: Invalid model key: {model_key}}" + return f"Error: Invalid model key: {model_key}" model_id = HF_MODELS[model_key] issue_id = issue.get('id','N/A') @@ -1031,8 +1031,8 @@ Based on *all* the information provided above, outline a potential plan: * [Mention branching, coding, testing, committing, PR.] **Important:** If critical information is still missing despite the AI analysis, emphasize that in step 1. Do not invent details. 
""" - api_url = f"{HF_INFERENCE_API}}/{model_id}}" - headers = {"Authorization": f"Bearer {self.hf_token}}"} + api_url = f"{HF_INFERENCE_API}/{model_id}" + headers = {"Authorization": f"Bearer {self.hf_token}"} payload = { "inputs": prompt, "parameters": {"max_new_tokens": 1024, "temperature": 0.6, "return_full_text": False, "do_sample": True, "top_p": 0.9}, @@ -1046,32 +1046,32 @@ Based on *all* the information provided above, outline a potential plan: async with session.post(api_url, json=payload) as response: api_duration = time.time() - start_time_api rate_limit_remaining = response.headers.get('X-Ratelimit-Remaining') - logger.debug(f"HF Suggestion API Response Status: {response.status}}, Duration: {api_duration:.2f}}s, RateLimit Remaining: {rate_limit_remaining}}") + logger.debug(f"HF Suggestion API Response Status: {response.status}, Duration: {api_duration:.2f}s, RateLimit Remaining: {rate_limit_remaining}") response.raise_for_status() result = await response.json() if result and isinstance(result, list) and 'generated_text' in result[0]: suggestion = result[0].get('generated_text', 'AI Error: No suggestion text generated.').strip() - logger.info(f"Received suggestion from {model_id}} for issue {issue_id}} ({len(suggestion)}} chars).") + logger.info(f"Received suggestion from {model_id} for issue {issue_id} ({len(suggestion)} chars).") return suggestion elif isinstance(result, dict) and 'error' in result: error_msg = result['error'] estimated_time = result.get('estimated_time') - logger.error(f"HF Inference API suggestion error for issue {issue_id}}: {error_msg}}" + (f" (Est: {estimated_time}}s)" if estimated_time else "")) - return f"Error: AI model returned an error: {error_msg}}" + logger.error(f"HF Inference API suggestion error for issue {issue_id}: {error_msg}" + (f" (Est: {estimated_time}s)" if estimated_time else "")) + return f"Error: AI model returned an error: {error_msg}" else: - logger.error(f"Unexpected suggestion response format from 
{model_id}} for issue {issue_id}}: {str(result)[:500]}}") + logger.error(f"Unexpected suggestion response format from {model_id} for issue {issue_id}: {str(result)[:500]}") return "Error: Received unexpected response format from AI model." except aiohttp.ClientResponseError as e: error_body = await e.response.text() logger.error(f"HF Inference API suggestion request failed for issue {issue_id}: Status={e.status}, Message='{e.message}'. Body: {error_body[:500]}") - return f"Error: AI model request failed ({e.status}}). Check model/token/API status. {error_body[:100]}}" + return f"Error: AI model request failed ({e.status}). Check model/token/API status. {error_body[:100]}" except asyncio.TimeoutError: - logger.error(f"HF Inference API suggestion request timed out ({timeout.total}}s) for issue {issue_id}}.") + logger.error(f"HF Inference API suggestion request timed out ({timeout.total}s) for issue {issue_id}.") return "Error: AI model request timed out. The model might be busy." except Exception as e: - logger.exception(f"Error suggesting resolution for issue {issue_id}}: {e}}") - return f"An unexpected error occurred during suggestion generation: {e}}" + logger.exception(f"Error suggesting resolution for issue {issue_id}: {e}") + return f"An unexpected error occurred during suggestion generation: {e}" def _get_precomputed_text(self, issue_id: int, data_dict: dict, key: str, name: str) -> str: """Safely retrieves precomputed text, handling errors and pending states.""" @@ -1086,11 +1086,11 @@ Based on *all* the information provided above, outline a potential plan: return entry[key] else: # No error, but key might be missing or None if is_recent: - return f"({name}} computation pending...)" + return f"({name} computation pending...)" else: - return f"({name}} not computed or result was empty)" + return f"({name} not computed or result was empty)" else: - return f"({name}} not computed yet)" + return f"({name} not computed yet)" def _get_duplicate_info_text(self, 
issue_id: int) -> str: """Formats duplicate info text.""" @@ -1141,7 +1141,7 @@ Based on *all* the information provided above, outline a potential plan: if disconnected_clients: unique_disconnected = list(set(disconnected_clients)) - logger.info(f"Removing {len(unique_disconnected)}} disconnected clients after broadcast.") + logger.info(f"Removing {len(unique_disconnected)} disconnected clients after broadcast.") for client in unique_disconnected: self.remove_ws_client(client) @@ -1149,7 +1149,7 @@ Based on *all* the information provided above, outline a potential plan: logger.info("Broadcast loop cancelled.") break except Exception as e: - logger.exception(f"Error in broadcast loop: {e}}") + logger.exception(f"Error in broadcast loop: {e}") await asyncio.sleep(10) async def handle_code_editor_update(self, issue_num: int, delta_str: str, sender_client_id: str): @@ -1158,19 +1158,19 @@ Based on *all* the information provided above, outline a potential plan: WARNING: Lacks Operational Transformation - concurrent edits are UNSAFE. """ if issue_num not in self.code_editors: - logger.warning(f"Received code update for non-existent editor instance for issue {issue_num}}. Ignoring.") + logger.warning(f"Received code update for non-existent editor instance for issue {issue_num}. Ignoring.") return if issue_num not in self.issues: - logger.warning(f"Received code update for non-existent issue {issue_num}} in manager. Ignoring.") + logger.warning(f"Received code update for non-existent issue {issue_num} in manager. Ignoring.") return - logger.warning(f"Handling code editor update for issue {issue_num}} from {sender_client_id}}. " + logger.warning(f"Handling code editor update for issue {issue_num} from {sender_client_id}. 
" "WARNING: NO OT IMPLEMENTED - Last write wins / potential conflicts.") try: delta_obj = json.loads(delta_str) self.code_editors[issue_num].apply_delta(delta_obj) - logger.info(f"Applied delta for issue {issue_num}} from client {sender_client_id}} (Placeholder OT Logic - Revision {self.code_editors[issue_num].revision}})") + logger.info(f"Applied delta for issue {issue_num} from client {sender_client_id} (Placeholder OT Logic - Revision {self.code_editors[issue_num].revision})") update_payload = json.dumps({ "type": "code_update", @@ -1201,27 +1201,27 @@ Based on *all* the information provided above, outline a potential plan: if tasks: - logger.debug(f"Broadcasting code update for issue {issue_num}} to {len(tasks)}} other clients.") + logger.debug(f"Broadcasting code update for issue {issue_num} to {len(tasks)} other clients.") results = await asyncio.gather(*tasks, return_exceptions=True) for i, result in enumerate(results): if isinstance(result, Exception): failed_client = active_clients_snapshot[i] failed_client_id = getattr(failed_client, 'client_id', 'Unknown') - logger.warning(f"Failed to broadcast code update to client {failed_client_id}}: {result}}") + logger.warning(f"Failed to broadcast code update to client {failed_client_id}: {result}") # Use standard socket exceptions for checks if isinstance(result, (ConnectionClosed, ConnectionClosedOK, ConnectionAbortedError, ConnectionResetError)): disconnected_clients.append(failed_client) if disconnected_clients: unique_disconnected = list(set(disconnected_clients)) - logger.info(f"Removing {len(unique_disconnected)}} clients after code update broadcast failure.") + logger.info(f"Removing {len(unique_disconnected)} clients after code update broadcast failure.") for client in unique_disconnected: if client: self.remove_ws_client(client) except json.JSONDecodeError: - logger.error(f"Received invalid JSON delta for issue {issue_num}} from {sender_client_id}}: {delta_str[:200]}}") + logger.error(f"Received invalid 
JSON delta for issue {issue_num} from {sender_client_id}: {delta_str[:200]}") except Exception as e: - logger.exception(f"Error handling code editor update for issue {issue_num}} from {sender_client_id}}: {e}}") + logger.exception(f"Error handling code editor update for issue {issue_num} from {sender_client_id}: {e}") async def broadcast_issue_update(self): """Notifies clients that the issue list/data has changed (e.g., due to webhook).""" @@ -1261,7 +1261,7 @@ Based on *all* the information provided above, outline a potential plan: if disconnected_clients: unique_disconnected = list(set(disconnected_clients)) - logger.info(f"Removing {len(unique_disconnected)}} clients after issue update broadcast.") + logger.info(f"Removing {len(unique_disconnected)} clients after issue update broadcast.") for client in unique_disconnected: self.remove_ws_client(client) @@ -1276,19 +1276,19 @@ Based on *all* the information provided above, outline a potential plan: try: self.ws_clients.remove(client_to_remove) removed_from_list = True - logger.info(f"Removed WebSocket client from list: {client_desc}} (Remaining: {len(self.ws_clients)}})") + logger.info(f"Removed WebSocket client from list: {client_desc} (Remaining: {len(self.ws_clients)})") except ValueError: - logger.debug(f"Client {client_desc}} already removed from list or not found.") + logger.debug(f"Client {client_desc} already removed from list or not found.") pass if client_id and client_id in self.collaborators: del self.collaborators[client_id] removed_from_collab = True - logger.info(f"Removed collaborator entry for {client_id}}.") + logger.info(f"Removed collaborator entry for {client_id}.") if (removed_from_list or removed_from_collab) and self.main_loop.is_running(): asyncio.run_coroutine_threadsafe(self.broadcast_collaboration_status_once(), self.main_loop) - logger.debug(f"Scheduled immediate status broadcast after removing client {client_desc}}.") + logger.debug(f"Scheduled immediate status broadcast after 
removing client {client_desc}.") async def broadcast_collaboration_status_once(self): """Sends a single collaboration status update immediately.""" @@ -1318,14 +1318,14 @@ Based on *all* the information provided above, outline a potential plan: if isinstance(result, Exception): client = active_clients_snapshot[i] client_id = getattr(client, 'client_id', 'Unknown') - logger.warning(f"Error during single status broadcast to client {client_id}}: {result}}") + logger.warning(f"Error during single status broadcast to client {client_id}: {result}") # Use standard socket exceptions for checks if isinstance(result, (ConnectionClosed, ConnectionClosedOK, ConnectionAbortedError, ConnectionResetError)): disconnected_clients.append(client) if disconnected_clients: unique_disconnected = list(set(disconnected_clients)) - logger.info(f"Removing {len(unique_disconnected)}} clients found disconnected during single broadcast.") + logger.info(f"Removing {len(unique_disconnected)} clients found disconnected during single broadcast.") for client in unique_disconnected: self.remove_ws_client(client) @@ -1344,7 +1344,7 @@ Based on *all* the information provided above, outline a potential plan: self.stale_issues.append(issue_id) except (ValueError, TypeError) as e: logger.warning(f"Could not parse 'updated_at' ('{updated_at_str}') for issue {issue_id}: {e}") - logger.info(f"Identified {len(self.stale_issues)}}} potentially stale issues (updated > {self.stale_issue_threshold_days}}} days ago).") + logger.info(f"Identified {len(self.stale_issues)} potentially stale issues (updated > {self.stale_issue_threshold_days} days ago).") def _identify_high_priority_candidates(self): """Identifies high-priority issues (e.g., Critical/High severity).""" @@ -1353,12 +1353,12 @@ Based on *all* the information provided above, outline a potential plan: severity = self._determine_severity(issue_data.get('labels', [])) if severity in ["Critical", "High"]: self.high_priority_candidates.append(issue_id) -
logger.info(f"Identified {len(self.high_priority_candidates)}} high-priority candidates (Critical/High severity).") + logger.info(f"Identified {len(self.high_priority_candidates)} high-priority candidates (Critical/High severity).") async def _compute_and_store_summary(self, issue_id: int): """Generates and stores a summary for a given issue using an LLM (Idle Task).""" if issue_id not in self.issues: - logger.warning(f"Skipping summary generation for issue {issue_id}}: Issue no longer exists.") + logger.warning(f"Skipping summary generation for issue {issue_id}: Issue no longer exists.") return if not self.hf_token: self.precomputed_summaries[issue_id] = {"error": "HF token not set", "timestamp": time.time()} @@ -1367,15 +1367,15 @@ Based on *all* the information provided above, outline a potential plan: try: issue = self.issues[issue_id] model_id = DEFAULT_IDLE_MODEL_ID # Use designated idle model - logger.info(f"Idle Task: Generating summary for issue {issue_id}} using {model_id}}") + logger.info(f"Idle Task: Generating summary for issue {issue_id} using {model_id}") start_time = time.time() prompt = f"""Concisely summarize the following GitHub issue in 1-2 sentences. Focus on the core problem or request reported by the user. 
Issue Title: {issue.get('title', 'N/A')} Issue Body (first 1000 chars): {issue.get('body', 'N/A')[:1000]} Summary:""" - api_url = f"{HF_INFERENCE_API}}/{model_id}}" - headers = {"Authorization": f"Bearer {self.hf_token}}"} + api_url = f"{HF_INFERENCE_API}/{model_id}" + headers = {"Authorization": f"Bearer {self.hf_token}"} payload = { "inputs": prompt, "parameters": {"max_new_tokens": 128, "temperature": 0.2, "return_full_text": False, "do_sample": False}, @@ -1392,15 +1392,15 @@ Summary:""" if result and isinstance(result, list) and 'generated_text' in result[0]: summary = result[0].get('generated_text', '').strip() or "(AI generated empty summary)" self.precomputed_summaries[issue_id] = {"summary": summary, "error": None, "timestamp": time.time()} - logger.info(f"Stored summary for issue {issue_id}} (took {duration:.2f}}s).") + logger.info(f"Stored summary for issue {issue_id} (took {duration:.2f}s).") elif isinstance(result, dict) and 'error' in result: raise ValueError(f"API Error: {result['error']}") else: - raise ValueError(f"Unexpected API response format: {str(result)[:200]}}") + raise ValueError(f"Unexpected API response format: {str(result)[:200]}") except Exception as e: - err_msg = f"Failed summary: {e}}" - logger.error(f"Failed to generate summary for issue {issue_id}}: {e}}", exc_info=False) # Keep log cleaner + err_msg = f"Failed summary: {e}" + logger.error(f"Failed to generate summary for issue {issue_id}: {e}", exc_info=False) # Keep log cleaner self.precomputed_summaries[issue_id] = {"error": err_msg, "summary": None, "timestamp": time.time()} async def _compute_and_store_missing_info(self, issue_id: int): @@ -1413,7 +1413,7 @@ Summary:""" try: issue = self.issues[issue_id] model_id = DEFAULT_IDLE_MODEL_ID # Use cheap model - logger.info(f"Idle Task: Identifying missing info for issue {issue_id}} using {model_id}}") + logger.info(f"Idle Task: Identifying missing info for issue {issue_id} using {model_id}") start_time = time.time() prompt = 
f"""Analyze the following GitHub issue description. Identify critical information potentially missing for effective debugging or resolution. List the missing items concisely (e.g., "Steps to reproduce", "Error logs", "Expected vs. Actual behavior", "Environment details"). If the description seems reasonably complete, respond with ONLY the word "None". @@ -1422,8 +1422,8 @@ Issue Body: {issue.get('body', 'N/A')[:1500]} Missing Information:""" - api_url = f"{HF_INFERENCE_API}}/{model_id}}" - headers = {"Authorization": f"Bearer {self.hf_token}}"} + api_url = f"{HF_INFERENCE_API}/{model_id}" + headers = {"Authorization": f"Bearer {self.hf_token}"} payload = { "inputs": prompt, "parameters": {"max_new_tokens": 64, "temperature": 0.1, "return_full_text": False, "do_sample": False}, @@ -1446,11 +1446,11 @@ Missing Information:""" elif isinstance(result, dict) and 'error' in result: raise ValueError(f"API Error: {result['error']}") else: - raise ValueError(f"Unexpected API response format: {str(result)[:200]}}") + raise ValueError(f"Unexpected API response format: {str(result)[:200]}") except Exception as e: - err_msg = f"Failed missing info analysis: {e}}" - logger.error(f"Failed missing info analysis for issue {issue_id}}: {e}}", exc_info=False) + err_msg = f"Failed missing info analysis: {e}" + logger.error(f"Failed missing info analysis for issue {issue_id}: {e}", exc_info=False) self.precomputed_missing_info[issue_id] = {"error": err_msg, "info_needed": None, "timestamp": time.time()} async def _compute_and_store_preliminary_analysis(self, issue_id: int): @@ -1463,7 +1463,7 @@ Missing Information:""" try: issue = self.issues[issue_id] model_id = DEFAULT_IDLE_MODEL_ID # Use cheap model - logger.info(f"Idle Task: Generating preliminary analysis for issue {issue_id}} using {model_id}}") + logger.info(f"Idle Task: Generating preliminary analysis for issue {issue_id} using {model_id}") start_time = time.time() prompt = f"""Analyze the GitHub issue below. 
Provide a single, concise sentence hypothesizing the root cause OR the main goal. Start with "Hypothesis:". If unsure, respond ONLY with "Hypothesis: Further investigation needed.". @@ -1472,8 +1472,8 @@ Issue Body: {issue.get('body', 'N/A')[:1500]} Response:""" - api_url = f"{HF_INFERENCE_API}}/{model_id}}" - headers = {"Authorization": f"Bearer {self.hf_token}}"} + api_url = f"{HF_INFERENCE_API}/{model_id}" + headers = {"Authorization": f"Bearer {self.hf_token}"} payload = { "inputs": prompt, "parameters": {"max_new_tokens": 80, "temperature": 0.3, "return_full_text": False, "do_sample": False}, @@ -1499,11 +1499,11 @@ Response:""" elif isinstance(result, dict) and 'error' in result: raise ValueError(f"API Error: {result['error']}") else: - raise ValueError(f"Unexpected API response format: {str(result)[:200]}}") + raise ValueError(f"Unexpected API response format: {str(result)[:200]}") except Exception as e: - err_msg = f"Failed preliminary analysis: {e}}" - logger.error(f"Failed preliminary analysis for issue {issue_id}}: {e}}", exc_info=False) + err_msg = f"Failed preliminary analysis: {e}" + logger.error(f"Failed preliminary analysis for issue {issue_id}: {e}", exc_info=False) self.precomputed_analysis[issue_id] = {"error": err_msg, "hypothesis": None, "timestamp": time.time()} def start_idle_processing(self): @@ -1511,7 +1511,7 @@ Response:""" if not self.idle_task or self.idle_task.done(): if self.hf_token and self.repo and self.main_loop.is_running(): self.idle_task = self.main_loop.create_task(self.run_idle_processing()) - logger.info(f"Started background idle processing task (interval: {self.idle_processing_interval}}s).") + logger.info(f"Started background idle processing task (interval: {self.idle_processing_interval}s).") else: missing = [] if not self.hf_token: missing.append("HF Token") @@ -1559,7 +1559,7 @@ Response:""" self._identify_stale_issues() self._identify_high_priority_candidates() except Exception as e: - logger.error(f"Error during 
synchronous stale/priority identification: {e}}") + logger.error(f"Error during synchronous stale/priority identification: {e}") # --- Identify Issues Needing Work --- all_issue_ids = list(self.issues.keys()) @@ -1574,8 +1574,8 @@ Response:""" other_summary_candidates = [i for i in issues_needing_summary if i not in self.high_priority_candidates] ordered_summary_candidates = priority_summary_candidates + other_summary_candidates - logger.debug(f"Idle candidates: Ctx:{len(issues_needing_context)}}, Sum:{len(issues_needing_summary)}}, " - f"Info:{len(issues_needing_missing_info)}}, Anl:{len(issues_needing_analysis)}}") + logger.debug(f"Idle candidates: Ctx:{len(issues_needing_context)}, Sum:{len(issues_needing_summary)}, " + f"Info:{len(issues_needing_missing_info)}, Anl:{len(issues_needing_analysis)}") # 3. Schedule Context Pre-computation (I/O) context_computed_count = 0 @@ -1584,7 +1584,7 @@ Response:""" cycle_tasks.append(self._compute_and_store_context(issue_id)) context_computed_count += 1 else: break - if context_computed_count > 0: logger.info(f"Scheduled {context_computed_count}} context computations.") + if context_computed_count > 0: logger.info(f"Scheduled {context_computed_count} context computations.") # 4. Schedule Summary Generation (LLM - Medium Cost) summary_computed_count = 0 @@ -1593,7 +1593,7 @@ Response:""" cycle_tasks.append(self._compute_and_store_summary(issue_id)) summary_computed_count += 1 else: break - if summary_computed_count > 0: logger.info(f"Scheduled {summary_computed_count}} summary computations.") + if summary_computed_count > 0: logger.info(f"Scheduled {summary_computed_count} summary computations.") # 5. 
Schedule Missing Info Analysis (LLM - Low Cost) missing_info_count = 0 @@ -1602,7 +1602,7 @@ Response:""" cycle_tasks.append(self._compute_and_store_missing_info(issue_id)) missing_info_count += 1 else: break - if missing_info_count > 0: logger.info(f"Scheduled {missing_info_count}} missing info analyses.") + if missing_info_count > 0: logger.info(f"Scheduled {missing_info_count} missing info analyses.") # 6. Schedule Preliminary Analysis (LLM - Low Cost) analysis_count = 0 @@ -1611,19 +1611,19 @@ Response:""" cycle_tasks.append(self._compute_and_store_preliminary_analysis(issue_id)) analysis_count += 1 else: break - if analysis_count > 0: logger.info(f"Scheduled {analysis_count}} preliminary analyses.") + if analysis_count > 0: logger.info(f"Scheduled {analysis_count} preliminary analyses.") # --- Execute Scheduled Async Tasks --- if cycle_tasks: - logger.info(f"Executing {len(cycle_tasks)}} async idle tasks for this cycle...") + logger.info(f"Executing {len(cycle_tasks)} async idle tasks for this cycle...") results = await asyncio.gather(*cycle_tasks, return_exceptions=True) num_errors = 0 for i, result in enumerate(results): if isinstance(result, Exception): num_errors += 1 - logger.error(f"Error encountered in background idle task {i+1}}/{len(cycle_tasks)}}: {result}}", exc_info=False) # Keep log cleaner + logger.error(f"Error encountered in background idle task {i+1}/{len(cycle_tasks)}: {result}", exc_info=False) # Keep log cleaner cycle_duration = time.time() - start_time_cycle - logger.info(f"Idle processing cycle finished in {cycle_duration:.2f}} seconds. {len(results)}} tasks processed ({num_errors}} errors).") + logger.info(f"Idle processing cycle finished in {cycle_duration:.2f} seconds. 
{len(results)} tasks processed ({num_errors} errors).") else: logger.info("No async idle tasks to perform in this cycle.") logger.info(f"--- Finished idle processing cycle ---") @@ -1631,7 +1631,7 @@ Response:""" except asyncio.CancelledError: logger.info("Idle processing loop cancelled.") except Exception as e: - logger.exception(f"Critical error in idle processing loop: {e}}") + logger.exception(f"Critical error in idle processing loop: {e}") await asyncio.sleep(self.idle_processing_interval * 2) finally: logger.info("Idle processing loop finished.") @@ -1639,10 +1639,10 @@ Response:""" async def _compute_and_store_context(self, issue_id: int): """Helper async task to compute and store context for one issue during idle time.""" if issue_id not in self.issues: - logger.warning(f"Skipping context computation for issue {issue_id}}: Issue no longer exists.") + logger.warning(f"Skipping context computation for issue {issue_id}: Issue no longer exists.") return try: - logger.debug(f"Starting background context computation for issue {issue_id}}...") + logger.debug(f"Starting background context computation for issue {issue_id}...") start_time = time.time() issue_data = self.issues[issue_id] context_result = await self._get_code_context(issue_data) @@ -1662,8 +1662,8 @@ Response:""" logger.info(log_msg) except Exception as e: - logger.exception(f"Failed to compute context for issue {issue_id}} in background task: {e}}") - self.precomputed_context[issue_id] = {"error": f"Unexpected computation error: {e}}", "timestamp": time.time(), "content": None, "files": []} + logger.exception(f"Failed to compute context for issue {issue_id} in background task: {e}") + self.precomputed_context[issue_id] = {"error": f"Unexpected computation error: {e}", "timestamp": time.time(), "content": None, "files": []} async def _run_clustering_and_duplicates_async(self): """Runs clustering and then identifies duplicates as a single background task unit.""" @@ -1673,18 +1673,18 @@ Response:""" 
await self._cluster_similar_issues() # This resets flags on success internally cluster_duration = time.time() - start_time if self.issue_clusters: - logger.info(f"Background Task: Clustering finished in {cluster_duration:.2f}}s. Identifying duplicates...") + logger.info(f"Background Task: Clustering finished in {cluster_duration:.2f}s. Identifying duplicates...") start_time_dup = time.time() self._identify_potential_duplicates() dup_duration = time.time() - start_time_dup - logger.info(f"Background Task: Duplicate identification finished in {dup_duration:.2f}}s.") + logger.info(f"Background Task: Duplicate identification finished in {dup_duration:.2f}s.") else: logger.warning("Background Task: Clustering did not produce results. Skipping duplicate identification.") self._webhook_change_count = 0 self.needs_recluster = False except Exception as e: - logger.error(f"Error during background clustering/duplicate identification task: {e}}", exc_info=True) + logger.error(f"Error during background clustering/duplicate identification task: {e}", exc_info=True) # ========== Gradio UI Definition ========== @@ -1799,7 +1799,7 @@ def create_ui(manager: IssueManager) -> gr.Blocks: """ return preview_html except Exception as e: - logger.exception(f"Error generating issue preview for {issue_num}}: {e}}") + logger.exception(f"Error generating issue preview for {issue_num}: {e}") return f"
Error generating preview for issue {issue_num}. Check logs.
" async def get_ai_suggestion_wrapper(issue_num: Optional[int], model_key: str, progress=gr.Progress()) -> str: @@ -1812,19 +1812,19 @@ def create_ui(manager: IssueManager) -> gr.Blocks: issue = manager.issues[issue_num] issue_hash = manager._get_issue_hash(issue) - logger.info(f"Requesting suggestion for issue {issue_num}} (hash: {issue_hash}}) using model {model_key}}.") + logger.info(f"Requesting suggestion for issue {issue_num} (hash: {issue_hash}) using model {model_key}.") try: - progress(0.3, desc=f"Querying {model_key}}...") + progress(0.3, desc=f"Querying {model_key}...") suggestion = await manager.cached_suggestion(issue_hash, model_key) progress(1, desc="Suggestion received.") if suggestion.lower().startswith("error:"): - return f"⚠️ {suggestion}}" + return f"⚠️ {suggestion}" else: - return f"**💡 Suggestion based on {model_key}}:**\n\n---\n{suggestion}}" + return f"**💡 Suggestion based on {model_key}:**\n\n---\n{suggestion}" except Exception as e: - logger.exception(f"Error in get_ai_suggestion_wrapper for issue {issue_num}}: {e}}") - return f"❌ An unexpected error occurred while getting the suggestion: {e}}" + logger.exception(f"Error in get_ai_suggestion_wrapper for issue {issue_num}: {e}") + return f"❌ An unexpected error occurred while getting the suggestion: {e}" async def get_ai_patch_wrapper(issue_num: Optional[int], model_key: str, progress=gr.Progress()) -> str: """UI wrapper for getting AI patches, handles state and progress.""" @@ -1836,10 +1836,10 @@ def create_ui(manager: IssueManager) -> gr.Blocks: if not manager.repo: return "❌ Error: Repository not loaded. Please scan the repository first." 
- logger.info(f"Requesting patch for issue {issue_num}} using model {model_key}}.") + logger.info(f"Requesting patch for issue {issue_num} using model {model_key}.") progress(0.1, desc="Gathering code context (using cache if available)...") try: - progress(0.4, desc=f"Querying {model_key}} for patch...") + progress(0.4, desc=f"Querying {model_key} for patch...") result = await manager.generate_code_patch(issue_num, model_key) progress(1, desc="Patch result received.") @@ -1853,7 +1853,7 @@ def create_ui(manager: IssueManager) -> gr.Blocks: if patch_content: patch_content_sanitized = patch_content.replace('`', '\\`') - logger.info(f"Successfully generated patch for issue {issue_num}} using {model_used}}.") + logger.info(f"Successfully generated patch for issue {issue_num} using {model_used}.") return f"""**🩹 Patch Suggestion from {model_used}:** **Explanation:** {explanation} @@ -1863,7 +1863,7 @@ def create_ui(manager: IssueManager) -> gr.Blocks: {patch_content_sanitized} ```""" else: - logger.warning(f"AI provided explanation but no patch for issue {issue_num}}. Explanation: {explanation}}") + logger.warning(f"AI provided explanation but no patch for issue {issue_num}. 
Explanation: {explanation}") if re.search(r"(insufficient context|cannot generate|unable to create patch)", explanation, re.IGNORECASE): status_msg = "AI indicated insufficient context" else: @@ -1876,8 +1876,8 @@ def create_ui(manager: IssueManager) -> gr.Blocks: **({status_msg})**""" except Exception as e: - logger.exception(f"Error in get_ai_patch_wrapper for issue {issue_num}}: {e}}") - return f"❌ An unexpected error occurred while generating the patch: {e}}" + logger.exception(f"Error in get_ai_patch_wrapper for issue {issue_num}: {e}") + return f"❌ An unexpected error occurred while generating the patch: {e}" async def handle_issue_select(evt: gr.SelectData, current_state: dict): """ @@ -1897,7 +1897,7 @@ def create_ui(manager: IssueManager) -> gr.Blocks: try: selected_id = int(evt.value[0]) - logger.info(f"Issue selected via Dataframe: ID {selected_id}}") + logger.info(f"Issue selected via Dataframe: ID {selected_id}") if selected_id not in manager.issues: logger.error(f"Selected issue ID {selected_id} not found in manager's issue list.") @@ -1915,7 +1915,7 @@ def create_ui(manager: IssueManager) -> gr.Blocks: context_data = manager.precomputed_context[selected_id] timestamp_str = datetime.fromtimestamp(context_data.get('timestamp', 0)).strftime('%Y-%m-%d %H:%M:%S') if context_data.get("error"): - context_source_msg = f"Pre-computed (Failed @ {timestamp_str}})" + context_source_msg = f"Pre-computed (Failed @ {timestamp_str})" files_content["error_context.txt"] = f"# Error loading pre-computed context:\n# {context_data['error']}" elif context_data.get("files"): context_source_msg = f"Pre-computed ({len(context_data['files'])} files @ {timestamp_str})" @@ -1927,15 +1927,15 @@ def create_ui(manager: IssueManager) -> gr.Blocks: files_content[file_path_str] = full_path.read_text(encoding='utf-8', errors='ignore') loaded_count += 1 except Exception as e: - logger.warning(f"Error reading pre-computed file {full_path}} for issue {selected_id}}: {e}}") - 
files_content[file_path_str] = f"# Error reading file: {e}}" + logger.warning(f"Error reading pre-computed file {full_path} for issue {selected_id}: {e}") + files_content[file_path_str] = f"# Error reading file: {e}" if loaded_count == 0 and context_data["files"]: files_content["error_reading_files.txt"] = "# Precomputed context found file references, but failed to read any file content." else: - context_source_msg = f"Pre-computed (No files found @ {timestamp_str}})" - files_content[f"issue_{selected_id}}_context.md"] = context_data.get("content", "# No specific code context found (pre-computed).") + context_source_msg = f"Pre-computed (No files found @ {timestamp_str})" + files_content[f"issue_{selected_id}_context.md"] = context_data.get("content", "# No specific code context found (pre-computed).") else: - logger.info(f"Context not pre-computed for issue {selected_id}}, computing on demand for editor.") + logger.info(f"Context not pre-computed for issue {selected_id}, computing on demand for editor.") context_source_msg = "Computed On-Demand" context_result = await manager._get_code_context(issue_data) @@ -1959,27 +1959,27 @@ def create_ui(manager: IssueManager) -> gr.Blocks: files_content[file_path_str] = full_path.read_text(encoding='utf-8', errors='ignore') loaded_count +=1 except Exception as e: - logger.warning(f"Error reading on-demand file {full_path}} for issue {selected_id}}: {e}}") - files_content[file_path_str] = f"# Error reading file: {e}}" + logger.warning(f"Error reading on-demand file {full_path} for issue {selected_id}: {e}") + files_content[file_path_str] = f"# Error reading file: {e}" if loaded_count == 0 and context_result["files"]: files_content["error_reading_files.txt"] = "# Context computation found file references, but failed to read any file content." 
else: context_source_msg += " (No files found)" - files_content[f"issue_{selected_id}}_context.md"] = context_result.get("content", "# No specific code context found.") + files_content[f"issue_{selected_id}_context.md"] = context_result.get("content", "# No specific code context found.") context_load_duration = time.time() - context_load_start - logger.info(f"Context loading for editor took {context_load_duration:.2f}}s. Source: {context_source_msg}}") + logger.info(f"Context loading for editor took {context_load_duration:.2f}s. Source: {context_source_msg}") if not files_content: - files_content["placeholder.txt"] = f"# No relevant files found or context failed to load for issue {selected_id}}." + files_content["placeholder.txt"] = f"# No relevant files found or context failed to load for issue {selected_id}." manager.code_editors[selected_id] = OTCodeEditor(initial_value=files_content) - logger.info(f"Initialized/Updated OT editor state for issue {selected_id}} with files: {list(files_content.keys())}}") + logger.info(f"Initialized/Updated OT editor state for issue {selected_id} with files: {list(files_content.keys())}") updates = { "selected_issue_id_state": gr.update(value=selected_id), "issue_preview_html": gr.update(value=generate_issue_preview(selected_id)), "code_edit_component": gr.update(value=files_content, interactive=True), - "ai_output_display": gr.update(value=f"*Context loaded ({context_source_msg}}). Ready for AI actions or editing.*") + "ai_output_display": gr.update(value=f"*Context loaded ({context_source_msg}). Ready for AI actions or editing.*") } return updates @@ -1988,7 +1988,7 @@ def create_ui(manager: IssueManager) -> gr.Blocks: return { **default_response, "issue_preview_html": gr.update(value="Error loading issue details. Please check logs and try again.
"), - "code_edit_component": gr.update(value={"error.txt": f"# Error loading code context for selection.\n# Error: {e}}"}, interactive=False), + "code_edit_component": gr.update(value={"error.txt": f"# Error loading code context for selection.\n# Error: {e}"}, interactive=False), "ai_output_display": gr.update(value="*Error processing selection. See logs.*") } @@ -2155,19 +2155,19 @@ def create_ui(manager: IssueManager) -> gr.Blocks: # --- JavaScript for WebSocket Communication --- def web_socket_js(ws_port): - client_id = f"client_{hashlib.sha1(os.urandom(16)).hexdigest()[:8]}}" - logger.info(f"Generated Client ID for WebSocket: {client_id}}") + client_id = f"client_{hashlib.sha1(os.urandom(16)).hexdigest()[:8]}" + logger.info(f"Generated Client ID for WebSocket: {client_id}") return f""" """ demo_app.load(_js=web_socket_js(WS_PORT), fn=None, inputs=None, outputs=None) @@ -2498,13 +2498,13 @@ def create_ui(manager: IssueManager) -> gr.Blocks: # ========== WebSocket Server Logic ========== async def handle_ws_connection(websocket: WebSocketServerProtocol, path: str, manager: IssueManager): """Handles incoming WebSocket connections and messages for collaboration.""" - client_id = f"client_{hashlib.sha1(os.urandom(16)).hexdigest()[:8]}}" + client_id = f"client_{hashlib.sha1(os.urandom(16)).hexdigest()[:8]}" setattr(websocket, 'client_id', client_id) remote_addr = websocket.remote_address - logger.info(f"WebSocket client connected: {remote_addr}} assigned ID {client_id}}") + logger.info(f"WebSocket client connected: {remote_addr} assigned ID {client_id}") manager.ws_clients.append(websocket) - logger.info(f"Client list size: {len(manager.ws_clients)}}") + logger.info(f"Client list size: {len(manager.ws_clients)}") try: async for message in websocket: @@ -2516,9 +2516,9 @@ async def handle_ws_connection(websocket: WebSocketServerProtocol, path: str, ma logger.debug(f"Received WS message type '{msg_type}' from {sender_id} ({remote_addr})") if msg_type == "join": - 
client_name = data.get("name", f"User_{sender_id[:4]}}") + client_name = data.get("name", f"User_{sender_id[:4]}") manager.collaborators[sender_id] = {"name": client_name, "status": "Connected"} - logger.info(f"Client {sender_id}} ({client_name}}) joined collaboration. Current collaborators: {list(manager.collaborators.keys())}}") + logger.info(f"Client {sender_id} ({client_name}) joined collaboration. Current collaborators: {list(manager.collaborators.keys())}") await manager.broadcast_collaboration_status_once() elif msg_type == "code_update": @@ -2535,25 +2535,25 @@ async def handle_ws_connection(websocket: WebSocketServerProtocol, path: str, ma manager.collaborators[sender_id]["status"] = status await manager.broadcast_collaboration_status_once() else: - logger.warning(f"Received status update from client {sender_id}} not in collaborator list. Adding/Updating.") - manager.collaborators[sender_id] = {"name": f"User_{sender_id[:4]}} (Re-added)", "status": status} + logger.warning(f"Received status update from client {sender_id} not in collaborator list. Adding/Updating.") + manager.collaborators[sender_id] = {"name": f"User_{sender_id[:4]} (Re-added)", "status": status} await manager.broadcast_collaboration_status_once() else: logger.warning(f"Unknown WebSocket message type '{msg_type}' received from {sender_id} ({remote_addr}). 
Message: {str(message)[:200]}") except json.JSONDecodeError: - logger.error(f"Received invalid JSON over WebSocket from {sender_id}} ({remote_addr}}): {str(message)[:200]}}...") + logger.error(f"Received invalid JSON over WebSocket from {sender_id} ({remote_addr}): {str(message)[:200]}...") except Exception as e: - logger.exception(f"Error processing WebSocket message from {sender_id}} ({remote_addr}}): {e}}") + logger.exception(f"Error processing WebSocket message from {sender_id} ({remote_addr}): {e}") # Catch standard socket exceptions for disconnects except (ConnectionClosed, ConnectionClosedOK, ConnectionAbortedError, ConnectionResetError) as e: logger.info(f"WebSocket client {client_id} ({remote_addr}) disconnected: Code={getattr(e, 'code', 'N/A')}, Reason='{getattr(e, 'reason', 'N/A')}'") except Exception as e: - logger.exception(f"Unexpected error in WebSocket handler for {client_id}} ({remote_addr}}): {e}}") + logger.exception(f"Unexpected error in WebSocket handler for {client_id} ({remote_addr}): {e}") finally: - logger.info(f"Cleaning up connection for client {client_id}} ({remote_addr}})") + logger.info(f"Cleaning up connection for client {client_id} ({remote_addr})") manager.remove_ws_client(websocket) @@ -2579,16 +2579,16 @@ async def start_websocket_server(manager: IssueManager, port: int): ping_interval=20, ping_timeout=20 ) - logger.info(f"WebSocket server started successfully on ws://0.0.0.0:{port}}") + logger.info(f"WebSocket server started successfully on ws://0.0.0.0:{port}") await stop_event # Keep running until stop_event is set except OSError as e: - logger.error(f"Failed to start WebSocket server on port {port}}: {e}}. Is the port already in use?") - raise SystemExit(f"WebSocket Port {port}} unavailable. Application cannot start.") + logger.error(f"Failed to start WebSocket server on port {port}: {e}. Is the port already in use?") + raise SystemExit(f"WebSocket Port {port} unavailable. 
Application cannot start.") except asyncio.CancelledError: logger.info("WebSocket server task cancelled.") except Exception as e: - logger.exception(f"An unexpected error occurred starting or running the WebSocket server: {e}}") + logger.exception(f"An unexpected error occurred starting or running the WebSocket server: {e}") if not stop_event.done(): stop_event.set_result(True) # Ensure loop terminates on error raise finally: @@ -2609,12 +2609,12 @@ def run_webhook_server(manager: IssueManager, port: int, main_loop: asyncio.Abst try: server_address = ("0.0.0.0", port) httpd = HTTPServer(server_address, WebhookHandler) - logger.info(f"Webhook HTTP server starting on http://0.0.0.0:{port}}") + logger.info(f"Webhook HTTP server starting on http://0.0.0.0:{port}") httpd.serve_forever() except OSError as e: - logger.error(f"Failed to start Webhook server on port {port}}: {e}}. Is the port already in use?") + logger.error(f"Failed to start Webhook server on port {port}: {e}. Is the port already in use?") except Exception as e: - logger.exception(f"Unexpected error in Webhook server thread: {e}}") + logger.exception(f"Unexpected error in Webhook server thread: {e}") finally: if httpd: logger.info("Shutting down Webhook HTTP server...") @@ -2663,7 +2663,7 @@ if __name__ == "__main__": except KeyboardInterrupt: logger.info("Gradio app interrupted. Initiating shutdown...") except Exception as e: - logger.exception(f"An unexpected error occurred: {{e}}") + logger.exception(f"An unexpected error occurred: {e}") finally: # Ensure graceful shutdown of tasks logger.info("Cancelling WebSocket server task...") @@ -2674,7 +2674,7 @@ if __name__ == "__main__": except asyncio.CancelledError: pass except Exception as e: - logger.error(f"Error during WebSocket cancellation: {{e}}") + logger.error(f"Error during WebSocket cancellation: {e}") # Stop the main asyncio loop if main_loop.is_running():