""" DeepFunding Oracle: This script dynamically loads dependency data and for each repository URL: • Fetches GitHub features (stars, forks, watchers, open issues, pull requests, activity) using the GitHub API. • Uses the LLama model to analyze parent-child behavior (based on the fetched features and parent info) and returns a base weight (0-1) for the repository. • Trains a RandomForest regressor on these features (with the base weight as the target) to predict a final weight. The output submission CSV has three columns: repo, parent, and final_weight. """ import base64 from io import StringIO import os import warnings import csv import re import requests import numpy as np import pandas as pd import time import threading import logging import concurrent.futures from concurrent.futures import ThreadPoolExecutor import signal from tqdm import tqdm import sys import re import json import time from sklearn.model_selection import train_test_split, RandomizedSearchCV from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_squared_error from Oracle.SmolLM import SmolLM warnings.filterwarnings("ignore") # Configure logging to file and console logging.basicConfig( handlers=[ logging.FileHandler("deepfundingoracle.log"), logging.StreamHandler(sys.stdout) ], level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" ) ############################## # GitHub API helper: Fetch repository metrics ############################## def fetch_repo_metrics(repo_url): """ Fetch GitHub metrics (stars, forks, watchers, open issues, pull requests, and activity) given a repository URL. Assumes repo_url is in the form "https://github.com/owner/repo". """ try: # Extract owner and repo name m = re.search(r"github\.com/([^/]+)/([^/]+)", repo_url) if not m: return {"stargazers_count": 0, "forks_count": 0, "watchers_count": 0, "open_issues_count": 0, "pulls_count": 0, "activity": 0} owner, repo_name = m.group(1), m.group(2) api_url = f"https://api.github.com/repos/{owner}/{repo_name}" headers = {} token = os.environ.get("GITHUB_API_TOKEN", "") if token: headers["Authorization"] = f"token {token}" r = requests.get(api_url, headers=headers) if r.status_code == 200: data = r.json() pulls_url = data.get("pulls_url", "").replace("{\/*state}", "") pulls_count = len(requests.get(pulls_url, headers=headers).json()) if pulls_url else 0 activity = data.get("updated_at", "") return { "stargazers_count": data.get("stargazers_count", 0), "forks_count": data.get("forks_count", 0), "watchers_count": data.get("watchers_count", 0), "open_issues_count": data.get("open_issues_count", 0), "pulls_count": pulls_count, "activity": activity, "owner": owner, "repo_name": repo_name, "token": token } else: return {"stargazers_count": 0, "forks_count": 0, "watchers_count": 0, "open_issues_count": 0, "pulls_count": 0, "activity": 0} except Exception: return {"stargazers_count": 0, "forks_count": 0, "watchers_count": 0, "open_issues_count": 0, "pulls_count": 0, "activity": 0} ############################## # Feature Extraction ############################## def load_data(file): """ Dynamically load the dependency data CSV from the uploaded file. Expects at least "repo" and "parent" columns. 
""" try: print("[INFO] Loading data from uploaded file...") start_time = time.time() # Read the uploaded file directly into a DataFrame df = pd.read_csv(file) end_time = time.time() print(f"[INFO] Data loaded successfully in {end_time - start_time:.2f} seconds.") return df except Exception as e: print("[ERROR] Error loading data:", e) return None def fetch_github_features(df): """ For each row, using the repo URL, call the GitHub API to fetch: stars, forks, watchers, open issues, pull requests, activity, and contributors count. Adds these as new columns to the DataFrame. """ print("[INFO] Fetching GitHub features for repositories...") start_time = time.time() stars_list = [] forks_list = [] watchers_list = [] issues_list = [] pulls_list = [] activity_list = [] contributors_list = [] dependencies_list =[] cache = {} def get_metrics(repo_url): if repo_url in cache: return cache[repo_url] val = fetch_repo_metrics(repo_url) try: m = re.search(r"github\.com/([^/]+)/([^/]+)",repo_url) if m: owner, repo_name = m.group(1), m.group(2) pkg_url = f"https://api.github.com/repos/{owner}/{repo_name}/packages.json" headers = {} token = os.environ.get("GITHUB_API_TOKEN", "") if token: headers["Authorization"] = f"token {token}" pkg_resp = requests.get(pkg_url, headers=headers) if pkg_resp.status_code ==200: pkg_data = pkg_resp.json() content = base64.b64decode(pkg_data["content",""]).decode("utf-8") pkg_json = json.loads(content) dependencies = pkg_json.get("dependencies", {}) val["dependencies_count"] = len(dependencies) else: val["dependencies_count"] = 0 else: val["dependencies_count"] = 0 except Exception: val["dependencies_count"] = 0 cache[repo_url] = val return val with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor: futures = {executor.submit(get_metrics, row['repo']): i for i, row in df.iterrows()} for fut in tqdm(concurrent.futures.as_completed(futures), total=len(futures), desc="Fetching metrics"): res = fut.result() stars_list.append(res.get("stargazers_count", 0)) forks_list.append(res.get("forks_count", 0)) watchers_list.append(res.get("watchers_count", 0)) issues_list.append(res.get("open_issues_count", 0)) pulls_list.append(res.get("pulls_count", 0)) activity_list.append(res.get("activity", 0)) dependencies_list.append(res.get("dependencies_count", 0)) # Fetch contributors count try: contributors_url = f"https://api.github.com/repos/{res['owner']}/{res['repo_name']}/contributors" headers = {"Authorization": f"token {res['token']}"} contributors_response = requests.get(contributors_url, headers=headers) if contributors_response.status_code == 200: contributors_list.append(len(contributors_response.json())) else: contributors_list.append(0) except Exception: contributors_list.append(0) df["stars"] = stars_list df["forks"] = forks_list df["watchers"] = watchers_list df["open_issues"] = issues_list df["pulls"] = pulls_list df["activity"] = activity_list df["contributors"] = contributors_list df["dependencies_count"] = dependencies_list end_time = time.time() print(f"[INFO] GitHub features fetched successfully in {end_time - start_time:.2f} seconds.") return df def timeout_handler(signum, frame): raise TimeoutError("LLama model prediction timed out.") def assign_base_weight(df, max_workers=32, llm_retries=2, llm_delay=0): """ Assign base weights using a single LLM call to determine feature weights, and programmatically calculate repository weights. 
""" print("[INFO] Starting optimized base weight assignment...", flush=True) logging.info("[INFO] Assigning base weights using optimized approach...") start_time = time.time() oracle = SmolLM() prompt = ( "Can you Predict a weight in the range (0-1) for these github features such as stars,forks,watchers,open_issues,pulls,activity,contributors based on its importance in determining " "the influence of a repository. Output the weights for each feature as text e.g.: " 'stars: 0.3, forks: 0.2, watchers: 0.2, open_issues: 0.1, pulls: 0.1, activity: 0.05, contributors: 0.05' ) feature_weights = None for attempt in range(llm_retries): try: response = oracle.predict(prompt, max_length=512, max_new_tokens=150) if not response or not response.strip(): raise ValueError("Empty response from Oracle.") matches = re.findall( r'(stars|forks|watchers|open_issues|pulls|activity|contributors)\s*[:=]\s*([0-9]*\.?[0-9]+)', response, re.IGNORECASE) feature_weights = {k.lower(): float(v) for k, v in matches} if not feature_weights or len(feature_weights) < 7: raise ValueError("Could not extract all feature weights from response.") print(f"[INFO] Feature weights from LLM: {feature_weights}", flush=True) break except Exception as e: print(f"[ERROR] Oracle attempt {attempt+1} failed: {e}", flush=True) logging.error(f"[ERROR] Oracle attempt {attempt+1} failed: {e}") time.sleep(llm_delay) if feature_weights is None: feature_weights = { "stars": 0.3, "forks": 0.2, "watchers": 0.2, "open_issues": 0.1, "pulls": 0.1, "activity": 0.05, "contributors": 0.05 } print(f"[INFO] Using default feature weights: {feature_weights}", flush=True) for feature in feature_weights.keys(): if feature in df.columns: df[feature] = pd.to_numeric(df[feature], errors='coerce').fillna(0) def calculate_weight(row): weight = 0 for feature, feature_weight in feature_weights.items(): if feature in row: weight += row[feature] * feature_weight return weight df["base_weight_raw"] = df.apply(calculate_weight, axis=1) df["base_weight"] = df.groupby("parent")["base_weight_raw"].transform( lambda s: (s - s.min()) / (s.max() - s.min() if s.max() != s.min() else 1) ) end_time = time.time() print(f"[INFO] Base weights assigned successfully in {end_time - start_time:.2f} seconds.", flush=True) logging.info(f"[INFO] Base weights assigned successfully in {end_time - start_time:.2f} seconds.") return df def normalize_funding(df): """ Normalize funding weights for child repositories grouped by parent. 
""" print("[INFO] Normalizing funding weights...", flush=True) df["final_weight"] = df.groupby("parent")["final_weight"].transform( lambda x: x / x.sum() if x.sum() > 0 else 1 / len(x) ) print("[INFO] Funding weights normalized successfully.", flush=True) return df def prepare_dataset(file): print("[INFO] Starting dataset preparation...") start_time = time.time() df = load_data(file) if df is None: raise ValueError("Failed to load data.") if not {"repo", "parent"}.issubset(df.columns): raise ValueError("Input CSV must contain 'repo' and 'parent' columns.") print("[INFO] Fetching GitHub features...") df = fetch_github_features(df) print("[INFO] GitHub features fetched successfully.") print("[INFO] Assigning base weights using LLama model...") df = assign_base_weight(df) df = train_predict_weight(df) df = normalize_funding(df) end_time = time.time() print(f"[INFO] Dataset preparation completed in {end_time - start_time:.2f} seconds.") return df ############################## # RandomForest Regression ############################## def train_predict_weight(df, criterion='gini', max_features='auto', max_depth=12, min_samples_split=2, min_samples_leaf=1): """ Uses a RandomForestRegressor to predict a repository weight based on GitHub features. The regressor is tuned with provided hyperparameters. A flag column 'is_source' is used to indicate if a repository is the primary source. If none is flagged, the repo with the highest prediction is set as the parent. """ print("[INFO] Starting weight prediction...", flush=True) start_time = time.time() target = "base_weight" feature_cols = ["stars", "forks", "watchers", "open_issues", "pulls", "activity", "contributors"] if "activity" in df.columns: df["activity"] = pd.to_datetime(df["activity"], errors="coerce", utc=True) now = pd.Timestamp.now(tz="UTC") df["activity"] = (now - df["activity"]).dt.days.fillna(-1) if target not in df.columns: raise ValueError("Base weight column missing.") X = df[feature_cols] y = df[target] # For regression, if a classification criterion is given, switch to 'mse' reg_criterion = "squared_error" if criterion in ["gini", "entropy"] else criterion rf_model = RandomForestRegressor(random_state=42, criterion=reg_criterion, max_features=max_features, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf, n_estimators=200) rf_model.fit(X, y) df["rf_pred"] = rf_model.predict(X) # Provide feedback about one of the trees in the RF try: depth = rf_model.estimators_[0].get_depth() leaves = rf_model.estimators_[0].get_n_leaves() print(f"[INFO] RF tree depth: {depth}, number of leaves: {leaves}", flush=True) except Exception: pass parent_map = df.groupby("parent")["repo"].apply(list).to_dict() final_weights = {} for parent, children in parent_map.items(): group_idxs = df[df["parent"] == parent].index preds = df.loc[group_idxs, "rf_pred"] total = preds.sum() if total > 0: normed = preds / total else: # If sum is zero, assign equal weights. 

##############################
# CSV Output
##############################
def create_submission_csv(df, output_filename="submission.csv"):
    print(f"[INFO] Writing results to {output_filename}...", flush=True)
    required_cols = ["repo", "parent", "final_weight"]
    submission_df = df[required_cols]
    submission_df.to_csv(output_filename, index=False)
    print(f"[INFO] Results written to {output_filename}.", flush=True)
    return output_filename


# Removed Gradio UI code from this file to ensure a modular workflow.
# This file now focuses solely on data processing and prediction.

if __name__ == "__main__":
    input_file = "input.csv"  # Replace with the actual input file path
    output_file = "submission.csv"
    print("[INFO] Preparing dataset...")
    df = prepare_dataset(input_file)
    print("[INFO] Creating submission CSV...")
    create_submission_csv(df, output_file)
    print("[INFO] Process completed successfully.")
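
# Example of the expected CSV shapes (repository URLs below are placeholders):
#
#   input.csv       -> must contain "repo" and "parent" columns, e.g.
#       repo,parent
#       https://github.com/org/child-a,https://github.com/org/parent
#       https://github.com/org/child-b,https://github.com/org/parent
#
#   submission.csv  -> repo, parent, final_weight, where final_weight is normalized so the
#       children of each parent sum to 1.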