Kunal Pai committed on
Commit ea6ff5b · 1 Parent(s): a827aa8

RIP Cost Benefit

.github/workflows/restore_cost_benefit.yml DELETED
@@ -1,92 +0,0 @@
- # .github/workflows/restore_cost_benefit.yml
- name: Restore src/cost_benefit.py
-
- on:
-   push:
-     branches: [main]
-   pull_request:
-     branches: [main]
-
- jobs:
-   restore:
-     runs-on: ubuntu-latest
-     # allow the GITHUB_TOKEN to push a commit back
-     permissions:
-       contents: write
-
-     steps:
-       - name: Checkout repository
-         uses: actions/checkout@v3
-         with:
-           # you need the token available for the later push
-           token: ${{ secrets.GITHUB_TOKEN }}
-
-       - name: Recreate src/cost_benefit.py
-         run: |
-           mkdir -p src
-           cat > src/cost_benefit.py <<'PY'
-           import argparse
-           import subprocess
-           import time
-           import requests
-
-           def detect_available_budget(runtime_env: str) -> int:
-               """
-               Return an approximate VRAM-based budget (MB) when running locally,
-               else default to 100.
-               """
-               import torch
-               if "local" in runtime_env and torch.cuda.is_available():
-                   total_vram_mb = torch.cuda.get_device_properties(0).total_memory // (1024 ** 2)
-                   return min(total_vram_mb, 100)
-               return 100
-
-           def get_best_model(runtime_env: str, *, use_local_only: bool = False, use_api_only: bool = False) -> dict:
-               """
-               Pick the fastest model that fits in the detected budget while
-               respecting the locality filters.
-               """
-               static_costs = {
-                   "llama3.2": {"size": 20, "token_cost": 0.0001, "tokens_sec": 30, "type": "local"},
-                   "mistral": {"size": 40, "token_cost": 0.0002, "tokens_sec": 50, "type": "local"},
-                   "gemini-2.0-flash": {"size": 60, "token_cost": 0.0005, "tokens_sec": 60, "type": "api"},
-                   "gemini-2.5-pro-preview-03-25": {"size": 80, "token_cost": 0.002, "tokens_sec": 45, "type": "api"},
-               }
-
-               budget = detect_available_budget(runtime_env)
-               best_model, best_speed = None, -1
-
-               for model, info in static_costs.items():
-                   if info["size"] > budget:
-                       continue
-                   if use_local_only and info["type"] != "local":
-                       continue
-                   if use_api_only and info["type"] != "api":
-                       continue
-                   if info["tokens_sec"] > best_speed:
-                       best_model, best_speed = model, info["tokens_sec"]
-
-               chosen = best_model or "llama3.2"  # sensible default
-               return {
-                   "model": chosen,
-                   "token_cost": static_costs[chosen]["token_cost"],
-                   "tokens_sec": static_costs[chosen]["tokens_sec"],
-                   "note": None if best_model else "Defaulted because no model met the constraints",
-               }
-           PY
-
-       - name: Commit & push only when the file was missing
-         if: github.event_name == 'push' && github.ref == 'refs/heads/main'
-         run: |
-           set -e
-           git config user.name "HARSHIL PATEL"
-           git config user.email "hpppatel@ucdavis.edu"
-
-           # If the file is NOT in the current Git index, add-commit-push.
-           if ! git ls-files --error-unmatch src/cost_benefit.py >/dev/null 2>&1; then
-             git add src/cost_benefit.py
-             git commit -m "chore(ci): restore missing src/cost_benefit.py [skip ci]"
-             git push origin HEAD:main
-           else
-             echo "src/cost_benefit.py already exists in the repository – nothing to restore"
-           fi
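
The guard in the deleted workflow's final step can be reproduced off-CI. Below is a minimal Python sketch of the same index check, assuming it runs from the repository root with git on PATH; is_tracked is a hypothetical helper written for illustration, not something that exists in this repo.

# Local sketch of the workflow's "was the file missing?" guard.
# Assumes: executed from the repository root, git available on PATH.
import subprocess

def is_tracked(path: str) -> bool:
    # Mirrors `git ls-files --error-unmatch <path>`: exit code 0 means
    # the path is in the Git index, non-zero means it is untracked.
    result = subprocess.run(
        ["git", "ls-files", "--error-unmatch", path],
        capture_output=True,
    )
    return result.returncode == 0

if not is_tracked("src/cost_benefit.py"):
    print("missing from index: the workflow would recreate, commit, and push it")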
src/cost_benefit.py DELETED
@@ -1,48 +0,0 @@
- import argparse
- import subprocess
- import time
- import requests
-
- def detect_available_budget(runtime_env: str) -> int:
-     """
-     Return an approximate VRAM-based budget (MB) when running locally,
-     else default to 100.
-     """
-     import torch
-     if "local" in runtime_env and torch.cuda.is_available():
-         total_vram_mb = torch.cuda.get_device_properties(0).total_memory // (1024 ** 2)
-         return min(total_vram_mb, 100)
-     return 100
-
- def get_best_model(runtime_env: str, *, use_local_only: bool = False, use_api_only: bool = False) -> dict:
-     """
-     Pick the fastest model that fits in the detected budget while
-     respecting the locality filters.
-     """
-     static_costs = {
-         "llama3.2": {"size": 20, "token_cost": 0.0001, "tokens_sec": 30, "type": "local"},
-         "mistral": {"size": 40, "token_cost": 0.0002, "tokens_sec": 50, "type": "local"},
-         "gemini-2.0-flash": {"size": 60, "token_cost": 0.0005, "tokens_sec": 60, "type": "api"},
-         "gemini-2.5-pro-preview-03-25": {"size": 80, "token_cost": 0.002, "tokens_sec": 45, "type": "api"},
-     }
-
-     budget = detect_available_budget(runtime_env)
-     best_model, best_speed = None, -1
-
-     for model, info in static_costs.items():
-         if info["size"] > budget:
-             continue
-         if use_local_only and info["type"] != "local":
-             continue
-         if use_api_only and info["type"] != "api":
-             continue
-         if info["tokens_sec"] > best_speed:
-             best_model, best_speed = model, info["tokens_sec"]
-
-     chosen = best_model or "llama3.2"  # sensible default
-     return {
-         "model": chosen,
-         "token_cost": static_costs[chosen]["token_cost"],
-         "tokens_sec": static_costs[chosen]["tokens_sec"],
-         "note": None if best_model else "Defaulted because no model met the constraints",
-     }
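
For reference, a minimal sketch of how the deleted module was presumably consumed. The import path src.cost_benefit, the "local" runtime label, and an installed torch are all assumptions rather than anything this commit confirms (detect_available_budget imports torch unconditionally, so the dependency is required even off-GPU).

# Hypothetical usage of the deleted module (assumes src/cost_benefit.py
# is restored and importable as src.cost_benefit, with torch installed).
from src.cost_benefit import get_best_model

# A "local" runtime label routes through the VRAM probe; the budget is
# capped at 100 MB, every entry fits, so the fastest local model wins.
choice = get_best_model("local", use_local_only=True)
print(choice["model"], choice["tokens_sec"], "tokens/sec")  # mistral, 50
if choice["note"]:
    print("note:", choice["note"])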