update #34 by AppleSwing (opened)
- src/backend/run_eval_suite.py +8 -22
- src/backend/tasks/measurement_task_utils.py +8 -21
- src/display/about.py +4 -7
- src/display/utils.py +10 -28
- src/populate.py +1 -1
- src/utils.py +32 -826
src/backend/run_eval_suite.py
CHANGED
@@ -17,22 +17,16 @@ def process_results_decorator(func):
         end_to_end_time = sum([r[1] for r in results]) / len(results)
         prefilling_time = sum([r[2] for r in results]) / len(results)
         decoding_throughput = sum([r[3] for r in results]) / len(results)
-
-
-        prefill_throughput = sum([r[6] for r in results]) / len(results)
-        prefill_mfu = sum([r[7] for r in results]) / len(results)
-        prefill_mbu = sum([r[8] for r in results]) / len(results)
+        mfu = sum([r[4] for r in results]) / len(results)
+        mbu = sum([r[5] for r in results]) / len(results)
         # print(f"end_to_end_time: {end_to_end_time}, prefilling_time: {prefilling_time}, decoding_throughput: {decoding_throughput}")

         result_dict = func(self, doc, processed_results, *args, **kwargs)
         result_dict["end_to_end_time"] = end_to_end_time
         result_dict["prefilling_time"] = prefilling_time
         result_dict["decoding_throughput"] = decoding_throughput
-        result_dict["
-        result_dict["
-        result_dict["prefill_throughput"] = prefill_throughput
-        result_dict["prefill_mfu"] = prefill_mfu
-        result_dict["prefill_mbu"] = prefill_mbu
+        result_dict["mfu"] = mfu
+        result_dict["mbu"] = mbu
         return result_dict
     return wrapper
 ConfigurableTask.process_results = process_results_decorator(orig_process_results)

@@ -43,11 +37,8 @@ def aggregation_decorator(func):
         aggregation_list["end_to_end_time"] = mean
         aggregation_list["prefilling_time"] = mean
         aggregation_list["decoding_throughput"] = mean
-        aggregation_list["
-        aggregation_list["
-        aggregation_list["prefill_throughput"] = mean
-        aggregation_list["prefill_mfu"] = mean
-        aggregation_list["prefill_mbu"] = mean
+        aggregation_list["mfu"] = mean
+        aggregation_list["mbu"] = mean
         return aggregation_list
     return wrapper
 ConfigurableTask.aggregation = aggregation_decorator(orig_aggregation)

@@ -58,11 +49,8 @@ def higher_is_better_decorator(func):
         higher_is_better_dict["end_to_end_time"] = False
         higher_is_better_dict["prefilling_time"] = False
         higher_is_better_dict["decoding_throughput"] = True
-        higher_is_better_dict["
-        higher_is_better_dict["
-        higher_is_better_dict["prefill_throughput"] = True
-        higher_is_better_dict["prefill_mfu"] = True
-        higher_is_better_dict["prefill_mbu"] = True
+        higher_is_better_dict["mfu"] = True
+        higher_is_better_dict["mbu"] = True
         return higher_is_better_dict
     return wrapper
 ConfigurableTask.higher_is_better = higher_is_better_decorator(orig_higher_is_better)

@@ -77,8 +65,6 @@ from src.backend.tasks.selfcheckgpt.task import SelfCheckGPT

 from src.backend.huggingface_generate_until import HFLMwithChatTemplate
 from src.backend.moe_infinity import MoEHFLM
-from src.backend.vllm import VLLM_MOE
-from src.backend.sglang import SGLangMoE

 def run_evaluation(
     eval_request: EvalRequest,
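The pattern in this file is monkey-patching: each original `ConfigurableTask` method is captured (`orig_process_results`, `orig_aggregation`, `orig_higher_is_better`), wrapped, and written back onto the class, so every lm-eval task also reports the system metrics. A runnable sketch of that pattern — `DummyTask` and the result-tuple layout are illustrative assumptions; only the `r[4]`/`r[5]` indices and the wrap-and-reassign structure come from the diff:

```python
def process_results_decorator(func):
    def wrapper(self, doc, results, *args, **kwargs):
        # each result tuple (assumed layout): (answer, e2e_time, prefill_time, decode_tp, mfu, mbu)
        processed_results = [r[0] for r in results]
        mfu = sum(r[4] for r in results) / len(results)
        mbu = sum(r[5] for r in results) / len(results)
        result_dict = func(self, doc, processed_results, *args, **kwargs)
        result_dict["mfu"] = mfu
        result_dict["mbu"] = mbu
        return result_dict
    return wrapper

class DummyTask:  # stand-in for lm-eval's ConfigurableTask
    def process_results(self, doc, processed_results):
        return {"acc": 1.0}

DummyTask.process_results = process_results_decorator(DummyTask.process_results)
print(DummyTask().process_results({}, [("yes", 1.2, 0.3, 40.0, 0.51, 0.62)]))
# -> {'acc': 1.0, 'mfu': 0.51, 'mbu': 0.62}
```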
src/backend/tasks/measurement_task_utils.py
CHANGED
@@ -12,12 +12,8 @@ def process_results_decorator(func):
         end_to_end_time = sum([r[1] for r in results]) / len(results)
         prefilling_time = sum([r[2] for r in results]) / len(results)
         decoding_throughput = sum([r[3] for r in results]) / len(results)
-
-
-        prefill_throughput = sum([r[6] for r in results]) / len(results)
-        prefill_mfu = sum([r[7] for r in results]) / len(results)
-        prefill_mbu = sum([r[8] for r in results]) / len(results)
-
+        mfu = sum([r[4] for r in results]) / len(results)
+        mbu = sum([r[5] for r in results]) / len(results)

         # print(f"end_to_end_time: {end_to_end_time}, prefilling_time: {prefilling_time}, decoding_throughput: {decoding_throughput}")

@@ -26,11 +22,8 @@ def process_results_decorator(func):
         result_dict["end_to_end_time"] = end_to_end_time
         result_dict["prefilling_time"] = prefilling_time
         result_dict["decoding_throughput"] = decoding_throughput
-        result_dict["
-        result_dict["
-        result_dict["prefill_throughput"] = prefill_throughput
-        result_dict["prefill_mfu"] = prefill_mfu
-        result_dict["prefill_mbu"] = prefill_mbu
+        result_dict["mfu"] = mfu
+        result_dict["mbu"] = mbu
         return result_dict
     return wrapper

@@ -42,11 +35,8 @@ def aggregation_decorator(func):
         aggregation_list["end_to_end_time"] = mean
         aggregation_list["prefilling_time"] = mean
         aggregation_list["decoding_throughput"] = mean
-        aggregation_list["
-        aggregation_list["
-        aggregation_list["prefill_throughput"] = mean
-        aggregation_list["prefill_mfu"] = mean
-        aggregation_list["prefill_mbu"] = mean
+        aggregation_list["mfu"] = mean
+        aggregation_list["mbu"] = mean
         return aggregation_list
     return wrapper

@@ -58,11 +48,8 @@ def higher_is_better_decorator(func):
         higher_is_better_dict["end_to_end_time"] = False
         higher_is_better_dict["prefilling_time"] = False
         higher_is_better_dict["decoding_throughput"] = True
-        higher_is_better_dict["
-        higher_is_better_dict["
-        higher_is_better_dict["prefill_throughput"] = True
-        higher_is_better_dict["prefill_mfu"] = True
-        higher_is_better_dict["prefill_mbu"] = True
+        higher_is_better_dict["mfu"] = True
+        higher_is_better_dict["mbu"] = True
         return higher_is_better_dict
     return wrapper
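These task-side decorators complete lm-eval's metric contract: `aggregation()` maps each metric name to a reduction callable and `higher_is_better()` flags the direction used for ranking. A small sketch of how such registrations get consumed, with `statistics.mean` standing in for lm-eval's `mean`:

```python
from statistics import mean  # stand-in for lm-eval's mean aggregation

aggregation = {"end_to_end_time": mean, "decoding_throughput": mean,
               "mfu": mean, "mbu": mean}
higher_is_better = {"end_to_end_time": False, "decoding_throughput": True,
                    "mfu": True, "mbu": True}

# per-example values collected by the process_results wrapper (invented numbers)
samples = {"end_to_end_time": [1.2, 0.9, 1.1], "mfu": [0.41, 0.38, 0.44]}
for metric, values in samples.items():
    direction = "higher" if higher_is_better[metric] else "lower"
    print(f"{metric}: {aggregation[metric](values):.3f} ({direction} is better)")
```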
src/display/about.py
CHANGED
@@ -18,13 +18,10 @@ Columns and Metrics:
 - Method: The MOE LLMs inference framework.
 - E2E(s): Average End to End generation time in seconds.
 - PRE(s): Prefilling Time of input prompt in seconds.
--
--
--
--
-- Prefill S-MBU(%): Sparse Model Bandwidth Utilization for Prefilling.
-- Prefill S-MFU(%): Sparse Model FLOPs Utilization for Prefilling.
-- Precision: The precision of used model.
+- T/s: Tokens throughput per second.
+- S-MBU(%): Sparse Model Bandwidth Utilization.
+- S-MFU(%): Sparse Model FLOPs Utilization.
+- Precision: The precision of used model.

 """
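For orientation, S-MBU and S-MFU compare what the sparse model actually moves and computes per decode step against the hardware peaks; the deleted helpers in src/utils.py further down derive the exact byte and FLOP counts from the HF config. A deliberately simplified single-GPU, batch-size-1 sketch with invented numbers (the real code also divides the fp16 peak FLOPs by two, mirrored here):

```python
# Illustrative numbers only; src/utils.py (below) derives every size from the model config.
n_layers = 24
active_weights_per_layer = 0.0008   # T-params touched per token per layer (attention + routed experts)
kv_read = 0.002                     # T-elements of KV cache read per decode step
precision = 2                       # bytes per parameter (float16)
decoding_tp = 30.0                  # decoded tokens per second
num_gpus = 1
peak_bandwidth_tb = 2.0             # TB/s, A100-class HBM
peak_flops_tf = 624.0               # TFLOP/s, A100 float16

# S-MBU: bytes actually streamed per second vs. peak memory bandwidth
smbu = (n_layers * active_weights_per_layer + kv_read) * precision * decoding_tp \
    / (num_gpus * peak_bandwidth_tb)
# S-MFU: ~2 FLOPs per touched weight per token vs. peak compute (halved, as in src/utils.py)
smfu = n_layers * active_weights_per_layer * 2 * decoding_tp \
    / (num_gpus * peak_flops_tf / 2)
print(f"S-MBU = {smbu:.1%}, S-MFU = {smfu:.1%}")  # bandwidth-bound decode: high S-MBU, tiny S-MFU
```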
src/display/utils.py
CHANGED
@@ -9,32 +9,25 @@ def fields(raw_class):

 E2Es = "E2E(s)" #"End-to-end time (s)"
 PREs = "PRE(s)" #"Prefilling time (s)"
-TS = "
-PTS = "Prefill T/s" #Prefill throughput (tok/s)
+TS = "T/s" #Decoding throughput (tok/s)
 InFrame = "Method" #"Inference framework"
 MULTIPLE_CHOICEs = ["mmlu"]

-
 GPU_TEMP = 'Temp(C)'
 GPU_Power = 'Power(W)'
 GPU_Mem = 'Mem(G)'
 GPU_Name = "GPU"
 GPU_Util = 'Util(%)'
-
-
-PSMFU = 'Prefill S-MFU(%)'
-PSMBU = 'Prefill S-MBU(%)'
+MFU = 'S-MFU(%)'
+MBU = 'S-MBU(%)'
 BATCH_SIZE = 'bs'
 PRECISION = "Precision"
 system_metrics_to_name_map = {
     "end_to_end_time": f"{E2Es}",
     "prefilling_time": f"{PREs}",
     "decoding_throughput": f"{TS}",
-    "
-    "
-    "prefill_throughput": f"{PTS}",
-    "prefill_mfu": f"{PSMFU}",
-    "prefill_mbu": f"{PSMBU}",
+    "mfu": f"{MFU}",
+    "mbu": f"{MBU}"
 }

 gpu_metrics_to_name_map = {

@@ -85,11 +78,10 @@ class Tasks(Enum):

     # # XXX include me back at some point
     # selfcheck = Task("selfcheckgpt", "max-selfcheckgpt", "SelfCheckGPT")
-
+    mmlu = Task("mmlu", "acc", "MMLU") #MMLU/Acc (5-shot)
     gsm8k = Task("gsm8k_custom", "em", "GSM8K") #GSM8K/EM (5-shot)
     # gsm8k_cot = Task("gsm8k_cot", "em", "GSM8K COT") #GSM8K COT/EM (5-shot)
     arena_hard = Task("arena_hard", "score", "Arena Hard") #Arena Hard/Score
-    mmlu = Task("mmlu", "acc", "MMLU") #MMLU/Acc (5-shot)


 # These classes are for user facing column names,

@@ -125,18 +117,12 @@ for task in Tasks:
     # auto_eval_column_dict.append([f"{task.name}_gpu_mem", ColumnContent, ColumnContent(f"{task.value.col_name} {GPU_Mem}", "number", True, hidden=True)])
     auto_eval_column_dict.append([f"{task.name}_gpu", ColumnContent, ColumnContent(f"{task.value.col_name} {GPU_Name}", "str", True, hidden=True)])
     # auto_eval_column_dict.append([f"{task.name}_gpu_util", ColumnContent, ColumnContent(f"{task.value.col_name} {GPU_Util}", "number", True, hidden=True)])
-    auto_eval_column_dict.append([f"{task.name}_prefilling_time", ColumnContent, ColumnContent(f"{task.value.col_name} {PREs}", "number", False, hidden=True)])
     if task.value.benchmark in MULTIPLE_CHOICEs:
         continue
+    auto_eval_column_dict.append([f"{task.name}_prefilling_time", ColumnContent, ColumnContent(f"{task.value.col_name} {PREs}", "number", False, hidden=True)])
     auto_eval_column_dict.append([f"{task.name}_decoding_throughput", ColumnContent, ColumnContent(f"{task.value.col_name} {TS}", "number", True, hidden=True)])
-
-
-    auto_eval_column_dict.append([f"{task.name}_decoding_mbu", ColumnContent, ColumnContent(f"{task.value.col_name} {DSMBU}", "number", True, hidden=True)])
-    auto_eval_column_dict.append([f"{task.name}_decoding_mfu", ColumnContent, ColumnContent(f"{task.value.col_name} {DSMFU}", "number", True, hidden=True)])
-    auto_eval_column_dict.append([f"{task.name}_prefill_throughput", ColumnContent, ColumnContent(f"{task.value.col_name} {PTS}", "number", True, hidden=True)])
-    auto_eval_column_dict.append([f"{task.name}_prefill_mbu", ColumnContent, ColumnContent(f"{task.value.col_name} {PSMBU}", "number", True, hidden=True)])
-    auto_eval_column_dict.append([f"{task.name}_prefill_mfu", ColumnContent, ColumnContent(f"{task.value.col_name} {PSMFU}", "number", True, hidden=True)])
-
+    auto_eval_column_dict.append([f"{task.name}_mbu", ColumnContent, ColumnContent(f"{task.value.col_name} {MBU}", "number", True, hidden=True)])
+    auto_eval_column_dict.append([f"{task.name}_mfu", ColumnContent, ColumnContent(f"{task.value.col_name} {MFU}", "number", True, hidden=True)])


 # Model information

@@ -201,9 +187,8 @@ class InferenceFramework(Enum):
     # MoE_Infinity = ModelDetails("moe-infinity")
     HF_Chat = ModelDetails("hf-chat")
     VLLM = ModelDetails("vllm_moe")
-    VLLM_FIX = ModelDetails("vllm_moe_fixbs")
     TRTLLM = ModelDetails("tensorrt_llm")
-
+    VLLM_FIX = ModelDetails("vllm_moe_fixbs")
     Unknown = ModelDetails("?")

     def to_str(self):

@@ -221,13 +206,10 @@ class InferenceFramework(Enum):
         return InferenceFramework.VLLM
     if inference_framework in ["vllm_moe_fixbs"]:
         return InferenceFramework.VLLM_FIX
-    if inference_framework in ["sglang"]:
-        return InferenceFramework.SGLANG
     return InferenceFramework.Unknown

 class GPUType(Enum):
     A100_sxm = ModelDetails("NVIDIA-A100-SXM4-80GB")
-    A100_sxm4 = ModelDetails("NVIDIA-A100-SMX4-80GB")
     A100_pcie = ModelDetails("NVIDIA-A100-PCIe-80GB")
     Unknown = ModelDetails("?")
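The per-task columns above are accumulated as `[attribute_name, type, default]` triples and then turned into a single dataclass, the standard leaderboard-template trick. A self-contained sketch (the exact field names of `ColumnContent` are assumptions based on the call sites in the diff):

```python
from dataclasses import dataclass, make_dataclass

@dataclass(frozen=True)
class ColumnContent:          # field names assumed from the call sites above
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False

auto_eval_column_dict = []
for task_name, col_name in [("mmlu", "MMLU"), ("gsm8k", "GSM8K")]:
    auto_eval_column_dict.append(
        [f"{task_name}_mfu", ColumnContent,
         ColumnContent(f"{col_name} S-MFU(%)", "number", True, hidden=True)]
    )

# One dataclass holding every column, as the leaderboard template does.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
print(AutoEvalColumn.mmlu_mfu.name)  # -> MMLU S-MFU(%)
```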
src/populate.py
CHANGED
@@ -75,7 +75,7 @@ def get_leaderboard_df(
         df[col] = np.nan

     if not df.empty:
-        df = df.
+        df = df.round(decimals=2)

         # filter out if any of the benchmarks have not been produced
         # df = df[has_no_nan_values(df, benchmark_cols)]
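The one functional change here is display rounding. What `DataFrame.round` does to the leaderboard frame, in miniature:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"E2E(s)": [1.23456, np.nan], "T/s": [48.6789, 51.2345]})
print(df.round(decimals=2))
#    E2E(s)    T/s
# 0    1.23  48.68
# 1     NaN  51.23
```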
src/utils.py
CHANGED
@@ -4,8 +4,6 @@ import subprocess
 import re
 import os
 import GPUtil
-from transformers import AutoConfig
-from typing import List

 try:
     from src.display.utils import GPU_TEMP, GPU_Mem, GPU_Power, GPU_Util, GPU_Name
@@ -14,63 +12,44 @@ except:
     from display.utils import GPU_TEMP, GPU_Mem, GPU_Power, GPU_Util, GPU_Name

 MEM_BW_DICT ={
-    "NVIDIA-A100-PCIe-80GB":
-    "NVIDIA-A100-
-    "NVIDIA-H100-PCIe-80GB":
-    "NVIDIA-RTX-A5000-24GB":
-    "NVIDIA-RTX-A6000-48GB": 768e9,
 }

 PEAK_FLOPS_DICT = {
     "float32":{
         "NVIDIA-A100-PCIe-80GB": 312e12,
-        "NVIDIA-A100-
         "NVIDIA-H100-PCIe-80GB": 756e12,
-        "NVIDIA-RTX-A5000-24GB": 222.2e12
-        "NVIDIA-RTX-A6000-48GB": 309.7e12
     },
     "float16":{
         "NVIDIA-A100-PCIe-80GB": 624e12,
-        "NVIDIA-A100-
         "NVIDIA-H100-PCIe-80GB": 1513e12,
-        "NVIDIA-RTX-A5000-24GB":
-        "NVIDIA-RTX-A6000-48GB": 309.7e12
     },
     "bfloat16":{
         "NVIDIA-A100-PCIe-80GB": 624e12,
-        "NVIDIA-A100-
         "NVIDIA-H100-PCIe-80GB": 1513e12,
-        "NVIDIA-RTX-A5000-24GB":
-        "NVIDIA-RTX-A6000-48GB": 309.7e12
     },
-    "
         "NVIDIA-A100-PCIe-80GB": 1248e12,
-        "NVIDIA-A100-
         "NVIDIA-H100-PCIe-80GB": 3026e12,
-        "NVIDIA-RTX-A5000-24GB":
-        "NVIDIA-RTX-A6000-48GB": 309.7e12
     },
-    "
-        "NVIDIA-A100-PCIe-80GB":
-        "NVIDIA-A100-
-        "NVIDIA-H100-PCIe-80GB":
-        "NVIDIA-RTX-A5000-24GB":
-        "NVIDIA-RTX-A6000-48GB": 0
-    },
-    "fp4": {
-        "NVIDIA-A100-PCIe-80GB": 1248e12,
-        "NVIDIA-A100-SXM4-80GB": 1248e12,
-        "NVIDIA-H100-PCIe-80GB": 3026e12,
-        "NVIDIA-RTX-A5000-24GB": 0,
-        "NVIDIA-RTX-A6000-48GB": 0
-    },
-    "int4": {
-        "NVIDIA-A100-PCIe-80GB": 1248e12,
-        "NVIDIA-A100-SXM4-80GB": 1248e12,
-        "NVIDIA-H100-PCIe-80GB": 3026e12,
-        "NVIDIA-RTX-A5000-24GB": 222.2e12,
-        "NVIDIA-RTX-A6000-48GB": 309.7e12
     }
 }

 def my_snapshot_download(repo_id, revision, local_dir, repo_type, max_workers):
@@ -118,7 +97,7 @@ def parse_nvidia_smi():
     # print(f"gpu_indices: {gpu_indices}")
     gpu_stats = []

-    gpu_info_pattern = re.compile(r'(\d+)C\s+P\d+\s+(\d+)W\
     # gpu_name_pattern = re.compile(r'NVIDIA\s+([\w\s]+\d+(?:\s*GB)?)')
     gpu_name_pattern = re.compile(r'NVIDIA\s+(RTX\s+)?([A-Z0-9]+)')

@@ -216,790 +195,17 @@ def get_peak_bw(gpu_name):
|
|
216 |
def get_peak_flops(gpu_name, precision):
|
217 |
return PEAK_FLOPS_DICT[precision][gpu_name]
|
218 |
|
219 |
-
def
|
220 |
-
|
221 |
-
|
222 |
-
|
223 |
-
|
224 |
-
|
225 |
-
|
226 |
-
|
227 |
-
|
228 |
-
}
|
229 |
-
kvs = []
|
230 |
-
true_kvs = []
|
231 |
-
attn_score = []
|
232 |
-
|
233 |
-
# Calculate KV sizes
|
234 |
-
per_token_kv_size = 2 * n_layers * d_head * n_kv_heads # Default calculation
|
235 |
-
|
236 |
-
if "DeepSeek" in model_name:
|
237 |
-
if hasattr(hf_config, "kv_lora_rank") and hasattr(hf_config, "qk_rope_head_dim"):
|
238 |
-
per_token_kv_size = n_layers * (hf_config.kv_lora_rank + hf_config.qk_rope_head_dim)
|
239 |
-
|
240 |
-
# Process each output
|
241 |
-
for x in outputs:
|
242 |
-
output_len = len(x.outputs[0].token_ids)
|
243 |
-
context_prefill_size = len(x.prompt_token_ids)
|
244 |
-
|
245 |
-
# Calculate attention scores
|
246 |
-
if "DeepSeek" in model_name and hasattr(hf_config, "qk_rope_head_dim") and hasattr(hf_config, "qk_nope_head_dim") and hasattr(hf_config, "v_head_dim"):
|
247 |
-
q_head_dim = hf_config.qk_rope_head_dim + hf_config.qk_nope_head_dim
|
248 |
-
origin_per_token_k_state_size = n_layers * n_attn_heads * q_head_dim
|
249 |
-
origin_per_token_v_state_size = n_layers * n_attn_heads * hf_config.v_head_dim
|
250 |
-
attention_score = context_prefill_size * origin_per_token_k_state_size + (output_len - 1) * origin_per_token_k_state_size / 2
|
251 |
-
attention_score += context_prefill_size * origin_per_token_v_state_size + (output_len - 1) * origin_per_token_v_state_size / 2
|
252 |
-
attention_score = attention_score / 1e12
|
253 |
-
else:
|
254 |
-
origin_per_token_kv_states_size = n_layers * n_attn_heads * d_head
|
255 |
-
attention_score = context_prefill_size * origin_per_token_kv_states_size + (output_len - 1) * origin_per_token_kv_states_size / 2
|
256 |
-
attention_score = attention_score * 2 / 1e12
|
257 |
-
|
258 |
-
# Store attention scores and KV sizes
|
259 |
-
attn_score.append(attention_score)
|
260 |
-
kv_size = context_prefill_size * per_token_kv_size + (output_len - 1) * per_token_kv_size / 2
|
261 |
-
kv_size = kv_size / 1e12
|
262 |
-
true_kv = (context_prefill_size * per_token_kv_size + output_len * per_token_kv_size) / 1e12 * 1e3
|
263 |
-
kvs.append(kv_size)
|
264 |
-
true_kvs.append(true_kv)
|
265 |
-
|
266 |
-
# Calculate aggregate values
|
267 |
-
kv_size = sum(kvs)
|
268 |
-
true_kv_size = sum(true_kvs) * 1e3
|
269 |
-
attention_score = sum(attn_score) / len(attn_score)
|
270 |
-
|
271 |
-
# Calculate attention size per token
|
272 |
-
if "DeepSeek" in model_name and hasattr(hf_config, "qk_rope_head_dim") and hasattr(hf_config, "qk_nope_head_dim") and hasattr(hf_config, "v_head_dim") and hasattr(hf_config, "kv_lora_rank"):
|
273 |
-
q_head_dim = hf_config.qk_rope_head_dim + hf_config.qk_nope_head_dim
|
274 |
-
if not hasattr(hf_config, "q_lora_rank") or not hf_config.q_lora_rank:
|
275 |
-
attention_size_per_token = (d_model * n_attn_heads * q_head_dim) + \
|
276 |
-
(d_model * (hf_config.kv_lora_rank + hf_config.qk_rope_head_dim)) + \
|
277 |
-
(hf_config.kv_lora_rank * n_attn_heads * (q_head_dim - hf_config.qk_rope_head_dim + hf_config.v_head_dim)) + \
|
278 |
-
(hf_config.v_head_dim * n_attn_heads * d_model)
|
279 |
-
attention_size_per_token = attention_size_per_token / 1e12
|
280 |
-
else:
|
281 |
-
attention_size_per_token = (d_model * hf_config.q_lora_rank) + \
|
282 |
-
(hf_config.q_lora_rank * n_attn_heads * q_head_dim) + \
|
283 |
-
(d_model * (hf_config.kv_lora_rank + hf_config.qk_rope_head_dim)) + \
|
284 |
-
(hf_config.kv_lora_rank * n_attn_heads * (q_head_dim - hf_config.qk_rope_head_dim + hf_config.v_head_dim)) + \
|
285 |
-
(hf_config.v_head_dim * n_attn_heads * d_model)
|
286 |
-
attention_size_per_token = attention_size_per_token / 1e12
|
287 |
-
else:
|
288 |
-
attention_size_per_token = d_model * (n_attn_heads * d_head + n_kv_heads * d_head * 2) + n_attn_heads * d_head * d_model
|
289 |
-
attention_size_per_token = attention_size_per_token / 1e12
|
290 |
-
|
291 |
-
# Calculate expert sizes
|
292 |
-
expert_size = d_ff * 3 * d_model / 1e12
|
293 |
-
shared_experts_size_total = 0
|
294 |
-
deepseek_dense_ffn_size = 0
|
295 |
-
deepseek_sparse_layer_num = 0
|
296 |
-
|
297 |
-
if "Qwen" in model_name and hasattr(hf_config, "moe_intermediate_size") and hasattr(hf_config, "shared_expert_intermediate_size"):
|
298 |
-
d_ff = hf_config.moe_intermediate_size
|
299 |
-
d_ff_share = hf_config.shared_expert_intermediate_size
|
300 |
-
shared_experts_size = d_ff_share * 3 * d_model
|
301 |
-
expert_size = d_ff * 3 * d_model
|
302 |
-
shared_experts_size_total = shared_experts_size / 1e12
|
303 |
-
expert_size = expert_size / 1e12
|
304 |
-
elif "Qwen3" in model_name and hasattr(hf_config, "moe_intermediate_size"):
|
305 |
-
d_ff = hf_config.moe_intermediate_size
|
306 |
-
expert_size = d_ff * 3 * d_model
|
307 |
-
expert_size = expert_size / 1e12
|
308 |
-
elif "DeepSeek" in model_name and hasattr(hf_config, "moe_intermediate_size") and hasattr(hf_config, "intermediate_size") and hasattr(hf_config, "first_k_dense_replace"):
|
309 |
-
d_ff = hf_config.moe_intermediate_size
|
310 |
-
d_ff_dense = hf_config.intermediate_size
|
311 |
-
deepseek_num_dense_layer = hf_config.first_k_dense_replace
|
312 |
-
shared_experts_size = d_ff * 3 * d_model
|
313 |
-
expert_size = d_ff * 3 * d_model
|
314 |
-
shared_experts = 2
|
315 |
-
shared_experts_size_total = shared_experts_size * shared_experts / 1e12
|
316 |
-
expert_size = expert_size / 1e12
|
317 |
-
deepseek_sparse_layer_num = n_layers - deepseek_num_dense_layer
|
318 |
-
deepseek_dense_ffn_size = d_ff_dense * 3 * d_model / 1e12
|
319 |
-
|
320 |
-
# Calculate S-MBU and S-MFU
|
321 |
-
if "Qwen" in model_name and not "Qwen3" in model_name:
|
322 |
-
smbu = ((n_layers*(avg_activated_experts * expert_size + shared_experts_size_total + attention_size_per_token) +
|
323 |
-
kv_size) * precision/ (batch_size / decoding_tp)) / (num_gpus * hardware_specs['peak_bandwidth_tb'])
|
324 |
-
smfu = (n_layers * (attention_size_per_token + n_experts_per_tok * expert_size + shared_experts_size_total) + attention_score) \
|
325 |
-
* 2 * decoding_tp / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
|
326 |
-
elif "Qwen3" in model_name:
|
327 |
-
smbu = ((n_layers * (avg_activated_experts * expert_size + attention_size_per_token) +
|
328 |
-
kv_size) * precision/ (batch_size / decoding_tp)) / (num_gpus * hardware_specs['peak_bandwidth_tb'])
|
329 |
-
smfu = (n_layers * (attention_size_per_token + n_experts_per_tok * expert_size) + attention_score) \
|
330 |
-
* 2 * decoding_tp / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
|
331 |
-
elif "DeepSeek" in model_name:
|
332 |
-
smbu = ((n_layers * attention_size_per_token + deepseek_sparse_layer_num * \
|
333 |
-
(avg_activated_experts * expert_size + shared_experts_size_total) + \
|
334 |
-
deepseek_num_dense_layer * deepseek_dense_ffn_size + \
|
335 |
-
kv_size) * precision/ (batch_size / decoding_tp)) / (num_gpus * hardware_specs['peak_bandwidth_tb'])
|
336 |
-
smfu = (n_layers * attention_size_per_token + deepseek_sparse_layer_num * \
|
337 |
-
(n_experts_per_tok * expert_size + shared_experts_size_total) + \
|
338 |
-
deepseek_num_dense_layer * deepseek_dense_ffn_size + attention_score) \
|
339 |
-
* 2 * decoding_tp / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
|
340 |
-
else:
|
341 |
-
smbu = ((n_layers*(avg_activated_experts * expert_size + attention_size_per_token) +
|
342 |
-
kv_size) * precision/ (batch_size / decoding_tp) ) / (num_gpus * hardware_specs['peak_bandwidth_tb'])
|
343 |
-
smfu = (n_layers * (attention_size_per_token + n_experts_per_tok * expert_size) + attention_score) \
|
344 |
-
* 2 * decoding_tp / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
|
345 |
-
|
346 |
-
return {
|
347 |
-
'smbu': smbu,
|
348 |
-
'smfu': smfu,
|
349 |
-
'kv_size': true_kv_size,
|
350 |
-
'decoding_throughput': decoding_tp
|
351 |
-
}
|
352 |
-
|
353 |
-
def _calculate_batch_metrics_sglang(outputs, decoding_tp, n_layers, d_model,
|
354 |
-
n_attn_heads, d_head, n_kv_heads, n_experts_per_tok, d_ff,
|
355 |
-
avg_activated_experts, hf_config, num_gpus, model_name,
|
356 |
-
used_dtype, batch_size, precision, ttft=None, prefill_tp=None):
|
357 |
-
"""Calculate metrics for a batch of outputs"""
|
358 |
-
# Initialize hardware specs and output lists
|
359 |
-
hardware_specs = _get_hardware_specs(used_dtype)
|
360 |
-
output_data = _extract_output_data(outputs)
|
361 |
-
|
362 |
-
# Calculate model-specific sizes
|
363 |
-
per_token_kv_size = _calculate_kv_size(model_name, hf_config, n_layers, d_head, n_kv_heads)
|
364 |
-
attention_size_per_token = _calculate_attention_size(model_name, hf_config, d_model, n_attn_heads, d_head, n_kv_heads)
|
365 |
-
expert_config = _calculate_expert_config(model_name, hf_config, d_ff, d_model, n_layers)
|
366 |
-
|
367 |
-
# Process outputs and calculate metrics
|
368 |
-
metrics_data = _process_outputs(output_data, per_token_kv_size, attention_size_per_token,
|
369 |
-
model_name, hf_config, n_layers, n_attn_heads, d_head)
|
370 |
-
|
371 |
-
# Calculate throughput metrics
|
372 |
-
if ttft is None or prefill_tp is None:
|
373 |
-
ttft, prefill_tp = _calculate_throughput_metrics(batch_size, output_data['prefill_lengths'],
|
374 |
-
output_data['max_duration'])
|
375 |
-
|
376 |
-
|
377 |
-
# Calculate S-MBU and S-MFU
|
378 |
-
smbu_smfu_metrics = _calculate_smbu_smfu(model_name, n_layers, attention_size_per_token,
|
379 |
-
expert_config, avg_activated_experts, metrics_data,
|
380 |
-
hardware_specs, num_gpus, precision, ttft, prefill_tp,
|
381 |
-
batch_size, decoding_tp)
|
382 |
-
|
383 |
-
return {
|
384 |
-
'prefill_smbu': smbu_smfu_metrics['prefill_smbu'],
|
385 |
-
'prefill_smfu': smbu_smfu_metrics['prefill_smfu'],
|
386 |
-
'decoding_smbu': smbu_smfu_metrics['decoding_smbu'],
|
387 |
-
'decoding_smfu': smbu_smfu_metrics['decoding_smfu'],
|
388 |
-
'kv_size': metrics_data['true_kv_size'],
|
389 |
-
'decoding_throughput': decoding_tp,
|
390 |
-
'prefill_tp': prefill_tp,
|
391 |
-
'ttft': ttft
|
392 |
-
}
|
393 |
-
|
394 |
-
|
395 |
-
def _get_hardware_specs(used_dtype):
|
396 |
-
"""Get hardware specifications"""
|
397 |
-
gpu_type = get_gpu_details()
|
398 |
-
return {
|
399 |
-
"peak_bandwidth_tb": get_peak_bw(gpu_type) / 1e12,
|
400 |
-
"peak_flops_tf": get_peak_flops(gpu_type, precision=used_dtype) / 1e12,
|
401 |
-
}
|
402 |
-
|
403 |
-
|
404 |
-
def _extract_output_data(outputs):
|
405 |
-
"""Extract relevant data from outputs"""
|
406 |
-
prefill_lengths = []
|
407 |
-
output_lengths = []
|
408 |
-
max_duration = 0.0
|
409 |
-
|
410 |
-
for x in outputs:
|
411 |
-
output_lengths.append(x['meta_info']['completion_tokens'])
|
412 |
-
prefill_lengths.append(x['meta_info']['prompt_tokens'])
|
413 |
-
max_duration = max(max_duration, x['meta_info']['e2e_latency'])
|
414 |
-
|
415 |
-
return {
|
416 |
-
'prefill_lengths': prefill_lengths,
|
417 |
-
'output_lengths': output_lengths,
|
418 |
-
'max_duration': max_duration
|
419 |
-
}
|
420 |
-
|
421 |
-
|
422 |
-
def _calculate_kv_size(model_name, hf_config, n_layers, d_head, n_kv_heads):
|
423 |
-
"""Calculate per-token KV size based on model type"""
|
424 |
-
if "DeepSeek" in model_name and hasattr(hf_config, "kv_lora_rank") and hasattr(hf_config, "qk_rope_head_dim"):
|
425 |
-
return n_layers * (hf_config.kv_lora_rank + hf_config.qk_rope_head_dim)
|
426 |
-
return 2 * n_layers * d_head * n_kv_heads
|
427 |
-
|
428 |
-
|
429 |
-
def _calculate_attention_size(model_name, hf_config, d_model, n_attn_heads, d_head, n_kv_heads):
|
430 |
-
"""Calculate attention size per token based on model type"""
|
431 |
-
if ("DeepSeek" in model_name and
|
432 |
-
hasattr(hf_config, "qk_rope_head_dim") and
|
433 |
-
hasattr(hf_config, "qk_nope_head_dim") and
|
434 |
-
hasattr(hf_config, "v_head_dim") and
|
435 |
-
hasattr(hf_config, "kv_lora_rank")):
|
436 |
-
|
437 |
-
return _calculate_deepseek_attention_size(hf_config, d_model, n_attn_heads)
|
438 |
-
|
439 |
-
return (d_model * (n_attn_heads * d_head + n_kv_heads * d_head * 2) +
|
440 |
-
n_attn_heads * d_head * d_model) / 1e12
|
441 |
-
|
442 |
-
|
443 |
-
def _calculate_deepseek_attention_size(hf_config, d_model, n_attn_heads):
|
444 |
-
"""Calculate DeepSeek-specific attention size"""
|
445 |
-
q_head_dim = hf_config.qk_rope_head_dim + hf_config.qk_nope_head_dim
|
446 |
-
|
447 |
-
base_size = ((d_model * (hf_config.kv_lora_rank + hf_config.qk_rope_head_dim)) +
|
448 |
-
(hf_config.kv_lora_rank * n_attn_heads *
|
449 |
-
(q_head_dim - hf_config.qk_rope_head_dim + hf_config.v_head_dim)) +
|
450 |
-
(hf_config.v_head_dim * n_attn_heads * d_model))
|
451 |
-
|
452 |
-
if hasattr(hf_config, "q_lora_rank") and hf_config.q_lora_rank:
|
453 |
-
q_size = (d_model * hf_config.q_lora_rank +
|
454 |
-
hf_config.q_lora_rank * n_attn_heads * q_head_dim)
|
455 |
-
else:
|
456 |
-
q_size = d_model * n_attn_heads * q_head_dim
|
457 |
-
|
458 |
-
return (base_size + q_size) / 1e12
|
459 |
-
|
460 |
-
|
461 |
-
def _calculate_expert_config(model_name, hf_config, d_ff, d_model, n_layers):
|
462 |
-
"""Calculate expert configuration based on model type"""
|
463 |
-
config = {
|
464 |
-
'expert_size': d_ff * 3 * d_model / 1e12,
|
465 |
-
'shared_experts_size_total': 0,
|
466 |
-
'deepseek_dense_ffn_size': 0,
|
467 |
-
'deepseek_sparse_layer_num': 0,
|
468 |
-
'deepseek_num_dense_layer': 0
|
469 |
-
}
|
470 |
-
|
471 |
-
if "Qwen" in model_name and not "Qwen3" in model_name:
|
472 |
-
config.update(_get_qwen_expert_config(hf_config, d_model))
|
473 |
-
elif "Qwen3" in model_name:
|
474 |
-
config.update(_get_qwen3_expert_config(hf_config, d_model))
|
475 |
-
elif "DeepSeek" in model_name:
|
476 |
-
config.update(_get_deepseek_expert_config(hf_config, d_model, n_layers))
|
477 |
-
|
478 |
-
return config
|
479 |
-
|
480 |
-
|
481 |
-
def _get_qwen_expert_config(hf_config, d_model):
|
482 |
-
"""Get Qwen-specific expert configuration"""
|
483 |
-
if (hasattr(hf_config, "moe_intermediate_size") and
|
484 |
-
hasattr(hf_config, "shared_expert_intermediate_size")):
|
485 |
-
|
486 |
-
return {
|
487 |
-
'expert_size': hf_config.moe_intermediate_size * 3 * d_model / 1e12,
|
488 |
-
'shared_experts_size_total': hf_config.shared_expert_intermediate_size * 3 * d_model / 1e12
|
489 |
-
}
|
490 |
-
return {}
|
491 |
-
|
492 |
-
|
493 |
-
def _get_qwen3_expert_config(hf_config, d_model):
|
494 |
-
"""Get Qwen3-specific expert configuration"""
|
495 |
-
if hasattr(hf_config, "moe_intermediate_size"):
|
496 |
-
return {
|
497 |
-
'expert_size': hf_config.moe_intermediate_size * 3 * d_model / 1e12
|
498 |
-
}
|
499 |
-
return {}
|
500 |
-
|
501 |
-
|
502 |
-
def _get_deepseek_expert_config(hf_config, d_model, n_layers):
|
503 |
-
"""Get DeepSeek-specific expert configuration"""
|
504 |
-
if (hasattr(hf_config, "moe_intermediate_size") and
|
505 |
-
hasattr(hf_config, "intermediate_size") and
|
506 |
-
hasattr(hf_config, "first_k_dense_replace")):
|
507 |
-
|
508 |
-
deepseek_num_dense_layer = hf_config.first_k_dense_replace
|
509 |
-
return {
|
510 |
-
'expert_size': hf_config.moe_intermediate_size * 3 * d_model / 1e12,
|
511 |
-
'shared_experts_size_total': hf_config.moe_intermediate_size * 3 * d_model * 2 / 1e12,
|
512 |
-
'deepseek_dense_ffn_size': hf_config.intermediate_size * 3 * d_model / 1e12,
|
513 |
-
'deepseek_sparse_layer_num': n_layers - deepseek_num_dense_layer,
|
514 |
-
'deepseek_num_dense_layer': deepseek_num_dense_layer
|
515 |
-
}
|
516 |
-
return {}
|
517 |
-
|
518 |
-
|
519 |
-
def _process_outputs(output_data, per_token_kv_size, attention_size_per_token,
|
520 |
-
model_name, hf_config, n_layers, n_attn_heads, d_head):
|
521 |
-
"""Process outputs to calculate KV sizes and attention scores"""
|
522 |
-
kvs = []
|
523 |
-
true_kvs = []
|
524 |
-
attn_scores = []
|
525 |
-
|
526 |
-
for prefill_len, output_len in zip(output_data['prefill_lengths'], output_data['output_lengths']):
|
527 |
-
# Calculate attention score
|
528 |
-
attn_score = _calculate_attention_score(model_name, hf_config, prefill_len, output_len,
|
529 |
-
n_layers, n_attn_heads, d_head)
|
530 |
-
attn_scores.append(attn_score)
|
531 |
-
|
532 |
-
# Calculate KV sizes
|
533 |
-
kv_size = (prefill_len * per_token_kv_size + (output_len - 1) * per_token_kv_size / 2) / 1e12
|
534 |
-
true_kv = (prefill_len * per_token_kv_size + output_len * per_token_kv_size) / 1e9
|
535 |
-
|
536 |
-
kvs.append(kv_size)
|
537 |
-
true_kvs.append(true_kv)
|
538 |
-
|
539 |
-
return {
|
540 |
-
'kv_size': sum(kvs),
|
541 |
-
'true_kv_size': sum(true_kvs) * 1e3,
|
542 |
-
'attention_score': sum(attn_scores) / len(attn_scores)
|
543 |
-
}
|
544 |
-
|
545 |
-
|
546 |
-
def _calculate_attention_score(model_name, hf_config, prefill_len, output_len,
|
547 |
-
n_layers, n_attn_heads, d_head):
|
548 |
-
"""Calculate attention score for a single output"""
|
549 |
-
if ("DeepSeek" in model_name and
|
550 |
-
hasattr(hf_config, "qk_rope_head_dim") and
|
551 |
-
hasattr(hf_config, "qk_nope_head_dim") and
|
552 |
-
hasattr(hf_config, "v_head_dim")):
|
553 |
-
|
554 |
-
q_head_dim = hf_config.qk_rope_head_dim + hf_config.qk_nope_head_dim
|
555 |
-
k_size = n_layers * n_attn_heads * q_head_dim
|
556 |
-
v_size = n_layers * n_attn_heads * hf_config.v_head_dim
|
557 |
-
|
558 |
-
score = (prefill_len * k_size + (output_len - 1) * k_size / 2 +
|
559 |
-
prefill_len * v_size + (output_len - 1) * v_size / 2)
|
560 |
-
else:
|
561 |
-
kv_size = n_layers * n_attn_heads * d_head
|
562 |
-
score = (prefill_len * kv_size + (output_len - 1) * kv_size / 2) * 2
|
563 |
-
|
564 |
-
return score / 1e12
|
565 |
-
|
566 |
-
|
567 |
-
def _calculate_throughput_metrics(batch_size, prefill_lengths, max_duration):
|
568 |
-
"""Calculate throughput metrics"""
|
569 |
-
total_prefill = sum(prefill_lengths)
|
570 |
-
prefill_tp = total_prefill / (max_duration)
|
571 |
-
ttft = max_duration / batch_size
|
572 |
-
return ttft, prefill_tp
|
573 |
-
|
574 |
-
|
575 |
-
def _calculate_smbu_smfu(model_name, n_layers, attention_size_per_token, expert_config,
|
576 |
-
avg_activated_experts, metrics_data, hardware_specs, num_gpus,
|
577 |
-
precision, ttft, prefill_tp, batch_size, decoding_tp):
|
578 |
-
"""Calculate S-MBU and S-MFU metrics"""
|
579 |
-
prefill_activation = avg_activated_experts[1]
|
580 |
-
decode_steps_activation = avg_activated_experts[2:]
|
581 |
-
|
582 |
-
# Calculate prefill metrics
|
583 |
-
prefill_smbu, prefill_smfu = _calculate_prefill_metrics(
|
584 |
-
model_name, n_layers, attention_size_per_token, expert_config,
|
585 |
-
prefill_activation, metrics_data['attention_score'], hardware_specs,
|
586 |
-
num_gpus, precision, ttft, prefill_tp
|
587 |
-
)
|
588 |
-
|
589 |
-
# Calculate decoding metrics
|
590 |
-
decoding_smbu, decoding_smfu = _calculate_decoding_metrics(
|
591 |
-
model_name, n_layers, attention_size_per_token, expert_config,
|
592 |
-
decode_steps_activation, metrics_data, hardware_specs,
|
593 |
-
num_gpus, precision, batch_size, decoding_tp
|
594 |
-
)
|
595 |
-
|
596 |
-
return {
|
597 |
-
'prefill_smbu': prefill_smbu,
|
598 |
-
'prefill_smfu': prefill_smfu,
|
599 |
-
'decoding_smbu': decoding_smbu,
|
600 |
-
'decoding_smfu': decoding_smfu
|
601 |
-
}
|
602 |
-
|
603 |
-
|
604 |
-
def _calculate_prefill_metrics(model_name, n_layers, attention_size_per_token, expert_config,
|
605 |
-
prefill_activation, attention_score, hardware_specs,
|
606 |
-
num_gpus, precision, ttft, prefill_tp):
|
607 |
-
"""Calculate prefill S-MBU and S-MFU"""
|
608 |
-
model_calculators = {
|
609 |
-
'Qwen': _calculate_qwen_prefill,
|
610 |
-
'Qwen3': _calculate_qwen3_prefill,
|
611 |
-
'DeepSeek': _calculate_deepseek_prefill
|
612 |
-
}
|
613 |
-
|
614 |
-
for model_type, calculator in model_calculators.items():
|
615 |
-
if model_type in model_name and (model_type != 'Qwen' or 'Qwen3' not in model_name):
|
616 |
-
return calculator(n_layers, attention_size_per_token, expert_config,
|
617 |
-
prefill_activation, attention_score, hardware_specs,
|
618 |
-
num_gpus, precision, ttft, prefill_tp)
|
619 |
-
|
620 |
-
# Default case
|
621 |
-
return _calculate_default_prefill(n_layers, attention_size_per_token, expert_config,
|
622 |
-
prefill_activation, attention_score, hardware_specs,
|
623 |
-
num_gpus, precision, ttft, prefill_tp)
|
624 |
-
|
625 |
-
|
626 |
-
def _calculate_decoding_metrics(model_name, n_layers, attention_size_per_token, expert_config,
|
627 |
-
decode_steps_activation, metrics_data, hardware_specs,
|
628 |
-
num_gpus, precision, batch_size, decoding_tp):
|
629 |
-
"""Calculate decoding S-MBU and S-MFU"""
|
630 |
-
decoding_smbus = []
|
631 |
-
|
632 |
-
for activation in decode_steps_activation:
|
633 |
-
if "Qwen" in model_name and "Qwen3" not in model_name:
|
634 |
-
smbu, smfu = _calculate_qwen_decoding(n_layers, attention_size_per_token, expert_config,
|
635 |
-
activation, metrics_data, hardware_specs, num_gpus,
|
636 |
-
precision, batch_size, decoding_tp)
|
637 |
-
elif "Qwen3" in model_name:
|
638 |
-
smbu, smfu = _calculate_qwen3_decoding(n_layers, attention_size_per_token, expert_config,
|
639 |
-
activation, metrics_data, hardware_specs, num_gpus,
|
640 |
-
precision, batch_size, decoding_tp)
|
641 |
-
elif "DeepSeek" in model_name:
|
642 |
-
smbu, smfu = _calculate_deepseek_decoding(n_layers, attention_size_per_token, expert_config,
|
643 |
-
activation, metrics_data, hardware_specs, num_gpus,
|
644 |
-
precision, batch_size, decoding_tp)
|
645 |
-
else:
|
646 |
-
smbu, smfu = _calculate_default_decoding(n_layers, attention_size_per_token, expert_config,
|
647 |
-
activation, metrics_data, hardware_specs, num_gpus,
|
648 |
-
precision, batch_size, decoding_tp)
|
649 |
-
decoding_smbus.append(smbu)
|
650 |
-
|
651 |
-
return sum(decoding_smbus) / len(decoding_smbus), smfu
|
652 |
-
|
653 |
-
|
654 |
-
# Helper functions for specific model calculations
|
655 |
-
def _calculate_qwen_prefill(n_layers, attention_size_per_token, expert_config, prefill_activation,
|
656 |
-
attention_score, hardware_specs, num_gpus, precision, ttft, prefill_tp):
|
657 |
-
smbu_numerator = (n_layers * (prefill_activation * expert_config['expert_size'] +
|
658 |
-
expert_config['shared_experts_size_total'] +
|
659 |
-
attention_size_per_token)) * precision / ttft
|
660 |
-
smbu = smbu_numerator / (num_gpus * hardware_specs['peak_bandwidth_tb'])
|
661 |
-
|
662 |
-
smfu_numerator = (n_layers * (attention_size_per_token + expert_config['expert_size'] +
|
663 |
-
expert_config['shared_experts_size_total']) + attention_score) * 2 * prefill_tp
|
664 |
-
smfu = smfu_numerator / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
|
665 |
-
|
666 |
-
return smbu, smfu
|
667 |
-
|
668 |
-
|
669 |
-
def _calculate_qwen3_prefill(n_layers, attention_size_per_token, expert_config, prefill_activation,
|
670 |
-
attention_score, hardware_specs, num_gpus, precision, ttft, prefill_tp):
|
671 |
-
smbu_numerator = (n_layers * (prefill_activation * expert_config['expert_size'] +
|
672 |
-
attention_size_per_token)) * precision / ttft
|
673 |
-
smbu = smbu_numerator / (num_gpus * hardware_specs['peak_bandwidth_tb'])
|
674 |
-
|
675 |
-
smfu_numerator = (n_layers * (attention_size_per_token + expert_config['expert_size']) +
|
676 |
-
attention_score) * 2 * prefill_tp
|
677 |
-
smfu = smfu_numerator / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
|
678 |
-
|
679 |
-
return smbu, smfu
|
680 |
-
|
681 |
-
|
682 |
-
def _calculate_deepseek_prefill(n_layers, attention_size_per_token, expert_config, prefill_activation,
|
683 |
-
attention_score, hardware_specs, num_gpus, precision, ttft, prefill_tp):
|
684 |
-
smbu_numerator = ((n_layers * attention_size_per_token +
|
685 |
-
expert_config['deepseek_sparse_layer_num'] *
|
686 |
-
(prefill_activation * expert_config['expert_size'] +
|
687 |
-
expert_config['shared_experts_size_total']) +
|
688 |
-
expert_config['deepseek_num_dense_layer'] *
|
689 |
-
expert_config['deepseek_dense_ffn_size']) * precision / ttft)
|
690 |
-
smbu = smbu_numerator / (num_gpus * hardware_specs['peak_bandwidth_tb'])
|
691 |
-
|
692 |
-
smfu_numerator = ((n_layers * attention_size_per_token +
|
693 |
-
expert_config['deepseek_sparse_layer_num'] *
|
694 |
-
(expert_config['expert_size'] + expert_config['shared_experts_size_total']) +
|
695 |
-
expert_config['deepseek_num_dense_layer'] *
|
696 |
-
expert_config['deepseek_dense_ffn_size'] + attention_score) * 2 * prefill_tp)
|
697 |
-
smfu = smfu_numerator / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
|
698 |
-
|
699 |
-
return smbu, smfu
|
700 |
-
|
701 |
-
|
702 |
-
def _calculate_default_prefill(n_layers, attention_size_per_token, expert_config, prefill_activation,
|
703 |
-
attention_score, hardware_specs, num_gpus, precision, ttft, prefill_tp):
|
704 |
-
# Default implementation
|
705 |
-
smbu_numerator = (n_layers * (prefill_activation * expert_config['expert_size'] +
|
706 |
-
attention_size_per_token)) * precision / ttft
|
707 |
-
smbu = smbu_numerator / (num_gpus * hardware_specs['peak_bandwidth_tb'])
|
708 |
-
|
709 |
-
smfu_numerator = (n_layers * (attention_size_per_token + expert_config['expert_size']) +
|
710 |
-
attention_score) * 2 * prefill_tp
|
711 |
-
smfu = smfu_numerator / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
|
712 |
-
|
713 |
-
return smbu, smfu
|
714 |
-
|
715 |
-
|
716 |
-
def _calculate_qwen_decoding(n_layers, attention_size_per_token, expert_config, activation,
|
717 |
-
metrics_data, hardware_specs, num_gpus, precision, batch_size, decoding_tp):
|
718 |
-
smbu_numerator = ((n_layers * (activation * expert_config['expert_size'] +
|
719 |
-
expert_config['shared_experts_size_total'] +
|
720 |
-
attention_size_per_token) +
|
721 |
-
metrics_data['kv_size']) * precision / (batch_size / decoding_tp))
|
722 |
-
smbu = smbu_numerator / (num_gpus * hardware_specs['peak_bandwidth_tb'])
|
723 |
-
|
724 |
-
smfu_numerator = ((n_layers * (attention_size_per_token + expert_config['expert_size'] +
|
725 |
-
expert_config['shared_experts_size_total']) +
|
726 |
-
metrics_data['attention_score']) * 2 * decoding_tp)
|
727 |
-
smfu = smfu_numerator / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
|
728 |
-
|
729 |
-
return smbu, smfu
|
730 |
-
|
731 |
-
|
732 |
-
def _calculate_qwen3_decoding(n_layers, attention_size_per_token, expert_config, activation,
|
733 |
-
metrics_data, hardware_specs, num_gpus, precision, batch_size, decoding_tp):
|
734 |
-
smbu_numerator = ((n_layers * (activation * expert_config['expert_size'] +
|
735 |
-
attention_size_per_token) +
|
736 |
-
metrics_data['kv_size']) * precision / (batch_size / decoding_tp))
|
737 |
-
smbu = smbu_numerator / (num_gpus * hardware_specs['peak_bandwidth_tb'])
|
738 |
-
|
739 |
-
smfu_numerator = ((n_layers * (attention_size_per_token + expert_config['expert_size']) +
|
740 |
-
metrics_data['attention_score']) * 2 * decoding_tp)
|
741 |
-
smfu = smfu_numerator / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
|
742 |
-
|
743 |
-
return smbu, smfu
|
744 |
-
|
745 |
-
|
746 |
-
def _calculate_deepseek_decoding(n_layers, attention_size_per_token, expert_config, activation,
|
747 |
-
metrics_data, hardware_specs, num_gpus, precision, batch_size, decoding_tp):
|
748 |
-
smbu_numerator = ((n_layers * attention_size_per_token +
|
749 |
-
expert_config['deepseek_sparse_layer_num'] *
|
750 |
-
(activation * expert_config['expert_size'] +
|
751 |
-
expert_config['shared_experts_size_total']) +
|
752 |
-
expert_config['deepseek_num_dense_layer'] *
|
753 |
-
expert_config['deepseek_dense_ffn_size'] +
|
754 |
-
metrics_data['kv_size']) * precision / (batch_size / decoding_tp))
|
755 |
-
smbu = smbu_numerator / (num_gpus * hardware_specs['peak_bandwidth_tb'])
|
756 |
-
|
757 |
-
smfu_numerator = ((n_layers * attention_size_per_token +
|
758 |
-
expert_config['deepseek_sparse_layer_num'] *
|
759 |
-
(expert_config['expert_size'] + expert_config['shared_experts_size_total']) +
|
760 |
-
expert_config['deepseek_num_dense_layer'] *
|
761 |
-
expert_config['deepseek_dense_ffn_size'] +
|
762 |
-
metrics_data['attention_score']) * 2 * decoding_tp)
|
763 |
-
smfu = smfu_numerator / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
|
764 |
-
|
765 |
-
return smbu, smfu
|
766 |
-
|
767 |
-
|
768 |
-
def _calculate_default_decoding(n_layers, attention_size_per_token, expert_config, activation,
|
769 |
-
metrics_data, hardware_specs, num_gpus, precision, batch_size, decoding_tp):
|
770 |
-
smbu_numerator = ((n_layers * (activation * expert_config['expert_size'] +
|
771 |
-
attention_size_per_token) +
|
772 |
-
metrics_data['kv_size']) * precision / (batch_size / decoding_tp))
|
773 |
-
smbu = smbu_numerator / (num_gpus * hardware_specs['peak_bandwidth_tb'])
|
774 |
-
|
775 |
-
smfu_numerator = ((n_layers * (attention_size_per_token + expert_config['expert_size']) +
|
776 |
-
metrics_data['attention_score']) * 2 * decoding_tp)
|
777 |
-
smfu = smfu_numerator / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
|
778 |
-
|
779 |
-
return smbu, smfu
|
780 |
-
|
781 |
-
def _calculate_batch_metrics_hflm(output_len, context_prefill_size, decoding_tp, n_layers, d_model,
|
782 |
-
n_attn_heads, d_head, n_kv_heads, n_experts_per_tok, d_ff,
|
783 |
-
avg_activated_experts, hf_config, num_gpus, model_name,
|
784 |
-
used_dtype, batch_size, precision):
|
785 |
-
"""Calculate metrics for a batch of outputs"""
|
786 |
-
gpu_type = get_gpu_details()
|
787 |
-
hardware_specs = {
|
788 |
-
"peak_bandwidth_tb": get_peak_bw(gpu_type) / 1e12,
|
789 |
-
"peak_flops_tf": get_peak_flops(gpu_type, precision=used_dtype) / 1e12,
|
790 |
-
}
|
791 |
-
|
792 |
-
# Calculate KV sizes
|
793 |
-
per_token_kv_size = 2 * n_layers * d_head * n_kv_heads # Default calculation
|
794 |
-
|
795 |
-
if "DeepSeek" in model_name:
|
796 |
-
if hasattr(hf_config, "kv_lora_rank") and hasattr(hf_config, "qk_rope_head_dim"):
|
797 |
-
per_token_kv_size = n_layers * (hf_config.kv_lora_rank + hf_config.qk_rope_head_dim)
|
798 |
-
|
799 |
-
|
800 |
-
# Calculate attention scores
|
801 |
-
if "DeepSeek" in model_name and hasattr(hf_config, "qk_rope_head_dim") and hasattr(hf_config, "qk_nope_head_dim") and hasattr(hf_config, "v_head_dim"):
|
802 |
-
q_head_dim = hf_config.qk_rope_head_dim + hf_config.qk_nope_head_dim
|
803 |
-
origin_per_token_k_state_size = n_layers * n_attn_heads * q_head_dim
|
804 |
-
origin_per_token_v_state_size = n_layers * n_attn_heads * hf_config.v_head_dim
|
805 |
-
attention_score = context_prefill_size * origin_per_token_k_state_size + (output_len - 1) * origin_per_token_k_state_size / 2
|
806 |
-
attention_score += context_prefill_size * origin_per_token_v_state_size + (output_len - 1) * origin_per_token_v_state_size / 2
|
807 |
-
attention_score = attention_score / 1e12
|
808 |
else:
|
809 |
-
|
810 |
-
attention_score = context_prefill_size * origin_per_token_kv_states_size + (output_len - 1) * origin_per_token_kv_states_size / 2
|
811 |
-
attention_score = attention_score * 2 / 1e12
|
812 |
-
|
813 |
-
# Store attention scores and KV sizes
|
814 |
-
kv_size = context_prefill_size * per_token_kv_size + (output_len - 1) * per_token_kv_size / 2
|
815 |
-
kv_size = kv_size / 1e12
|
816 |
-
true_kv = (context_prefill_size * per_token_kv_size + output_len * per_token_kv_size) / 1e12 * 1e3
|
817 |
-
|
818 |
-
# Calculate aggregate values
|
819 |
-
kv_size = kv_size * batch_size
|
820 |
-
true_kv_size = true_kv * batch_size * 1e3
|
821 |
-
# Calculate attention size per token
|
822 |
-
if "DeepSeek" in model_name and hasattr(hf_config, "qk_rope_head_dim") and hasattr(hf_config, "qk_nope_head_dim") and hasattr(hf_config, "v_head_dim") and hasattr(hf_config, "kv_lora_rank"):
|
823 |
-
q_head_dim = hf_config.qk_rope_head_dim + hf_config.qk_nope_head_dim
|
824 |
-
if not hasattr(hf_config, "q_lora_rank") or not hf_config.q_lora_rank:
|
825 |
-
attention_size_per_token = (d_model * n_attn_heads * q_head_dim) + \
|
826 |
-
(d_model * (hf_config.kv_lora_rank + hf_config.qk_rope_head_dim)) + \
|
827 |
-
(hf_config.kv_lora_rank * n_attn_heads * (q_head_dim - hf_config.qk_rope_head_dim + hf_config.v_head_dim)) + \
|
828 |
-
(hf_config.v_head_dim * n_attn_heads * d_model)
|
829 |
-
attention_size_per_token = attention_size_per_token / 1e12
|
830 |
-
else:
|
831 |
-
attention_size_per_token = (d_model * hf_config.q_lora_rank) + \
|
832 |
-
(hf_config.q_lora_rank * n_attn_heads * q_head_dim) + \
|
833 |
-
(d_model * (hf_config.kv_lora_rank + hf_config.qk_rope_head_dim)) + \
|
834 |
-
(hf_config.kv_lora_rank * n_attn_heads * (q_head_dim - hf_config.qk_rope_head_dim + hf_config.v_head_dim)) + \
|
835 |
-
(hf_config.v_head_dim * n_attn_heads * d_model)
|
836 |
-
attention_size_per_token = attention_size_per_token / 1e12
|
837 |
-
else:
|
838 |
-
attention_size_per_token = d_model * (n_attn_heads * d_head + n_kv_heads * d_head * 2) + n_attn_heads * d_head * d_model
|
839 |
-
attention_size_per_token = attention_size_per_token / 1e12
|
840 |
-
|
841 |
-
# Calculate expert sizes
|
842 |
-
expert_size = d_ff * 3 * d_model / 1e12
|
843 |
-    shared_experts_size_total = 0
-    deepseek_dense_ffn_size = 0
-    deepseek_sparse_layer_num = 0
-
-    if "Qwen" in model_name and hasattr(hf_config, "moe_intermediate_size") and hasattr(hf_config, "shared_expert_intermediate_size"):
-        d_ff = hf_config.moe_intermediate_size
-        d_ff_share = hf_config.shared_expert_intermediate_size
-        shared_experts_size = d_ff_share * 3 * d_model
-        expert_size = d_ff * 3 * d_model
-        shared_experts_size_total = shared_experts_size / 1e12
-        expert_size = expert_size / 1e12
-    elif "Qwen3" in model_name and hasattr(hf_config, "moe_intermediate_size"):
-        d_ff = hf_config.moe_intermediate_size
-        expert_size = d_ff * 3 * d_model
-        expert_size = expert_size / 1e12
-    elif "DeepSeek" in model_name and hasattr(hf_config, "moe_intermediate_size") and hasattr(hf_config, "intermediate_size") and hasattr(hf_config, "first_k_dense_replace"):
-        d_ff = hf_config.moe_intermediate_size
-        d_ff_dense = hf_config.intermediate_size
-        deepseek_num_dense_layer = hf_config.first_k_dense_replace
-        shared_experts_size = d_ff * 3 * d_model
-        expert_size = d_ff * 3 * d_model
-        shared_experts = 2
-        shared_experts_size_total = shared_experts_size * shared_experts / 1e12
-        expert_size = expert_size / 1e12
-        deepseek_sparse_layer_num = n_layers - deepseek_num_dense_layer
-        deepseek_dense_ffn_size = d_ff_dense * 3 * d_model / 1e12
-
-    # Calculate S-MBU and S-MFU
-    if "Qwen" in model_name:
-        smbu = ((n_layers * (avg_activated_experts * expert_size + shared_experts_size_total + attention_size_per_token) +
-                 kv_size) * precision / (batch_size / decoding_tp)) / (num_gpus * hardware_specs['peak_bandwidth_tb'])
-        smfu = (n_layers * (attention_size_per_token + n_experts_per_tok * expert_size + shared_experts_size_total) + attention_score) \
-            * 2 * decoding_tp / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
-    elif "Qwen3" in model_name:
-        smbu = ((n_layers * (avg_activated_experts * expert_size + attention_size_per_token) +
-                 kv_size) * precision / (batch_size / decoding_tp)) / (num_gpus * hardware_specs['peak_bandwidth_tb'])
-        smfu = (n_layers * (attention_size_per_token + n_experts_per_tok * expert_size) + attention_score) \
-            * 2 * decoding_tp / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
-    elif "DeepSeek" in model_name:
-        smbu = ((n_layers * attention_size_per_token + deepseek_sparse_layer_num * \
-                 (avg_activated_experts * expert_size + shared_experts_size_total) + \
-                 deepseek_num_dense_layer * deepseek_dense_ffn_size + \
-                 kv_size) * precision / (batch_size / decoding_tp)) / (num_gpus * hardware_specs['peak_bandwidth_tb'])
-        smfu = (n_layers * attention_size_per_token + deepseek_sparse_layer_num * \
-                (n_experts_per_tok * expert_size + shared_experts_size_total) + \
-                deepseek_num_dense_layer * deepseek_dense_ffn_size + attention_score) \
-            * 2 * decoding_tp / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
-    else:
-        smbu = ((n_layers * (avg_activated_experts * expert_size + attention_size_per_token) +
-                 kv_size) * precision / (batch_size / decoding_tp)) / (num_gpus * hardware_specs['peak_bandwidth_tb'])
-        smfu = (n_layers * (attention_size_per_token + n_experts_per_tok * expert_size) + attention_score) \
-            * 2 * decoding_tp / (num_gpus * hardware_specs['peak_flops_tf'] / 2)
-
-    return {
-        'smbu': smbu,
-        'smfu': smfu,
-        'kv_size': true_kv_size,
-        'decoding_throughput': decoding_tp,
-        'ttft': 0
-    }
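For reference, the deleted branches all reduce to the same roofline-style estimate: S-MBU compares the bytes actually moved per decoding step against aggregate peak bandwidth, and S-MFU compares activated FLOPs against aggregate peak FLOPS. Note that the `elif "Qwen3"` branches above were unreachable, since `"Qwen" in model_name` already matches Qwen3 checkpoints. Below is a minimal, self-contained sketch of the generic (`else`) branch; every input value is a hypothetical placeholder, not something read from this repo.

```python
# Sketch of the generic S-MBU / S-MFU estimate from the removed `else:` branch.
# Sizes are in trillions (TParams / TB/s / TFLOPS), matching the removed code.

def sparse_mfu_mbu(n_layers, attention_size_per_token, expert_size,
                   avg_activated_experts, n_experts_per_tok, attention_score,
                   kv_size, precision_bytes, batch_size, decoding_tp,
                   num_gpus, peak_bandwidth_tb, peak_flops_tf):
    # S-MBU: weights + KV cache actually read per token, amortized over the
    # batch, divided by the node's aggregate peak memory bandwidth.
    smbu = ((n_layers * (avg_activated_experts * expert_size + attention_size_per_token)
             + kv_size) * precision_bytes / (batch_size / decoding_tp)) \
           / (num_gpus * peak_bandwidth_tb)
    # S-MFU: 2 FLOPs per activated weight per token (plus attention-score
    # FLOPs), divided by half the aggregate peak FLOPS, as in the removed code.
    smfu = (n_layers * (attention_size_per_token + n_experts_per_tok * expert_size)
            + attention_score) * 2 * decoding_tp / (num_gpus * peak_flops_tf / 2)
    return smbu, smfu

# Hypothetical Mixtral-like numbers: 32 layers, 2-of-8 routing, fp16, 1 GPU.
print(sparse_mfu_mbu(n_layers=32, attention_size_per_token=0.000042,
                     expert_size=0.000176, avg_activated_experts=2.3,
                     n_experts_per_tok=2, attention_score=0.0000005,
                     kv_size=0.002, precision_bytes=2, batch_size=32,
                     decoding_tp=1500.0, num_gpus=1,
                     peak_bandwidth_tb=2.039, peak_flops_tf=312.0))
```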
-class ModelInfoRetriever:
-    def __init__(self, model_name: str, precision: str = 'float16'):
-        if precision not in ['float32', 'float16', 'bfloat16', 'int8', 'int4', 'awq', 'gptq', 'fp8', 'fp4']:
-            raise ValueError("Precision must be one of ['float32', 'float16', 'bfloat16', 'int8', 'int4', 'awq', 'gptq', 'fp8', 'fp4']")
-        self.model_name = model_name
-        self.precision = precision
-        self.config = AutoConfig.from_pretrained(model_name, trust_remote_code=True)
-        self.model_type = self.config.model_type
-
-    def get_model_precision_bits(self):
-        """Returns the per-weight storage size in bytes for the given quantization format."""
-        if self.precision == 'float32':
-            return 4
-        if self.precision in ['float16', 'bfloat16']:
-            return 2
-        if self.precision in ['int8', 'fp8']:
-            return 1
-        if self.precision in ['int4', 'fp4', 'gptq', 'awq']:
-            return 0.5
-        raise ValueError(f"Unsupported precision: {self.precision}")
-
-    def get_attention_info(self):
-        """Returns attention-related info"""
-        return {
-            'num_attention_heads': getattr(self.config, "num_attention_heads", None),
-            'num_key_value_heads': getattr(self.config, "num_key_value_heads", getattr(self.config, "num_kv_heads", None)),
-            'head_dim': getattr(self.config, "head_dim", getattr(self.config, "hidden_size", None) // getattr(self.config, "num_attention_heads", 1))
-        }
-
-    def get_rope_info(self):
-        """Returns RoPE (rotary embedding) info if available"""
-        if hasattr(self.config, "rope_scaling"):
-            return {
-                "type": self.config.rope_scaling.get("type"),
-                "factor": self.config.rope_scaling.get("factor")
-            }
-        elif hasattr(self.config, "use_alibi"):
-            return {"type": "alibi", "enabled": self.config.use_alibi}
-        else:
-            return {"type": "none"}
-
-    def get_moe_info(self, d_model=None):
-        """Returns MoE configuration such as number of experts and FFN dim"""
-        if d_model is None:
-            d_model = getattr(self.config, "hidden_size", None)
-
-        num_experts = (
-            getattr(self.config, "num_local_experts", None) or
-            getattr(self.config, "num_experts", None) or
-            getattr(self.config, "n_routed_experts", None) or
-            getattr(getattr(self.config, "ffn_config", {}), "moe_num_experts", None) or
-            1
-        )
-        n_experts_per_tok = (
-            getattr(self.config, "num_experts_per_tok", None) or
-            getattr(self.config, "num_selected_experts", None) or
-            getattr(getattr(self.config, "ffn_config", {}), "moe_top_k", None) or
-            1
-        )
-        d_ff = (
-            getattr(self.config, "ffn_dim", None) or
-            getattr(self.config, "intermediate_size", None) or
-            getattr(self.config, "d_ff", None) or
-            (d_model * getattr(self.config, "ff_ratio", 4)) or
-            getattr(getattr(self.config, "ffn_config", {}), "ffn_hidden_size", None) or
-            (4 * d_model)
-        )
-
-        return {
-            "num_experts": num_experts,
-            "experts_per_token": n_experts_per_tok,
-            "ffn_dim": d_ff
-        }
-
-    def get_architecture_info(self):
-        """Returns model-wide architecture info"""
-        return {
-            "model_type": self.model_type,
-            "hidden_size": getattr(self.config, "hidden_size", None),
-            "num_hidden_layers": getattr(self.config, "num_hidden_layers", None),
-            "max_position_embeddings": getattr(self.config, "max_position_embeddings", None),
-            "vocab_size": getattr(self.config, "vocab_size", None),
-            "architectures": getattr(self.config, "architectures", []),
-        }
-
-    def summarize(self):
-        """Aggregate all extracted info in a dictionary"""
-        d_model = getattr(self.config, "hidden_size", None)
-        return {
-            "model_name": self.model_name,
-            "model_type": self.model_type,
-            "precision_bits": self.get_model_precision_bits(),
-            "architecture": self.get_architecture_info(),
-            "attention": self.get_attention_info(),
-            "rope": self.get_rope_info(),
-            "moe": self.get_moe_info(d_model)
-        }
-
-
-# print(get_gpu_details())
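Since `ModelInfoRetriever` disappears in this PR, here is a short usage note for anyone who depended on it. The model name is illustrative, and the expected values in the comments are what Mixtral's published config reports. One caveat worth flagging: despite its name, `get_model_precision_bits()` returned bytes per weight, not bits.

```python
# Hypothetical usage of the removed class (assumes ModelInfoRetriever above is
# in scope, `transformers` is installed, and the Hub is reachable; the model
# name is an example, not one this leaderboard necessarily serves).
retriever = ModelInfoRetriever("mistralai/Mixtral-8x7B-v0.1", precision="float16")
summary = retriever.summarize()
print(summary["moe"])             # expected: {'num_experts': 8, 'experts_per_token': 2, 'ffn_dim': 14336}
print(summary["precision_bits"])  # expected: 2 -- bytes per weight, despite the method name
```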
 import re
 import os
 import GPUtil

 try:
     from src.display.utils import GPU_TEMP, GPU_Mem, GPU_Power, GPU_Util, GPU_Name
...
     from display.utils import GPU_TEMP, GPU_Mem, GPU_Power, GPU_Util, GPU_Name

 MEM_BW_DICT ={
+    "NVIDIA-A100-PCIe-80GB": 1935,
+    "NVIDIA-A100-SXM-80GB": 2039,
+    "NVIDIA-H100-PCIe-80GB": 2039,
+    "NVIDIA-RTX-A5000-24GB": 768
 }

 PEAK_FLOPS_DICT = {
     "float32":{
         "NVIDIA-A100-PCIe-80GB": 312e12,
+        "NVIDIA-A100-SXM-80GB": 312e12,
         "NVIDIA-H100-PCIe-80GB": 756e12,
+        "NVIDIA-RTX-A5000-24GB": 222.2e12
     },
     "float16":{
         "NVIDIA-A100-PCIe-80GB": 624e12,
+        "NVIDIA-A100-SXM-80GB": 624e12,
         "NVIDIA-H100-PCIe-80GB": 1513e12,
+        "NVIDIA-RTX-A5000-24GB": 444.4e12
     },
     "bfloat16":{
         "NVIDIA-A100-PCIe-80GB": 624e12,
+        "NVIDIA-A100-SXM-80GB": 624e12,
         "NVIDIA-H100-PCIe-80GB": 1513e12,
+        "NVIDIA-RTX-A5000-24GB": 444.4e12
     },
+    "8bit":{
         "NVIDIA-A100-PCIe-80GB": 1248e12,
+        "NVIDIA-A100-SXM-80GB": 1248e12,
         "NVIDIA-H100-PCIe-80GB": 3026e12,
+        "NVIDIA-RTX-A5000-24GB": 889e12
     },
+    "4bit": {
+        "NVIDIA-A100-PCIe-80GB": 2496e12,
+        "NVIDIA-A100-SXM-80GB": 2496e12,
+        "NVIDIA-H100-PCIe-80GB": 6052e12,
+        "NVIDIA-RTX-A5000-24GB": 1778e12
     }
+
 }
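These two tables supply the denominators for the MFU/MBU estimates: `MEM_BW_DICT` holds per-card peak memory bandwidth in GB/s, and `PEAK_FLOPS_DICT` holds per-card peak throughput in FLOP/s keyed by precision. A quick illustration of the intended lookup; the GPU count and model of card here are hypothetical inputs, and the dicts above are assumed in scope:

```python
# Illustrative roofline denominators for a hypothetical 4x A100-SXM node.
gpu_name, precision, num_gpus = "NVIDIA-A100-SXM-80GB", "bfloat16", 4

agg_flops = PEAK_FLOPS_DICT[precision][gpu_name] * num_gpus  # FLOP/s
agg_bw_gb = MEM_BW_DICT[gpu_name] * num_gpus                 # GB/s

print(f"{num_gpus}x {gpu_name}: {agg_flops / 1e12:.0f} TFLOPS peak, {agg_bw_gb} GB/s peak")
# -> 4x NVIDIA-A100-SXM-80GB: 2496 TFLOPS peak, 8156 GB/s peak
```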

 def my_snapshot_download(repo_id, revision, local_dir, repo_type, max_workers):
...
     # print(f"gpu_indices: {gpu_indices}")
     gpu_stats = []

+    gpu_info_pattern = re.compile(r'(\d+)C\s+P\d+\s+(\d+)W / \d+W\s+\|\s+(\d+)MiB / \d+MiB\s+\|\s+(\d+)%')
     # gpu_name_pattern = re.compile(r'NVIDIA\s+([\w\s]+\d+(?:\s*GB)?)')
     gpu_name_pattern = re.compile(r'NVIDIA\s+(RTX\s+)?([A-Z0-9]+)')
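The new `gpu_info_pattern` pulls temperature, power draw, memory use, and utilization out of an `nvidia-smi` table row. A quick self-check against a made-up row (the row text below is fabricated for illustration, not captured output):

```python
import re

gpu_info_pattern = re.compile(r'(\d+)C\s+P\d+\s+(\d+)W / \d+W\s+\|\s+(\d+)MiB / \d+MiB\s+\|\s+(\d+)%')

sample_row = "| 31%   45C    P2   183W / 300W |  12345MiB / 81920MiB |     67%      Default |"
m = gpu_info_pattern.search(sample_row)
if m:
    temp_c, power_w, mem_mib, util_pct = m.groups()
    print(temp_c, power_w, mem_mib, util_pct)  # -> 45 183 12345 67
```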

 def get_peak_flops(gpu_name, precision):
     return PEAK_FLOPS_DICT[precision][gpu_name]

+def transfer_precision2bytes(precision):
+    if precision == "float32":
+        return 4
+    elif precision in ["float16", "bfloat16"]:
+        return 2
+    elif precision == "8bit":
+        return 1
+    elif precision == "4bit":
+        return 0.5
     else:
+        raise ValueError(f"Unsupported precision: {precision}")
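Together, `transfer_precision2bytes` and `MEM_BW_DICT` give a quick memory-bound lower bound on decoding latency: every activated weight must stream from HBM at least once per generated token. A back-of-envelope sketch; the parameter count is a hypothetical placeholder, and the helpers above are assumed in scope:

```python
# Hypothetical memory-bound floor for per-token decode latency.
activated_params = 13e9                                    # ~13B activated weights (illustrative)
weight_bytes = activated_params * transfer_precision2bytes("float16")
peak_bw_bytes = MEM_BW_DICT["NVIDIA-A100-SXM-80GB"] * 1e9  # GB/s -> B/s

min_latency_ms = weight_bytes / peak_bw_bytes * 1e3
print(f"memory-bound floor: {min_latency_ms:.1f} ms/token")  # ~12.8 ms
```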

+if __name__ == "__main__":
+    print(analyze_gpu_stats(parse_nvidia_smi()))