Add support for zero-bubble-1P.

ZB-1P splits each backward pass into an input-gradient part (`backward_D`) and a weight-gradient part (`backward_W`); deferring the weight-gradient work lets it fill pipeline bubbles that plain 1F1B leaves idle.

Files changed:
- README.md +5 -0
- main.py +21 -1
- src/execution_model.py +43 -15
- src/strategies.py +69 -15
- src/visualizer.py +73 -18
README.md

````diff
@@ -44,6 +44,11 @@ uv run python main.py strategy=interleave num_devices=4 num_stages=8 num_batches
 ```
 
+Running for ZB-1P strategy:
+```bash
+uv run python main.py strategy=zb1p num_devices=4 num_stages=4 num_batches=8
+```
+
 ## Configuration
 
 The default configuration is in `conf/config.yaml`. You can override any parameter on the command line or create configuration groups for different scenarios.
````
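Since the Configuration section says any parameter can be overridden on the command line, the per-operation costs of the split backward can in principle be tuned the same way. A hypothetical override (assuming the `op_times` keys exist in `conf/config.yaml`; if they do not, Hydra needs a `+` prefix to add them):

```bash
# Illustrative only: the op_times keys are assumptions, not part of this commit.
uv run python main.py strategy=zb1p num_devices=4 num_stages=4 num_batches=8 \
    op_times.forward=1.0 op_times.backward_D=1.0 op_times.backward_W=0.9
```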
main.py

```diff
@@ -1,5 +1,5 @@
 from src.execution_model import ScheduleConfig
-from src.strategies import generate_1f1b_interleave_schedule, generate_1f1b_schedule
+from src.strategies import generate_1f1b_interleave_schedule, generate_1f1b_schedule, generate_zero_bubble_1p_schedule
 from src.visualizer import visualize_pipeline_parallelism_dash
 import hydra
 from omegaconf import DictConfig, OmegaConf
@@ -14,6 +14,8 @@ def main(cfg: DictConfig) -> None:
         run_1f1b(cfg)
     elif cfg.strategy == "interleave":
         run_interleave(cfg)
+    elif cfg.strategy == "zb1p":
+        run_zero_bubble_1p(cfg)
     else:
         raise ValueError(f"Unknown strategy: {cfg.strategy}")
 
@@ -55,5 +57,23 @@ def run_interleave(cfg: DictConfig) -> None:
     visualize_pipeline_parallelism_dash(schedule, port=cfg.visualization_port)
 
 
+def run_zero_bubble_1p(cfg: DictConfig) -> None:
+    """Run zero bubble 1P pipeline parallelism simulation."""
+    # Convert OmegaConf to dict for op_times if it exists
+    op_times = OmegaConf.to_container(cfg.op_times) if hasattr(cfg, 'op_times') else None
+
+    schedule_config = ScheduleConfig(
+        num_devices=cfg.num_devices,
+        num_stages=cfg.num_stages,
+        num_batches=cfg.num_batches,
+        p2p_latency=cfg.p2p_latency,
+        op_times=op_times,
+        split_backward=True
+    )
+    schedule = generate_zero_bubble_1p_schedule(schedule_config)
+    schedule.execute()
+    visualize_pipeline_parallelism_dash(schedule, port=cfg.visualization_port)
+
+
 if __name__ == "__main__":
     main()
```
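For readers who want to exercise the new path without Hydra, a minimal sketch (the sizes are illustrative; the calls mirror `run_zero_bubble_1p` above, run from the repo root):

```python
# Minimal driver sketch for the new strategy, without the Hydra entry point.
from src.execution_model import ScheduleConfig
from src.strategies import generate_zero_bubble_1p_schedule

config = ScheduleConfig(
    num_devices=4,
    num_stages=4,         # ZB-1P asserts num_stages == num_devices
    num_batches=8,
    p2p_latency=0.0,
    split_backward=True,  # enables the backward_D / backward_W op types
)
schedule = generate_zero_bubble_1p_schedule(config)
schedule.execute()  # resolves dependencies and assigns start/end times
```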
src/execution_model.py

```diff
@@ -36,6 +36,7 @@ class ScheduleConfig:
         num_batches: int,
         p2p_latency: float = 0.0,
         placement_strategy: str = "standard",
+        split_backward: bool = False,
         op_times: Optional[Dict[str, Union[float, Dict[int, float]]]] = None,
     ):
         self.num_devices = num_devices
@@ -43,12 +44,20 @@
         self.num_batches = num_batches
         self.p2p_latency = p2p_latency
         self.placement_strategy = placement_strategy
+        self.split_backward = split_backward
 
         # Initialize default operation times
-        self.op_times = {
-            "forward": 1.0,
-            "backward": 2.0,
-        }
+        if self.split_backward:
+            self.op_times = {
+                "forward": 1.0,
+                "backward_D": 1.0,
+                "backward_W": 1.0,
+            }
+        else:
+            self.op_times = {
+                "forward": 1.0,
+                "backward": 2.0,
+            }
 
         # Update with user-provided operation times
         if op_times:
@@ -119,9 +128,10 @@ class Schedule:
 
         self.init_operations()
 
-    def init_operations(self):
-        op_types = ["forward", "backward"]
-
+    def init_operations(self):
+        op_types = ["forward", "backward"]
+        if self.config.split_backward:
+            op_types = ["forward", "backward_D", "backward_W"]
         for batch_id in range(self.config.num_batches):
             for stage_id in range(self.config.num_stages):
                 for op_type in op_types:
@@ -142,14 +152,32 @@
                     self.config.p2p_latency,
                 )
             )
-        if op.op_type == "backward":
-            if op.stage_id < self.config.num_stages - 1:
-                deps.append(
-                    (
-                        self.get_op(op.batch_id, op.stage_id + 1, "backward"),
-                        self.config.p2p_latency,
-                    )
-                )
+        if self.config.split_backward:
+            if op.op_type == "backward_D":
+                if op.stage_id < self.config.num_stages - 1:
+                    deps.append(
+                        (
+                            self.get_op(op.batch_id, op.stage_id + 1, "backward_D"),
+                            self.config.p2p_latency,
+                        )
+                    )
+            elif op.op_type == "backward_W":
+                if op.stage_id < self.config.num_stages - 1:
+                    deps.append(
+                        (
+                            self.get_op(op.batch_id, op.stage_id, "backward_D"),
+                            self.config.p2p_latency,
+                        )
+                    )
+        else:
+            if op.op_type == "backward":
+                if op.stage_id < self.config.num_stages - 1:
+                    deps.append(
+                        (
+                            self.get_op(op.batch_id, op.stage_id + 1, "backward"),
+                            self.config.p2p_latency,
+                        )
+                    )
 
         device_index = self.dev_queues[op.device_id].ops.index(op)
         if device_index > 0:
@@ -170,7 +198,7 @@
             print("-" * 80)
 
         for op in self.dev_queues[dev_id].ops:
-            op_type = …
+            op_type = op.op_type
             start = f"{op.start_time:.2f}" if op.start_time is not None else "N/A"
             end = f"{op.end_time:.2f}" if op.end_time is not None else "N/A"
 
```
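The added branch encodes two rules: `backward_D` (input gradient) at stage *s* waits on `backward_D` at stage *s+1*, while `backward_W` (weight gradient) at stage *s* waits only on `backward_D` of the same stage, which is what makes it safe to defer. A hypothetical helper restating just that logic, not part of the commit:

```python
# Hypothetical restatement of the split-backward dependency rules above;
# returns (batch, stage, op_type) keys instead of operation objects.
def split_backward_deps(batch_id: int, stage_id: int, op_type: str, num_stages: int):
    deps = []
    if stage_id < num_stages - 1:  # the last stage starts each backward chain
        if op_type == "backward_D":
            deps.append((batch_id, stage_id + 1, "backward_D"))  # downstream input grad
        elif op_type == "backward_W":
            deps.append((batch_id, stage_id, "backward_D"))  # same-stage input grad
    return deps

# With 4 stages, backward_W on stage 1 only needs stage 1's backward_D:
assert split_backward_deps(0, 1, "backward_W", num_stages=4) == [(0, 1, "backward_D")]
```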
src/strategies.py

```diff
@@ -5,6 +5,8 @@ from src.execution_model import Schedule, ScheduleConfig
 def generate_1f1b_schedule(config: ScheduleConfig):
     schedule = Schedule(config)
 
+    assert config.num_devices == config.num_stages, "num_devices must be equal to num_stages for 1F1B"
+
     for i in range(config.num_devices):
         fwd_batch_id = 0
         bwd_batch_id = 0
@@ -12,30 +14,82 @@
         steady_batches = config.num_batches - warmup_batches
 
         for _ in range(warmup_batches):
-            schedule.dev_queues[i].add_operation(
-                schedule.get_op(…)
-            )
+            schedule.dev_queues[i].add_operation(
+                schedule.get_op(fwd_batch_id, i, "forward")
+            )
             fwd_batch_id += 1
 
         for _ in range(steady_batches):
-            schedule.dev_queues[i].add_operation(
-                schedule.get_op(…)
-            )
+            schedule.dev_queues[i].add_operation(
+                schedule.get_op(fwd_batch_id, i, "forward")
+            )
             fwd_batch_id += 1
-            schedule.dev_queues[i].add_operation(
-                schedule.get_op(…)
-            )
+            schedule.dev_queues[i].add_operation(
+                schedule.get_op(bwd_batch_id, i, "backward")
+            )
             bwd_batch_id += 1
 
         for _ in range(cooldown_batches):
             schedule.dev_queues[i].add_operation(
-                schedule.get_op(…)
+                schedule.get_op(bwd_batch_id, i, "backward")
             )
-            bwd_batch_id += 1
+            bwd_batch_id += 1
+
+    return schedule
+
+
+def generate_zero_bubble_1p_schedule(config: ScheduleConfig):
+    # Create a new schedule with split_backward=True to support backward_D and backward_W operations
+    schedule = Schedule(config)
+    total_batches = config.num_batches
+    assert config.num_devices == config.num_stages, "num_devices must be equal to num_stages for ZB-1P"
+
+    for i in range(config.num_devices):
+        fwd_batch_id = 0
+        bwd_d_batch_id = 0
+        bwd_w_batch_id = 0
+
+        cooldown_batches = warmup_batches = config.num_devices - i - 1
+        steady_batches = total_batches - warmup_batches
+
+        for _ in range(warmup_batches):
+            schedule.dev_queues[i].add_operation(
+                schedule.get_op(fwd_batch_id, i, "forward")
+            )
+            fwd_batch_id += 1
+
+        for _ in range(steady_batches):
+            schedule.dev_queues[i].add_operation(
+                schedule.get_op(fwd_batch_id, i, "forward")
+            )
+            schedule.dev_queues[i].add_operation(
+                schedule.get_op(bwd_d_batch_id, i, "backward_D")
+            )
+            if fwd_batch_id - bwd_w_batch_id >= config.num_devices - 1:
+                schedule.dev_queues[i].add_operation(
+                    schedule.get_op(bwd_w_batch_id, i, "backward_W")
+                )
+                bwd_w_batch_id += 1
+            bwd_d_batch_id += 1
+            fwd_batch_id += 1
+
+        for _ in range(cooldown_batches):
+            schedule.dev_queues[i].add_operation(
+                schedule.get_op(bwd_d_batch_id, i, "backward_D")
+            )
+
+            schedule.dev_queues[i].add_operation(
+                schedule.get_op(bwd_w_batch_id, i, "backward_W")
+            )
+
+            bwd_w_batch_id += 1
+            bwd_d_batch_id += 1
+
+        while bwd_w_batch_id < total_batches:
+            schedule.dev_queues[i].add_operation(
+                schedule.get_op(bwd_w_batch_id, i, "backward_W")
+            )
+            bwd_w_batch_id += 1
 
     return schedule
```
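To see what these loops produce, here is a hypothetical dry-run (not part of the commit) that replays the ZB-1P loop structure and prints each device's queue as F/B/W tokens, where B stands for `backward_D` and W for `backward_W`:

```python
# Hypothetical dry-run of the ZB-1P loops above; prints operation order only.
def zb1p_order(num_devices: int, num_batches: int) -> None:
    for i in range(num_devices):
        ops = []
        fwd = bwd_d = bwd_w = 0
        cooldown = warmup = num_devices - i - 1
        steady = num_batches - warmup
        for _ in range(warmup):                 # forward-only warmup
            ops.append(f"F{fwd}"); fwd += 1
        for _ in range(steady):                 # steady state: F, B, maybe W
            ops.append(f"F{fwd}")
            ops.append(f"B{bwd_d}")
            if fwd - bwd_w >= num_devices - 1:  # enough in flight to start W
                ops.append(f"W{bwd_w}"); bwd_w += 1
            bwd_d += 1; fwd += 1
        for _ in range(cooldown):               # drain remaining B/W pairs
            ops.append(f"B{bwd_d}"); ops.append(f"W{bwd_w}")
            bwd_d += 1; bwd_w += 1
        while bwd_w < num_batches:              # leftover weight gradients
            ops.append(f"W{bwd_w}"); bwd_w += 1
        print(f"device {i}: " + " ".join(ops))

# zb1p_order(4, 8) should print, for the last stage, something like:
# device 3: F0 B0 F1 B1 F2 B2 F3 B3 W0 F4 B4 W1 F5 B5 W2 F6 B6 W3 F7 B7 W4 W5 W6 W7
zb1p_order(num_devices=4, num_batches=8)
```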
src/visualizer.py

```diff
@@ -45,10 +45,10 @@ def get_color(op_type: str, stage_id: int, num_devices: int):
     # Color palettes for different virtual stages
     forward_colors = [
         "royalblue",      # Stage 0
-        "…",              # Stage 1
-        "…",              # Stage 2
+        "cornflowerblue", # Stage 1
+        "dodgerblue",     # Stage 2
         "steelblue",      # Stage 3
-        "…",              # Stage 4
+        "lightskyblue",   # Stage 4
         "deepskyblue",    # Stage 5
         "mediumblue",     # Stage 6
         "mediumslateblue",# Stage 7
@@ -56,17 +56,46 @@
         "darkslateblue"   # Stage 9
     ]
 
+    # Updated to orange/brown palette for backward operations
     backward_colors = [
-        "…",
-        "…",
-        "…",
-        "…",
-        "…",
-        "…",
-        "…",
+        "darkorange",     # Stage 0
+        "orange",         # Stage 1
+        "sandybrown",     # Stage 2
+        "peru",           # Stage 3
+        "chocolate",      # Stage 4
+        "sienna",         # Stage 5
+        "saddlebrown",    # Stage 6
+        "brown",          # Stage 7
+        "darkgoldenrod",  # Stage 8
+        "goldenrod"       # Stage 9
+    ]
+
+    # Teal/turquoise palette for backward_D operations
+    backward_d_colors = [
+        "mediumaquamarine", # Stage 0
+        "cadetblue",        # Stage 1
+        "lightseagreen",    # Stage 2
+        "cyan",             # Stage 3
+        "teal",             # Stage 4
+        "mediumturquoise",  # Stage 5
+        "turquoise",        # Stage 6
+        "aquamarine",       # Stage 7
+        "darkturquoise",    # Stage 8
+        "paleturquoise"     # Stage 9
+    ]
+
+    # Green palette for backward_W operations
+    backward_w_colors = [
+        "limegreen",         # Stage 0
+        "forestgreen",       # Stage 1
+        "green",             # Stage 2
+        "seagreen",          # Stage 3
+        "mediumseagreen",    # Stage 4
+        "springgreen",       # Stage 5
+        "mediumspringgreen", # Stage 6
         "palegreen",         # Stage 7
-        "…",
-        "…",
+        "lightgreen",        # Stage 8
+        "darkseagreen"       # Stage 9
     ]
 
     virtual_stage = stage_id // num_devices
@@ -78,6 +107,10 @@
         return forward_colors[color_index]
     elif op_type == "backward":
         return backward_colors[color_index]
+    elif op_type == "backward_D":
+        return backward_d_colors[color_index]
+    elif op_type == "backward_W":
+        return backward_w_colors[color_index]
     else:
         raise ValueError(f"Invalid operation type: {op_type}")
 
@@ -129,7 +162,7 @@ def create_pipeline_figure(schedule_data: Dict[int, List[Dict]], max_time=None,
 
     # Sort tasks by start time to ensure correct rendering
     sorted_tasks = sorted(schedule_data[device], key=lambda t: t["start_time"])
-
+
     for task in sorted_tasks:
         # Determine task color and text color
         if task["type"] == "forward":
@@ -140,6 +173,14 @@
             color = get_color(task["type"], task["stage"], num_devices)
             text_color = "black"
             name = "Backward"
+        elif task["type"] == "backward_D":
+            color = get_color(task["type"], task["stage"], num_devices)
+            text_color = "black"
+            name = "Backward (Grad)"
+        elif task["type"] == "backward_W":
+            color = get_color(task["type"], task["stage"], num_devices)
+            text_color = "black"
+            name = "Backward (Weight)"
         else:
             color = empty_color
             text_color = "black"
@@ -221,12 +262,24 @@
                 name=f"Backward (VS {vs})",
                 color=get_color("backward", vs * num_devices, num_devices)
             ))
+            # Add entries for split backward operations if this is a zb1p schedule
+            if any(task["type"] in ["backward_D", "backward_W"] for device in schedule_data for task in schedule_data[device]):
+                legend_items.append(dict(
+                    name=f"Backward Grad (VS {vs})",
+                    color=get_color("backward_D", vs * num_devices, num_devices)
+                ))
+                legend_items.append(dict(
+                    name=f"Backward Weight (VS {vs})",
+                    color=get_color("backward_W", vs * num_devices, num_devices)
+                ))
 
     # If no tasks found, add default legend items
     if not legend_items:
         legend_items = [
             dict(name="Forward (VS 0)", color=get_color("forward", 0, num_devices)),
             dict(name="Backward (VS 0)", color=get_color("backward", 0, num_devices)),
+            dict(name="Backward Grad (VS 0)", color=get_color("backward_D", 0, num_devices)),
+            dict(name="Backward Weight (VS 0)", color=get_color("backward_W", 0, num_devices)),
         ]
 
     for i, item in enumerate(legend_items):
@@ -277,12 +330,12 @@
             yanchor="top",
             y=1.02, # Position at the top
             xanchor="right",
-            x=1.…,
+            x=1.20, # Position further to the right to accommodate more items
            title=dict(text="<b>Operation Types:</b>"),
            itemsizing="constant",
            tracegroupgap=0
        ),
-        width=…,
+        width=2000, # Increase width to accommodate the expanded legend
        height=400, # Maintain current height
        bargap=0,
        bargroupgap=0,
@@ -304,7 +357,7 @@ def create_dash_app(schedule: Schedule, schedule_type="1f1b", enable_caching: bool
 
     Args:
         schedule: Schedule object to visualize
-        schedule_type: Type of schedule ("1f1b" or custom description)
+        schedule_type: Type of schedule ("1f1b", "zb1p", or custom description)
         enable_caching: Whether to cache the schedule data and figure
     """
     # Process schedule data only once and cache it
@@ -381,7 +434,8 @@ def visualize_pipeline_parallelism_dash(
     schedule: Schedule,
     port: int = 8050,
     debug: bool = False,
-    enable_caching: bool = True
+    enable_caching: bool = True,
+    schedule_type="1f1b"
 ):
     """
     Launch a Dash app to visualize the pipeline schedule interactively.
@@ -391,7 +445,8 @@
         port: Port to run the Dash app on
         debug: Whether to run the Dash app in debug mode
         enable_caching: Whether to cache schedule data and figures
+        schedule_type: Type of schedule ("1f1b", "zb1p", or custom description)
     """
-    app = create_dash_app(schedule, enable_caching=enable_caching)
+    app = create_dash_app(schedule, schedule_type=schedule_type, enable_caching=enable_caching)
     print(f"Starting Dash app on http://localhost:{port}/")
     app.run_server(debug=debug, port=port)
```
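With the new keyword in place, a zb1p run can be launched like this (a usage sketch; `schedule` is assumed to come from `generate_zero_bubble_1p_schedule`):

```python
# Usage sketch for the extended signature; schedule_type is forwarded
# to create_dash_app and only affects labeling, not the rendering logic.
visualize_pipeline_parallelism_dash(schedule, port=8050, schedule_type="zb1p")
```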