def print0(s, console=False):
    # Logging helper. `rank` and `logfile` are assumed to be module-level globals
    # set up earlier in the script.
    # Add timestamp and rank for better log readability.
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    log_message = f"[{timestamp}] [Rank {rank}] {s}"
    # Print to console if requested, or if it's an explicit "PRINT:" message.
    if console or s.startswith("PRINT:"):
        actual_s = s[len("PRINT:"):] if s.startswith("PRINT:") else s  # strip the prefix
        print(actual_s)  # print to stdout for the master process
    if logfile:
        with open(logfile, "a") as f:
            f.write(log_message + "\n")
print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
print0(f"PRINT: Hyperparameters: {args}", console=True)
print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
if master_process:
    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
    print0(code)  # Log the code
# ... (other initial logs)
########################################
# Construct model and optimizer #
########################################
print0("PRINT: Constructing model...", console=True)
model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
                       max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
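# Cast only the embedding tables to bfloat16 (presumably to cut memory/bandwidth for
# these large lookup tables); all other weights keep their original dtype.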
for m in model.modules():
    if isinstance(m, nn.Embedding):
        m.bfloat16()
print0("PRINT: Broadcasting model parameters...", console=True)
for param in model.parameters():
    dist.broadcast(param.detach(), 0)
print0("PRINT: Model constructed and broadcasted.", console=True)
# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
if exp_args.model_parameterization == "qkvo":
print0("PRINT: Collecting parameters for optimizers...", console=True)
head_params = [model.lm_head.weight]
embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
# Granular collection for attention and MLP parts
attn_q_params = []
attn_k_params = []
attn_v_params = []
attn_o_params = [] # W_O from c_proj
mlp_fc_params = []
mlp_proj_params = []
    for block_module in model.blocks:
        if block_module.attn is not None:
            # These attributes (q_w, k_w, v_w) must exist on the CausalSelfAttention
            # class for the "qkvo" parameterization (see the sketch after this loop).
            if hasattr(block_module.attn, 'q_w'):
                attn_q_params.append(block_module.attn.q_w)
            else:
                print0("PRINT: Warning: q_w not found in attn module of a block.", console=True)
            if hasattr(block_module.attn, 'k_w'):
                attn_k_params.append(block_module.attn.k_w)
            else:
                print0("PRINT: Warning: k_w not found in attn module of a block.", console=True)
            if hasattr(block_module.attn, 'v_w'):
                attn_v_params.append(block_module.attn.v_w)
            else:
                print0("PRINT: Warning: v_w not found in attn module of a block.", console=True)
            attn_o_params.append(block_module.attn.c_proj.weight)
        if block_module.mlp is not None:
            mlp_fc_params.append(block_module.mlp.c_fc.weight)
            mlp_proj_params.append(block_module.mlp.c_proj.weight)
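    # For reference, a minimal sketch (hypothetical names and shapes, not the actual
    # class used by this script) of the split parameterization this branch expects:
    #
    #   class CausalSelfAttention(nn.Module):
    #       def __init__(self, model_dim: int, num_heads: int):
    #           super().__init__()
    #           self.q_w = nn.Parameter(torch.empty(model_dim, model_dim))
    #           self.k_w = nn.Parameter(torch.empty(model_dim, model_dim))
    #           self.v_w = nn.Parameter(torch.empty(model_dim, model_dim))
    #           self.c_proj = nn.Linear(model_dim, model_dim, bias=False)  # W_O
    #
    # If attention instead fuses QKV into a single matrix, the hasattr checks above fall
    # through to the warnings and the fused weight lands in scalar_params below.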
    # Combine into logical groups for experiments
    attn_qk_group = attn_q_params + attn_k_params
    attn_vo_group = attn_v_params + attn_o_params
    all_attn_matrices = attn_qk_group + attn_vo_group
    mlp_w1_group = mlp_fc_params
    mlp_w2_group = mlp_proj_params
    all_mlp_matrices = mlp_fc_params + mlp_proj_params
    # Scalar parameters (all others not explicitly grouped as matrices)
    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
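    # (Set membership here effectively compares by object identity, since torch.Tensor
    #  hashing is id-based; that is what we want when grouping the same Parameter objects.)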
    for p_scalar in scalar_params:  # sanity check
        if p_scalar.ndim >= 2:
            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
    # Determine parameter distribution based on optimizer_mode
    muon_params_target_list = []
    adam_matrix_target_list = []  # matrices that Adam will handle specifically
    adam_matrix_lr = 0.001  # LR for matrices if Adam handles them (can be tuned)
    current_optimizer_mode = exp_args.optimizer_mode
    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
    if current_optimizer_mode == 0:  # original behavior: Muon on all "hidden_matrix_params"
        print0("PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
        muon_params_target_list = all_attn_matrices + all_mlp_matrices
        # Adam handles embeds, head, and scalars by default; no extra matrices for Adam here.
    elif current_optimizer_mode == 1:  # Muon on QK; Adam on VO and MLP
        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
        muon_params_target_list = attn_qk_group
        adam_matrix_target_list = attn_vo_group + all_mlp_matrices