# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from torch.nn import Transformer
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
import pytorch_lightning as pl  # Import PyTorch Lightning
from pytorch_lightning.loggers import WandbLogger  # Import WandbLogger
from pytorch_lightning.callbacks import (
    ModelCheckpoint,
    EarlyStopping,
)  # Import Callbacks
import math
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import time
import wandb  # Import wandb


from tokenizers import (
    Tokenizer,
    models,
    pre_tokenizers,
    decoders,
    trainers,
)

import logging
import gc

# --- Basic Logging Setup ---
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)

# --- 1. Configuration & Hyperparameters ---

# Model Hyperparameters (Scaled up for H100s - ADJUST AS NEEDED based on memory)
# Note: BPE might benefit from a slightly larger vocab size than the regex approach
SRC_VOCAB_SIZE_ESTIMATE = 10000  # Increased estimate for SMILES BPE
TGT_VOCAB_SIZE_ESTIMATE = 14938  # Increased estimate for IUPAC
EMB_SIZE = 2048  # Embedding dimension (d_model) - Increased significantly
NHEAD = 8  # Number of attention heads (must divide EMB_SIZE) - Increased
FFN_HID_DIM = (
    4096  # Feedforward network hidden dimension (e.g., 4 * EMB_SIZE) - Increased
)
NUM_ENCODER_LAYERS = 12  # Number of layers in Encoder - Increased
NUM_DECODER_LAYERS = 12  # Number of layers in Decoder - Increased
DROPOUT = 0.1  # Dropout rate (can sometimes be reduced slightly for larger models)
MAX_LEN = 384  # Maximum sequence length (consider increasing if needed/possible)

# Training Hyperparameters
ACCELERATOR = "gpu"
DEVICES = 6  # Number of H100 GPUs to use
STRATEGY = "ddp"  # Distributed Data Parallel Strategy
PRECISION = "16-mixed"  # Use mixed precision for speed and memory saving on H100s
BATCH_SIZE_PER_GPU = 48  # Adjust based on H100 GPU memory (e.g., 32, 48, 64) - Effective BS = BATCH_SIZE_PER_GPU * DEVICES
ACCUMULATE_GRAD_BATCHES = (
    1  # Increase if BATCH_SIZE_PER_GPU needs to be smaller due to memory
)
NUM_EPOCHS = 50  # Increase for potentially longer training needed for larger models
LEARNING_RATE = 5e-5  # Might need adjustment for larger models/batch sizes
WEIGHT_DECAY = 1e-2
GRAD_CLIP_NORM = 1.0
VALIDATION_SPLIT = 0.05  # Use a smaller validation split if the dataset is huge
RANDOM_SEED = 42
PATIENCE = 5  # Early stopping patience
NUM_WORKERS = 8  # Adjust based on CPU cores and system capabilities

# Special Token Indices
PAD_IDX = 0
SOS_IDX = 1
EOS_IDX = 2
UNK_IDX = 3

# File Paths
# *** CHANGED SMILES TOKENIZER FILENAME ***
SMILES_TOKENIZER_FILE = "smiles_bytelevel_bpe_tokenizer_scaled.json"
IUPAC_TOKENIZER_FILE = "iupac_unigram_tokenizer_scaled.json"
INPUT_CSV_FILE = "data_clean.csv"  # <--- Your input CSV file path

# Output files for data splits
TRAIN_SMILES_FILE = "train.smi"
TRAIN_IUPAC_FILE = "train.iupac"
VAL_SMILES_FILE = "val.smi"
VAL_IUPAC_FILE = "val.iupac"
CHECKPOINT_DIR = "checkpoints"  # Directory to save model checkpoints
BEST_MODEL_FILENAME = (
    "smiles-to-iupac-transformer-best"  # Filename format for checkpoints
)

# WandB Configuration
WANDB_PROJECT = "SMILES-to-IUPAC-Large-BPE"  # Updated project name slightly
WANDB_ENTITY = (
    "adrianmirza"  # Replace with your WandB entity (username or team name) if desired
)
WANDB_RUN_NAME = f"transformer_BPE_E{EMB_SIZE}_H{NHEAD}_L{NUM_ENCODER_LAYERS}_BS{BATCH_SIZE_PER_GPU * DEVICES}_LR{LEARNING_RATE}"

# Store hparams for logging
hparams = {
    "src_tokenizer_type": "ByteLevelBPE",  # Added tokenizer type info
    "tgt_tokenizer_type": "Unigram",
    "src_vocab_size_estimate": SRC_VOCAB_SIZE_ESTIMATE,
    "tgt_vocab_size_estimate": TGT_VOCAB_SIZE_ESTIMATE,
    "emb_size": EMB_SIZE,
    "nhead": NHEAD,
    "ffn_hid_dim": FFN_HID_DIM,
    "num_encoder_layers": NUM_ENCODER_LAYERS,
    "num_decoder_layers": NUM_DECODER_LAYERS,
    "dropout": DROPOUT,
    "max_len": MAX_LEN,
    "batch_size_per_gpu": BATCH_SIZE_PER_GPU,
    "effective_batch_size": BATCH_SIZE_PER_GPU * DEVICES * ACCUMULATE_GRAD_BATCHES,
    "num_epochs": NUM_EPOCHS,
    "learning_rate": LEARNING_RATE,
    "weight_decay": WEIGHT_DECAY,
    "grad_clip_norm": GRAD_CLIP_NORM,
    "validation_split": VALIDATION_SPLIT,
    "random_seed": RANDOM_SEED,
    "patience": PATIENCE,
    "precision": PRECISION,
    "gpus": DEVICES,
    "strategy": STRATEGY,
    "num_workers": NUM_WORKERS,
}

# --- 2. Tokenizers (Modified SMILES Tokenizer) ---


# --- 2.a SMILES ByteLevel BPE Tokenizer (Replaced WordLevel Regex) ---
def get_smiles_tokenizer(
    train_files=None,
    vocab_size=30000,
    min_frequency=2,
    tokenizer_path=SMILES_TOKENIZER_FILE,
):
    """Creates or loads a Byte-Level BPE tokenizer for SMILES."""
    if os.path.exists(tokenizer_path):
        logging.info(f"Loading existing SMILES tokenizer from {tokenizer_path}")
        try:
            tokenizer = Tokenizer.from_file(tokenizer_path)
            # Verify special tokens after loading
            if (
                tokenizer.token_to_id("<pad>") != PAD_IDX
                or tokenizer.token_to_id("<sos>") != SOS_IDX
                or tokenizer.token_to_id("<eos>") != EOS_IDX
                or tokenizer.token_to_id("<unk>") != UNK_IDX
            ):
                logging.warning(
                    "Special token ID mismatch after loading SMILES tokenizer. Re-check config."
                )
            # Check if it's actually a BPE model (basic check)
            if not isinstance(tokenizer.model, models.BPE):
                logging.warning(
                    f"Loaded tokenizer from {tokenizer_path} is not a BPE model. Retraining."
                )
                raise TypeError("Incorrect tokenizer model type loaded.")
            return tokenizer
        except Exception as e:
            logging.error(f"Failed to load SMILES tokenizer: {e}. Retraining...")

    logging.info("Creating and training SMILES Byte-Level BPE tokenizer...")
    # Use BPE model
    tokenizer = Tokenizer(models.BPE(unk_token="<unk>"))

    # Use ByteLevel pre-tokenizer - this handles any character sequence
    # add_prefix_space=False is generally suitable for SMILES as it doesn't rely on spaces
    tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
    # Use ByteLevel decoder
    tokenizer.decoder = decoders.ByteLevel()

    special_tokens = ["<pad>", "<sos>", "<eos>", "<unk>"]
    # Use BpeTrainer
    trainer = trainers.BpeTrainer(
        vocab_size=vocab_size,
        min_frequency=min_frequency,
        special_tokens=special_tokens,
        # BPE specific options can be added here if needed, e.g.:
        # initial_alphabet=pre_tokenizers.ByteLevel.alphabet(), # Usually inferred
        # show_progress=True,
    )

    if train_files and all(os.path.exists(f) for f in train_files):
        logging.info(f"Training SMILES BPE tokenizer on: {train_files}")
        tokenizer.train(files=train_files, trainer=trainer)
        logging.info(
            f"SMILES BPE tokenizer trained. Final Vocab size: {tokenizer.get_vocab_size()}"
        )
        # Verify special token IDs after training
        if (
            tokenizer.token_to_id("<pad>") != PAD_IDX
            or tokenizer.token_to_id("<sos>") != SOS_IDX
            or tokenizer.token_to_id("<eos>") != EOS_IDX
            or tokenizer.token_to_id("<unk>") != UNK_IDX
        ):
            logging.warning(
                "Special token ID mismatch after training SMILES BPE tokenizer. Check trainer setup."
            )
        try:
            tokenizer.save(tokenizer_path)
            logging.info(f"SMILES BPE tokenizer saved to {tokenizer_path}")
        except Exception as e:
            logging.error(f"Failed to save SMILES BPE tokenizer: {e}")
    else:
        logging.error(
            "Training files not provided or not found for SMILES tokenizer. Cannot train."
        )
        # Manually add special tokens if training fails, so basic encoding/decoding might work
        tokenizer.add_special_tokens(special_tokens)

    return tokenizer
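
# Minimal usage sketch for the SMILES BPE tokenizer (illustrative only, not executed;
# the SMILES string is an arbitrary example and the exact token ids depend on the
# trained vocabulary):
#   tok = get_smiles_tokenizer(train_files=[TRAIN_SMILES_FILE])
#   enc = tok.encode("CC(=O)Oc1ccccc1C(=O)O")
#   ids = enc.ids                      # list[int] of BPE token ids over raw bytes
#   text = tok.decode(ids)             # ByteLevel decoding should round-trip the string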


# --- 2.b IUPAC Unigram Tokenizer (No changes needed here) ---
def get_iupac_tokenizer(
    train_files=None,
    vocab_size=30000,
    min_frequency=2,
    tokenizer_path=IUPAC_TOKENIZER_FILE,
):
    """Creates or loads a Unigram tokenizer for IUPAC names."""
    if os.path.exists(tokenizer_path):
        logging.info(f"Loading existing IUPAC tokenizer from {tokenizer_path}")
        try:
            tokenizer = Tokenizer.from_file(tokenizer_path)
            if (
                tokenizer.token_to_id("<pad>") != PAD_IDX
                or tokenizer.token_to_id("<sos>") != SOS_IDX
                or tokenizer.token_to_id("<eos>") != EOS_IDX
                or tokenizer.token_to_id("<unk>") != UNK_IDX
            ):
                logging.warning(
                    "Special token ID mismatch after loading IUPAC tokenizer. Re-check config."
                )
            return tokenizer
        except Exception as e:
            logging.error(f"Failed to load IUPAC tokenizer: {e}. Retraining...")

    logging.info("Creating and training IUPAC Unigram tokenizer...")
    tokenizer = Tokenizer(models.Unigram())
    # Using Sequence of pre-tokenizers for IUPAC is reasonable
    pre_tokenizer_list = [
        pre_tokenizers.WhitespaceSplit(),  # Split by whitespace first
        pre_tokenizers.Punctuation(),  # Split punctuation
        pre_tokenizers.Digits(individual_digits=True),  # Split digits
    ]
    # Consider adding Metaspace if Unigram struggles with word boundaries after splits
    # tokenizer.pre_tokenizer = pre_tokenizers.Metaspace() # Alternative
    tokenizer.pre_tokenizer = pre_tokenizers.Sequence(pre_tokenizer_list)
    tokenizer.decoder = (
        decoders.Metaspace()
    )  # Metaspace decoder often works well with Unigram/BPE
    special_tokens = ["<pad>", "<sos>", "<eos>", "<unk>"]
    trainer = trainers.UnigramTrainer(
        vocab_size=vocab_size,
        special_tokens=special_tokens,
        unk_token="<unk>",
        # Unigram specific options can be added here
        # shrinking_factor=0.75,
        # n_sub_iterations=2,
    )

    if train_files and all(os.path.exists(f) for f in train_files):
        logging.info(f"Training IUPAC tokenizer on: {train_files}")
        tokenizer.train(files=train_files, trainer=trainer)
        logging.info(
            f"IUPAC tokenizer trained. Final Vocab size: {tokenizer.get_vocab_size()}"
        )
        # Verify special token IDs after training
        if (
            tokenizer.token_to_id("<pad>") != PAD_IDX
            or tokenizer.token_to_id("<sos>") != SOS_IDX
            or tokenizer.token_to_id("<eos>") != EOS_IDX
            or tokenizer.token_to_id("<unk>") != UNK_IDX
        ):
            logging.warning(
                "Special token ID mismatch after training IUPAC tokenizer. Check trainer setup."
            )
        try:
            tokenizer.save(tokenizer_path)
            logging.info(f"IUPAC tokenizer saved to {tokenizer_path}")
        except Exception as e:
            logging.error(f"Failed to save IUPAC tokenizer: {e}")
    else:
        logging.error(
            "Training files not provided or not found for IUPAC tokenizer. Cannot train."
        )
        tokenizer.add_special_tokens(special_tokens)

    return tokenizer


# --- 3. Model Definition (No changes needed) ---
class PositionalEncoding(nn.Module):
    """Injects positional information into the input embeddings."""

    def __init__(self, emb_size: int, dropout: float, maxlen: int = 5000):
        super().__init__()
        den = torch.exp(-torch.arange(0, emb_size, 2) * math.log(10000) / emb_size)
        pos = torch.arange(0, maxlen).reshape(maxlen, 1)
        pos_embedding = torch.zeros((maxlen, emb_size))
        pos_embedding[:, 0::2] = torch.sin(pos * den)
        pos_embedding[:, 1::2] = torch.cos(pos * den)
        pos_embedding = pos_embedding.unsqueeze(
            0
        )  # Add batch dimension for broadcasting
        self.dropout = nn.Dropout(dropout)
        self.register_buffer(
            "pos_embedding", pos_embedding
        )  # Shape [1, maxlen, emb_size]

    def forward(self, token_embedding: torch.Tensor):
        # token_embedding: Expected shape [batch_size, seq_len, emb_size]
        seq_len = token_embedding.size(1)
        # Slicing pos_embedding: [1, seq_len, emb_size]
        # Handle cases where seq_len might exceed buffer's maxlen during inference/edge cases
        if seq_len > self.pos_embedding.size(1):
            logging.warning(
                f"Input sequence length ({seq_len}) exceeds PositionalEncoding maxlen ({self.pos_embedding.size(1)}). Truncating positional encoding."
            )
            pos_to_add = self.pos_embedding[:, : self.pos_embedding.size(1), :]
            # Pad token_embedding if needed? Or error out? For now, just use available encoding.
            # This scenario shouldn't happen if MAX_LEN config is respected.
            output = token_embedding[:, : self.pos_embedding.size(1), :] + pos_to_add
        else:
            pos_to_add = self.pos_embedding[:, :seq_len, :]
            output = token_embedding + pos_to_add

        return self.dropout(output)


class TokenEmbedding(nn.Module):
    """Converts token indices to embeddings."""

    def __init__(self, vocab_size: int, emb_size):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_size, padding_idx=PAD_IDX)
        self.emb_size = emb_size

    def forward(self, tokens: torch.Tensor):
        return self.embedding(tokens.long()) * math.sqrt(self.emb_size)


class Seq2SeqTransformer(nn.Module):
    """The main Encoder-Decoder Transformer model."""

    def __init__(
        self,
        num_encoder_layers: int,
        num_decoder_layers: int,
        emb_size: int,
        nhead: int,
        src_vocab_size: int,
        tgt_vocab_size: int,
        dim_feedforward: int,
        dropout: float = 0.1,
        max_len: int = MAX_LEN,
    ):  # Use MAX_LEN from config
        super().__init__()

        if emb_size % nhead != 0:
            raise ValueError(
                f"Embedding size ({emb_size}) must be divisible by the number of heads ({nhead})"
            )

        self.src_tok_emb = TokenEmbedding(src_vocab_size, emb_size)
        self.tgt_tok_emb = TokenEmbedding(tgt_vocab_size, emb_size)

        # Ensure PositionalEncoding maxlen is sufficient
        pe_maxlen = max(
            max_len, 5000
        )  # Use config MAX_LEN or default 5000, whichever is larger
        self.positional_encoding = PositionalEncoding(
            emb_size, dropout=dropout, maxlen=pe_maxlen
        )

        self.transformer = Transformer(
            d_model=emb_size,
            nhead=nhead,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            dim_feedforward=dim_feedforward,
            dropout=dropout,
            batch_first=True,
        )  # Use batch_first=True

        self.generator = nn.Linear(emb_size, tgt_vocab_size)
        self._init_weights()

    def _init_weights(self):
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(
        self,
        src: torch.Tensor,  # Input sequence (batch_size, src_len)
        trg: torch.Tensor,  # Target sequence (batch_size, tgt_len)
        tgt_mask: torch.Tensor,  # Target causal mask (tgt_len, tgt_len)
        src_padding_mask: torch.Tensor,  # Source padding mask (batch_size, src_len)
        tgt_padding_mask: torch.Tensor,  # Target padding mask (batch_size, tgt_len)
        memory_key_padding_mask: torch.Tensor,
    ):  # Memory padding mask (batch_size, src_len)
        # --- Ensure masks have correct dtype and device ---
        # PyTorch Transformer expects boolean masks where True indicates masking
        src_padding_mask = src_padding_mask.to(src.device)
        tgt_padding_mask = tgt_padding_mask.to(trg.device)
        memory_key_padding_mask = memory_key_padding_mask.to(src.device)
        # tgt_mask needs to be float for '-inf' filling, keep on target device
        tgt_mask = tgt_mask.to(trg.device)

        src_emb = self.positional_encoding(
            self.src_tok_emb(src)
        )  # [batch, src_len, dim]
        tgt_emb = self.positional_encoding(
            self.tgt_tok_emb(trg)
        )  # [batch, tgt_len, dim]

        outs = self.transformer(
            src=src_emb,
            tgt=tgt_emb,
            src_mask=None,  # Not typically needed for encoder unless custom masking
            tgt_mask=tgt_mask,  # Causal mask for decoder self-attn
            memory_mask=None,  # Not typically needed unless masking specific memory parts
            src_key_padding_mask=src_padding_mask,  # Mask padding in src K,V
            tgt_key_padding_mask=tgt_padding_mask,  # Mask padding in tgt Q
            memory_key_padding_mask=memory_key_padding_mask,
        )  # Mask padding in memory K,V for cross-attn
        # outs: [batch_size, tgt_len, emb_size]
        return self.generator(outs)  # [batch_size, tgt_len, tgt_vocab_size]

    def encode(self, src: torch.Tensor, src_padding_mask: torch.Tensor):
        src_padding_mask = src_padding_mask.to(
            src.device
        )  # Ensure mask is on correct device
        src_emb = self.positional_encoding(
            self.src_tok_emb(src)
        )  # [batch, src_len, dim]
        memory = self.transformer.encoder(
            src_emb, mask=None, src_key_padding_mask=src_padding_mask
        )
        return memory  # Returns memory: [batch_size, src_len, emb_size]

    def decode(
        self,
        tgt: torch.Tensor,
        memory: torch.Tensor,
        tgt_mask: torch.Tensor,
        tgt_padding_mask: torch.Tensor,
        memory_key_padding_mask: torch.Tensor,
    ):
        # Ensure masks are on correct device
        tgt_mask = tgt_mask.to(tgt.device)
        tgt_padding_mask = tgt_padding_mask.to(tgt.device)
        memory_key_padding_mask = memory_key_padding_mask.to(memory.device)

        tgt_emb = self.positional_encoding(
            self.tgt_tok_emb(tgt)
        )  # [batch, tgt_len, dim]
        output = self.transformer.decoder(
            tgt=tgt_emb,
            memory=memory,
            tgt_mask=tgt_mask,
            memory_mask=None,
            tgt_key_padding_mask=tgt_padding_mask,
            memory_key_padding_mask=memory_key_padding_mask,
        )
        return output  # Returns decoder output: [batch_size, tgt_len, emb_size]


# --- Helper function for mask creation (No changes needed) ---
def generate_square_subsequent_mask(sz: int, device: torch.device) -> torch.Tensor:
    """Generates an upper-triangular matrix for causal masking."""
    mask = (torch.triu(torch.ones((sz, sz), device=device)) == 1).transpose(0, 1)
    mask = (
        mask.float()
        .masked_fill(mask == 0, float("-inf"))
        .masked_fill(mask == 1, float(0.0))
    )
    return mask  # Shape [sz, sz]
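
# Worked example (illustrative, not executed): for sz=3 the returned mask is
#   [[0.0, -inf, -inf],
#    [0.0,  0.0, -inf],
#    [0.0,  0.0,  0.0]]
# i.e. position t in the decoder may only attend to positions <= t.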


def create_masks(
    src: torch.Tensor, tgt: torch.Tensor, pad_idx: int, device: torch.device
):
    """
    Creates all necessary masks for the Transformer model.
    Assumes src and tgt are inputs to the forward pass (tgt includes SOS, excludes EOS).
    Returns boolean masks where True indicates the position should be masked (ignored).
    """
    src_seq_len = src.shape[1]
    tgt_seq_len = tgt.shape[1]

    # Causal mask for decoder self-attention (float mask for PyTorch Transformer)
    tgt_mask = generate_square_subsequent_mask(
        tgt_seq_len, device
    )  # [tgt_len, tgt_len]

    # Padding masks (boolean, True where padded)
    src_padding_mask = src == pad_idx  # [batch_size, src_len]
    tgt_padding_mask = tgt == pad_idx  # [batch_size, tgt_len]
    memory_key_padding_mask = (
        src_padding_mask  # Used in decoder cross-attention [batch_size, src_len]
    )

    return tgt_mask, src_padding_mask, tgt_padding_mask, memory_key_padding_mask
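
# Minimal shape sketch (assumed example values, not executed):
#   src    = torch.tensor([[5, 6, 7, PAD_IDX]])      # [1, 4]
#   tgt_in = torch.tensor([[SOS_IDX, 9, 10]])        # [1, 3]
#   tm, spm, tpm, mkpm = create_masks(src, tgt_in, PAD_IDX, torch.device("cpu"))
#   # tm: [3, 3] float causal mask; spm and mkpm (identical): [[False, False, False, True]];
#   # tpm: [[False, False, False]]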


# --- 4. Data Handling (Dataset and Collate Function - No changes needed) ---
class SmilesIupacDataset(Dataset):
    """Dataset class for SMILES-IUPAC pairs, reading from pre-split files."""

    def __init__(self, smiles_file: str, iupac_file: str):
        logging.info(f"Loading data from {smiles_file} and {iupac_file}")
        try:
            with open(smiles_file, "r", encoding="utf-8") as f_smi:
                self.smiles = [line.strip() for line in f_smi if line.strip()]
            with open(iupac_file, "r", encoding="utf-8") as f_iupac:
                self.iupac = [line.strip() for line in f_iupac if line.strip()]

            if len(self.smiles) != len(self.iupac):
                logging.warning(
                    f"Mismatch in number of lines: {smiles_file} ({len(self.smiles)}) vs {iupac_file} ({len(self.iupac)}). Trimming."
                )
                min_len = min(len(self.smiles), len(self.iupac))
                self.smiles = self.smiles[:min_len]
                self.iupac = self.iupac[:min_len]

            logging.info(
                f"Loaded {len(self.smiles)} pairs from {smiles_file}/{iupac_file}."
            )
            if len(self.smiles) == 0:
                logging.warning("Loaded 0 data pairs. Check files.")

        except FileNotFoundError:
            logging.error(
                f"Error: One or both files not found: {smiles_file}, {iupac_file}"
            )
            raise
        except Exception as e:
            logging.error(f"Error loading data: {e}")
            raise

    def __len__(self):
        return len(self.smiles)

    def __getitem__(self, idx):
        return self.smiles[idx], self.iupac[idx]


def collate_fn(
    batch, smiles_tokenizer, iupac_tokenizer, pad_idx, sos_idx, eos_idx, max_len
):
    """Collates data samples into batches."""
    src_batch, tgt_batch = [], []
    skipped_count = 0
    for src_sample, tgt_sample in batch:
        try:
            # Encode source (SMILES)
            src_encoded = smiles_tokenizer.encode(src_sample)
            # Truncate source if needed (including potential special tokens if added by encode)
            src_ids = src_encoded.ids[:max_len]
            if not src_ids:  # Skip if encoding results in empty sequence
                skipped_count += 1
                continue
            src_tensor = torch.tensor(src_ids, dtype=torch.long)

            # Encode target (IUPAC)
            tgt_encoded = iupac_tokenizer.encode(tgt_sample)
            # Truncate target allowing space for SOS and EOS
            tgt_ids = tgt_encoded.ids[: max_len - 2]
            if (
                not tgt_ids
            ):  # Skip if encoding results in empty sequence (after truncation)
                skipped_count += 1
                continue
            # Add SOS and EOS tokens
            tgt_tensor = torch.tensor([sos_idx] + tgt_ids + [eos_idx], dtype=torch.long)

            src_batch.append(src_tensor)
            tgt_batch.append(tgt_tensor)
        except Exception as e:
            # Log infrequent warnings for skipping
            # if skipped_count < 5: # Log only the first few skips per batch
            #     logging.warning(f"Skipping sample due to error during tokenization/tensor creation: {e}. SMILES: '{src_sample[:50]}...', IUPAC: '{tgt_sample[:50]}...'")
            skipped_count += 1
            continue

    # if skipped_count > 0:
    #     logging.debug(f"Skipped {skipped_count} samples in this batch during collation.")

    if not src_batch or not tgt_batch:
        # Return empty tensors if the whole batch was skipped
        return torch.tensor([]), torch.tensor([])

    try:
        # Pad sequences
        src_batch_padded = pad_sequence(
            src_batch, batch_first=True, padding_value=pad_idx
        )
        tgt_batch_padded = pad_sequence(
            tgt_batch, batch_first=True, padding_value=pad_idx
        )
    except Exception as e:
        logging.error(
            f"Error during padding: {e}. Src lengths: {[len(s) for s in src_batch]}, Tgt lengths: {[len(t) for t in tgt_batch]}"
        )
        # Return empty tensors on padding error
        return torch.tensor([]), torch.tensor([])

    return src_batch_padded, tgt_batch_padded
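
# Sketch of the collate output (assumed lengths, not executed): for a batch of two pairs
# whose encoded SMILES lengths are 5 and 3, and whose IUPAC lengths (incl. <sos>/<eos>)
# are 7 and 4, the function returns
#   src_batch_padded: LongTensor of shape [2, 5], right-padded with PAD_IDX
#   tgt_batch_padded: LongTensor of shape [2, 7], right-padded with PAD_IDX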


# --- 5. PyTorch Lightning Module (No changes needed) ---
class SmilesIupacLitModule(pl.LightningModule):
    def __init__(
        self, src_vocab_size: int, tgt_vocab_size: int, hparams_dict: dict
    ):  # Pass hparams dictionary
        super().__init__()
        # Use save_hyperparameters() to automatically save args to self.hparams
        # and make them accessible in checkpoints and loggers
        self.save_hyperparameters(hparams_dict)

        self.model = Seq2SeqTransformer(
            num_encoder_layers=self.hparams.num_encoder_layers,
            num_decoder_layers=self.hparams.num_decoder_layers,
            emb_size=self.hparams.emb_size,
            nhead=self.hparams.nhead,
            src_vocab_size=src_vocab_size,  # Pass actual vocab size
            tgt_vocab_size=tgt_vocab_size,  # Pass actual vocab size
            dim_feedforward=self.hparams.ffn_hid_dim,
            dropout=self.hparams.dropout,
            max_len=self.hparams.max_len,  # Pass max_len here
        )

        self.criterion = torch.nn.CrossEntropyLoss(ignore_index=PAD_IDX)

        # --- Count Parameters --- (Done once at initialization)
        total_params = sum(p.numel() for p in self.model.parameters())
        trainable_params = sum(
            p.numel() for p in self.model.parameters() if p.requires_grad
        )
        logging.info("Model Initialized:")
        logging.info(f"  Total Parameters: {total_params / 1_000_000:.2f} M")
        logging.info(f"  Trainable Parameters: {trainable_params / 1_000_000:.2f} M")
        # Log params to wandb hparams if logger is available
        # self.hparams are automatically logged by WandbLogger if passed to Trainer
        # We can add them explicitly if needed, but save_hyperparameters usually handles it.
        self.hparams.total_params_M = round(total_params / 1_000_000, 2)
        self.hparams.trainable_params_M = round(trainable_params / 1_000_000, 2)

    def forward(self, src, tgt):
        # This is the main forward pass used for inference/prediction if needed
        # For training/validation, we call the model directly in step methods
        # to handle mask creation explicitly.
        tgt_input = tgt[:, :-1]  # Prepare target input (remove EOS)
        tgt_mask, src_padding_mask, tgt_padding_mask, memory_key_padding_mask = (
            create_masks(
                src,
                tgt_input,
                PAD_IDX,
                self.device,  # Use self.device provided by Lightning
            )
        )
        logits = self.model(
            src,
            tgt_input,
            tgt_mask,
            src_padding_mask,
            tgt_padding_mask,
            memory_key_padding_mask,
        )
        return logits

    def training_step(self, batch, batch_idx):
        src, tgt = batch
        if src.numel() == 0 or tgt.numel() == 0:
            # logging.debug(f"Skipping empty batch {batch_idx} in training.")
            return None  # Skip empty batches

        tgt_input = tgt[:, :-1]  # Exclude EOS for input
        tgt_out = tgt[:, 1:]  # Exclude SOS for target labels

        # Create masks on the current device
        tgt_mask, src_padding_mask, tgt_padding_mask, memory_key_padding_mask = (
            create_masks(src, tgt_input, PAD_IDX, self.device)
        )

        try:
            logits = self.model(
                src=src,
                trg=tgt_input,
                tgt_mask=tgt_mask,
                src_padding_mask=src_padding_mask,
                tgt_padding_mask=tgt_padding_mask,
                memory_key_padding_mask=memory_key_padding_mask,
            )
            # logits: [batch_size, tgt_len-1, tgt_vocab_size]

            # Calculate loss
            # Reshape logits to [batch_size * (tgt_len-1), tgt_vocab_size]
            # Reshape tgt_out to [batch_size * (tgt_len-1)]
            loss = self.criterion(
                logits.reshape(-1, logits.shape[-1]), tgt_out.reshape(-1)
            )

            # Check for NaN/Inf loss (important with mixed precision)
            if not torch.isfinite(loss):
                logging.warning(
                    f"Non-finite loss encountered in training step {batch_idx}: {loss.item()}. Skipping update."
                )
                # Manually skip optimizer step if using manual optimization,
                # otherwise returning None might be sufficient for automatic opt.
                return None  # Returning None should prevent optimizer step

            # Log training loss
            # sync_dist=True is important for DDP to average loss across GPUs
            self.log(
                "train_loss",
                loss,
                on_step=True,
                on_epoch=True,
                prog_bar=True,
                logger=True,
                sync_dist=True,
                batch_size=src.size(0),
            )

            return loss

        except RuntimeError as e:
            if "CUDA out of memory" in str(e):
                logging.warning(
                    f"CUDA OOM error during training step {batch_idx} with shape src: {src.shape}, tgt: {tgt.shape}. Skipping batch."
                )
                gc.collect()
                torch.cuda.empty_cache()
                return None  # Skip update
            else:
                logging.error(f"Runtime error during training step {batch_idx}: {e}")
                # Optionally log shapes for debugging other runtime errors
                logging.error(f"Shapes - src: {src.shape}, tgt: {tgt.shape}")
                return None  # Skip update

    def validation_step(self, batch, batch_idx):
        src, tgt = batch
        if src.numel() == 0 or tgt.numel() == 0:
            # logging.debug(f"Skipping empty batch {batch_idx} in validation.")
            return None

        tgt_input = tgt[:, :-1]
        tgt_out = tgt[:, 1:]

        tgt_mask, src_padding_mask, tgt_padding_mask, memory_key_padding_mask = (
            create_masks(src, tgt_input, PAD_IDX, self.device)
        )

        try:
            logits = self.model(
                src,
                tgt_input,
                tgt_mask,
                src_padding_mask,
                tgt_padding_mask,
                memory_key_padding_mask,
            )
            loss = self.criterion(
                logits.reshape(-1, logits.shape[-1]), tgt_out.reshape(-1)
            )

            if torch.isfinite(loss):
                # Log validation loss (accumulated across batches and synced across GPUs at epoch end)
                # sync_dist=True ensures correct aggregation in DDP
                self.log(
                    "val_loss",
                    loss,
                    on_step=False,
                    on_epoch=True,
                    prog_bar=True,
                    logger=True,
                    sync_dist=True,
                    batch_size=src.size(0),
                )
            else:
                logging.warning(
                    f"Non-finite loss encountered during validation step {batch_idx}: {loss.item()}."
                )
            # PTL aggregates logged values automatically for the epoch
            # Returning the loss value itself isn't strictly necessary when using self.log
            # return loss

        except RuntimeError as e:
            # Don't crash validation if one batch fails (e.g., OOM on a particularly long sequence)
            logging.error(f"Runtime error during validation step {batch_idx}: {e}")
            if "CUDA out of memory" in str(e):
                logging.warning(
                    f"CUDA OOM error during validation step {batch_idx} with shape src: {src.shape}, tgt: {tgt.shape}. Skipping batch."
                )
                gc.collect()
                torch.cuda.empty_cache()
            else:
                logging.error(f"Shapes - src: {src.shape}, tgt: {tgt.shape}")
            # Return None or a placeholder if needed by some aggregation logic,
            # but self.log should handle the metric correctly even if some steps fail.
            return None

    def configure_optimizers(self):
        optimizer = torch.optim.AdamW(
            self.parameters(),  # self.parameters() includes all model parameters
            lr=self.hparams.learning_rate,
            weight_decay=self.hparams.weight_decay,
        )

        # --- Add Learning Rate Scheduler ---
        # Use linear warmup followed by linear decay (common for transformers)
        # Requires the 'transformers' library: pip install transformers
        try:
            from transformers import get_linear_schedule_with_warmup

            # Estimate total training steps if trainer is available
            # estimated_stepping_batches gives steps per epoch * num_epochs / num_devices (if using DDP)
            # For total steps across all devices * epochs, we might need to calculate differently or use a fixed large number if estimate isn't ready
            # Let's rely on estimated_stepping_batches, assuming it gives a reasonable estimate of steps the optimizer will take.
            # Note: Accessing self.trainer here might be tricky if it's not fully initialized yet.
            # A safer approach might be to calculate based on dataset size and epochs if possible,
            # or use a very large number for num_training_steps if decay to zero is desired eventually.
            # Let's try accessing trainer, but add a fallback.
            try:
                # This attribute is available after trainer setup, might work here.
                num_training_steps = self.trainer.estimated_stepping_batches
                logging.info(
                    f"Estimated stepping batches for LR schedule: {num_training_steps}"
                )
                if num_training_steps is None or num_training_steps <= 0:
                    logging.warning(
                        "Could not estimate stepping batches, using fallback for LR schedule."
                    )
                    # Fallback: Calculate based on assumed dataset size / effective batch size * epochs
                    # This requires knowing the dataset size, which isn't directly available here.
                    # Using a large fixed number as a simpler fallback if decay is desired eventually.
                    # Or, calculate based on hparams if dataset size was stored? No.
                    # Let's default to a large number if estimate fails.
                    num_training_steps = 1_000_000  # Adjust this large number if needed
            except AttributeError:
                logging.warning(
                    "self.trainer not available yet in configure_optimizers. Using fallback step count for LR schedule."
                )
                num_training_steps = 1_000_000  # Adjust this large number if needed

            # Set warmup steps (e.g., 5% of total steps)
            num_warmup_steps = int(0.05 * num_training_steps)
            logging.info(
                f"LR Scheduler: Total steps ~{num_training_steps}, Warmup steps: {num_warmup_steps}"
            )

            scheduler = get_linear_schedule_with_warmup(
                optimizer,
                num_warmup_steps=num_warmup_steps,
                num_training_steps=num_training_steps,
            )

            lr_scheduler_config = {
                "scheduler": scheduler,
                "interval": "step",  # Call scheduler after each training step
                "frequency": 1,
                "name": "linear_warmup_decay_lr",  # Optional: Name for logging
            }
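            # Schedule shape: the LR ramps linearly from 0 to LEARNING_RATE over
            # num_warmup_steps, then decays linearly to 0 by num_training_steps;
            # interval="step" advances the scheduler once per optimizer step
            # (i.e. after gradient accumulation).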
            logging.info("Using Linear Warmup/Decay LR Scheduler.")
            return {"optimizer": optimizer, "lr_scheduler": lr_scheduler_config}

        except ImportError:
            logging.warning(
                "'transformers' library not found. Cannot create linear warmup scheduler. Using constant LR."
            )
            return optimizer
        except Exception as e:
            logging.error(
                f"Error setting up LR scheduler: {e}. Using constant LR.", exc_info=True
            )
            return optimizer


# --- 6. Inference (Translation) (No changes needed) ---
# These functions remain largely the same but will take the LightningModule instance


def greedy_decode(
    model: pl.LightningModule,  # Takes the LightningModule
    src: torch.Tensor,
    src_padding_mask: torch.Tensor,
    max_len: int,
    sos_idx: int,
    eos_idx: int,
    device: torch.device,
) -> torch.Tensor:
    """Performs greedy decoding using the LightningModule's model."""
    # model.eval() # Lightning handles eval mode during inference/testing
    transformer_model = model.model  # Access the underlying Seq2SeqTransformer

    try:
        with torch.no_grad():
            # Use the model's encode/decode methods
            memory = transformer_model.encode(
                src, src_padding_mask
            )  # [1, src_len, emb_size]
            memory = memory.to(device)
            # Ensure memory_key_padding_mask is also on the correct device for decode
            memory_key_padding_mask = src_padding_mask.to(memory.device)  # [1, src_len]

            ys = (
                torch.ones(1, 1).fill_(sos_idx).type(torch.long).to(device)
            )  # [1, 1] (Batch size 1)

            for i in range(max_len - 1):
                tgt_seq_len = ys.shape[1]
                # Create masks for the current decoded sequence length
                tgt_mask = generate_square_subsequent_mask(tgt_seq_len, device).to(
                    device
                )  # [curr_len, curr_len]
                # No padding in target during greedy decode yet
                tgt_padding_mask = torch.zeros(ys.shape, dtype=torch.bool).to(
                    device
                )  # [1, curr_len]

                # Use the model's decode method
                out = transformer_model.decode(
                    ys, memory, tgt_mask, tgt_padding_mask, memory_key_padding_mask
                )
                # out: [1, curr_len, emb_size]

                # Get the logits for the last token generated
                last_token_logits = transformer_model.generator(
                    out[:, -1, :]
                )  # [1, tgt_vocab_size]
                prob = last_token_logits  # Use logits directly for argmax
                _, next_word = torch.max(prob, dim=1)
                next_word = next_word.item()

                # Append the predicted token ID
                ys = torch.cat(
                    [ys, torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1
                )

                # Stop if EOS token is generated
                if next_word == eos_idx:
                    break
        # Return the generated sequence, excluding the initial SOS token
        return ys[:, 1:]

    except RuntimeError as e:
        logging.error(f"Runtime error during greedy decode: {e}")
        if "CUDA out of memory" in str(e):
            gc.collect()
            torch.cuda.empty_cache()
        # Return an empty tensor on error
        return torch.tensor([[]], dtype=torch.long, device=device)


def translate(
    model: pl.LightningModule,  # Takes the LightningModule
    src_sentence: str,
    smiles_tokenizer,
    iupac_tokenizer,
    device: torch.device,
    max_len: int,
    sos_idx: int,
    eos_idx: int,
    pad_idx: int,
) -> str:
    """Translates a single SMILES string using the LightningModule."""
    model.eval()  # Ensure model is in eval mode for inference

    try:
        src_encoded = smiles_tokenizer.encode(src_sentence)
        if not src_encoded or len(src_encoded.ids) == 0:
            logging.warning(f"Encoding failed for SMILES: {src_sentence}")
            return "[Encoding Error]"
        # Truncate source sequence if needed before creating tensor
        src_ids = src_encoded.ids[:max_len]
        if not src_ids:
            logging.warning(
                f"Source sequence empty after truncation for SMILES: {src_sentence}"
            )
            return "[Encoding Error - Empty Src]"

    except Exception as e:
        logging.error(f"Error tokenizing SMILES '{src_sentence}': {e}")
        return "[Encoding Error]"

    # Create tensor and move to device
    src = (
        torch.tensor(src_ids, dtype=torch.long).unsqueeze(0).to(device)
    )  # Add batch dimension
    # Create padding mask (boolean, True where padded)
    # For single sentence inference, there's no padding unless the original sequence was shorter than max_len
    # and we padded it, but here we just take the IDs. The mask should reflect the actual length.
    # However, the model expects a mask, even if it's all False for non-padded sequences.
    src_padding_mask = src == pad_idx  # [1, src_len]

    # Perform greedy decoding
    tgt_tokens_tensor = greedy_decode(
        model=model,  # Pass the LightningModule
        src=src,
        src_padding_mask=src_padding_mask,
        max_len=max_len,  # Use the configured max_len for generation limit
        sos_idx=sos_idx,
        eos_idx=eos_idx,
        device=device,
    )

    # Decode the generated token IDs
    if tgt_tokens_tensor.numel() > 0:
        tgt_tokens = tgt_tokens_tensor.flatten().cpu().numpy().tolist()
        try:
            # Decode using the target tokenizer, skipping special tokens like <pad>, <sos>, <eos>
            translation = iupac_tokenizer.decode(tgt_tokens, skip_special_tokens=True)
            return translation
        except Exception as e:
            logging.error(f"Error decoding target tokens {tgt_tokens}: {e}")
            return "[Decoding Error]"
    else:
        # Log if decoding returned an empty tensor (might happen on error in greedy_decode)
        # logging.warning(f"Greedy decode returned empty tensor for SMILES: {src_sentence}")
        return "[Decoding Error - Empty Output]"


# --- 7. Main Execution Script (Minor updates for clarity) ---
if __name__ == "__main__":
    pl.seed_everything(RANDOM_SEED, workers=True)  # Seed everything for reproducibility

    # --- Create Checkpoint Directory ---
    os.makedirs(CHECKPOINT_DIR, exist_ok=True)

    # --- Load Data from CSV and Split ---
    # (Keep this data preparation step outside the Lightning Module)
    logging.info(f"Loading and splitting data from {INPUT_CSV_FILE}...")
    # (Re-using the data loading and splitting logic from the original script)
    try:
        # Load with dtype specification for potentially large files
        df = pd.read_csv(INPUT_CSV_FILE, dtype={"SMILES": str, "Systematic": str})
        logging.info(f"Initial rows loaded: {len(df)}")
        if "SMILES" not in df.columns:
            raise ValueError("CSV must contain 'SMILES' column.")
        if "Systematic" not in df.columns:
            raise ValueError("CSV must contain 'Systematic' (IUPAC name) column.")
        df.rename(columns={"Systematic": "IUPAC"}, inplace=True)

        initial_rows = len(df)
        df.dropna(subset=["SMILES", "IUPAC"], inplace=True)
        rows_after_na = len(df)
        if initial_rows > rows_after_na:
            logging.info(
                f"Dropped {initial_rows - rows_after_na} rows with missing values."
            )
        # Strip whitespace and filter empty strings more efficiently
        df = df[df["SMILES"].str.strip().astype(bool)]
        df = df[df["IUPAC"].str.strip().astype(bool)]
        df["SMILES"] = df["SMILES"].str.strip()
        df["IUPAC"] = df["IUPAC"].str.strip()
        rows_after_empty = len(df)
        if rows_after_na > rows_after_empty:
            logging.info(
                f"Dropped {rows_after_na - rows_after_empty} rows with empty strings after stripping."
            )

        smiles_data = df["SMILES"].tolist()
        iupac_data = df["IUPAC"].tolist()
        logging.info(f"Loaded {len(smiles_data)} valid pairs from CSV.")
        del df
        gc.collect()  # Free memory

        if len(smiles_data) < 10:
            raise ValueError(
                f"Not enough valid data ({len(smiles_data)}) for split. Need at least 10."
            )

        train_smi, val_smi, train_iupac, val_iupac = train_test_split(
            smiles_data,
            iupac_data,
            test_size=VALIDATION_SPLIT,
            random_state=RANDOM_SEED,
        )
        logging.info(f"Split: {len(train_smi)} train, {len(val_smi)} validation.")
        del smiles_data, iupac_data
        gc.collect()  # Free memory

        logging.info("Writing split data to files...")
        with open(TRAIN_SMILES_FILE, "w", encoding="utf-8") as f:
            f.write("\n".join(train_smi))
        with open(TRAIN_IUPAC_FILE, "w", encoding="utf-8") as f:
            f.write("\n".join(train_iupac))
        with open(VAL_SMILES_FILE, "w", encoding="utf-8") as f:
            f.write("\n".join(val_smi))
        with open(VAL_IUPAC_FILE, "w", encoding="utf-8") as f:
            f.write("\n".join(val_iupac))
        logging.info(
            f"Split files written: {TRAIN_SMILES_FILE}, {TRAIN_IUPAC_FILE}, {VAL_SMILES_FILE}, {VAL_IUPAC_FILE}"
        )
        del train_smi, val_smi, train_iupac, val_iupac
        gc.collect()  # Free memory

    except FileNotFoundError:
        logging.error(f"Fatal error: Input CSV file not found at {INPUT_CSV_FILE}")
        exit(1)
    except ValueError as ve:
        logging.error(f"Fatal error during data preparation: {ve}")
        exit(1)
    except Exception as e:
        logging.error(f"Fatal error during data preparation: {e}", exc_info=True)
        exit(1)
    # --- End Data Preparation ---

    # --- Initialize Tokenizers ---
    logging.info("Initializing Tokenizers...")
    # Ensure training files exist before attempting to train tokenizers
    if not os.path.exists(TRAIN_SMILES_FILE) or not os.path.exists(TRAIN_IUPAC_FILE):
        logging.error(
            f"Training files ({TRAIN_SMILES_FILE}, {TRAIN_IUPAC_FILE}) not found. Cannot train tokenizers."
        )
        exit(1)

    smiles_tokenizer = get_smiles_tokenizer(
        train_files=[TRAIN_SMILES_FILE],
        vocab_size=SRC_VOCAB_SIZE_ESTIMATE,
        tokenizer_path=SMILES_TOKENIZER_FILE,
    )
    iupac_tokenizer = get_iupac_tokenizer(
        train_files=[TRAIN_IUPAC_FILE],
        vocab_size=TGT_VOCAB_SIZE_ESTIMATE,
        tokenizer_path=IUPAC_TOKENIZER_FILE,
    )

    ACTUAL_SRC_VOCAB_SIZE = smiles_tokenizer.get_vocab_size()
    ACTUAL_TGT_VOCAB_SIZE = iupac_tokenizer.get_vocab_size()
    logging.info(f"Actual SMILES Vocab Size: {ACTUAL_SRC_VOCAB_SIZE}")
    logging.info(f"Actual IUPAC Vocab Size: {ACTUAL_TGT_VOCAB_SIZE}")
    # Update hparams with actual sizes (will be logged by WandbLogger)
    hparams["actual_src_vocab_size"] = ACTUAL_SRC_VOCAB_SIZE
    hparams["actual_tgt_vocab_size"] = ACTUAL_TGT_VOCAB_SIZE

    # --- Setup WandB Logger ---
    # Ensure WANDB_ENTITY is set if required, otherwise it uses default
    if WANDB_ENTITY is None:
        logging.warning(
            "WANDB_ENTITY not set. WandB will log to your default entity. Set WANDB_ENTITY='your_username_or_team' to specify."
        )

    wandb_logger = WandbLogger(
        project=WANDB_PROJECT,
        entity=WANDB_ENTITY,  # Set your entity here or leave as None
        name=WANDB_RUN_NAME,
        config=hparams,  # Log hyperparameters defined above
        # log_model='all' # Log model checkpoints to WandB (can consume significant storage)
        # log_model=True # Log best model checkpoint based on monitor
    )

    # --- Initialize Datasets and DataLoaders ---
    logging.info("Creating Datasets and DataLoaders...")
    try:
        train_dataset = SmilesIupacDataset(TRAIN_SMILES_FILE, TRAIN_IUPAC_FILE)
        val_dataset = SmilesIupacDataset(VAL_SMILES_FILE, VAL_IUPAC_FILE)
        if len(train_dataset) == 0 or len(val_dataset) == 0:
            logging.error(
                "Training or validation dataset is empty. Check data splitting and file content."
            )
            exit(1)
    except Exception as e:
        logging.error(f"Error creating Datasets: {e}", exc_info=True)
        exit(1)

    # Wrap collate_fn in a closure so the DataLoader can call it with just a batch;
    # the tokenizers and special-token indices are captured from the enclosing scope.
    def collate_fn_partial(batch):
        return collate_fn(
            batch,
            smiles_tokenizer,
            iupac_tokenizer,
            PAD_IDX,
            SOS_IDX,
            EOS_IDX,
            hparams["max_len"],
        )
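    # Note: a function defined inside this scope cannot be pickled, so DataLoader
    # workers will fail on platforms that use the "spawn" start method (e.g. Windows,
    # macOS) when NUM_WORKERS > 0. A minimal sketch of a picklable alternative,
    # assuming collate_fn accepts these values as keyword arguments (the parameter
    # names below are illustrative, not taken from collate_fn's actual signature):
    # from functools import partial
    # collate_fn_partial = partial(
    #     collate_fn,
    #     smiles_tokenizer=smiles_tokenizer,
    #     iupac_tokenizer=iupac_tokenizer,
    #     pad_idx=PAD_IDX,
    #     sos_idx=SOS_IDX,
    #     eos_idx=EOS_IDX,
    #     max_len=hparams["max_len"],
    # )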

    # Keep DataLoader worker processes alive between epochs. Enabled here only when
    # multiple workers are used under DDP, where repeated worker startup is most costly.
    persistent_workers = NUM_WORKERS > 0 and STRATEGY == "ddp"

    train_dataloader = DataLoader(
        train_dataset,
        batch_size=BATCH_SIZE_PER_GPU,
        shuffle=True,
        collate_fn=collate_fn_partial,
        num_workers=NUM_WORKERS,
        pin_memory=True,
        persistent_workers=persistent_workers,
        drop_last=True,
    )  # Drop last incomplete batch in training for DDP consistency
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=BATCH_SIZE_PER_GPU,  # Use same batch size for validation
        shuffle=False,
        collate_fn=collate_fn_partial,
        num_workers=NUM_WORKERS,
        pin_memory=True,
        persistent_workers=persistent_workers,
        drop_last=False,
    )  # Keep all validation batches
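    # Under DDP, Lightning by default wraps these loaders with a DistributedSampler so
    # each process sees a distinct shard of the data; shuffle=True above then controls
    # per-epoch shuffling of that sampler rather than of the raw dataset.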

    # --- Initialize Model ---
    logging.info("Initializing Lightning Module...")
    # Pass hparams dictionary directly, PTL handles it via save_hyperparameters
    model = SmilesIupacLitModule(
        src_vocab_size=ACTUAL_SRC_VOCAB_SIZE,
        tgt_vocab_size=ACTUAL_TGT_VOCAB_SIZE,
        hparams_dict=hparams,
    )

    # Optional: Log model topology to WandB (do this after model init, before training)
    # Note: watch can sometimes slow down training start, especially with large models
    # wandb_logger.watch(model, log='all', log_freq=100) # Log gradients and parameters

    # --- Define Callbacks ---
    checkpoint_callback = ModelCheckpoint(
        dirpath=CHECKPOINT_DIR,
        filename=BEST_MODEL_FILENAME + "-{epoch:02d}-{val_loss:.4f}",
        save_top_k=1,  # Save only the best model
        verbose=True,
        monitor="val_loss",  # Monitor validation loss
        mode="min",  # Save the model with the minimum validation loss
        save_last=True,  # Optionally save the last checkpoint as well
    )
    early_stopping_callback = EarlyStopping(
        monitor="val_loss",
        patience=PATIENCE,  # Number of epochs with no improvement after which training will be stopped
        verbose=True,
        mode="min",
    )
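    # Both callbacks monitor "val_loss", so the LightningModule is expected to log a
    # metric with exactly that name (e.g. self.log("val_loss", ...)) during validation;
    # the same key fills the {val_loss:.4f} placeholder in the checkpoint filename.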

    # --- Initialize PyTorch Lightning Trainer ---
    logging.info(
        f"Initializing PyTorch Lightning Trainer (GPUs={DEVICES}, Strategy='{STRATEGY}', Precision='{PRECISION}')..."
    )
    trainer = pl.Trainer(
        accelerator=ACCELERATOR,
        devices=DEVICES,
        strategy=STRATEGY,
        precision=PRECISION,
        max_epochs=NUM_EPOCHS,
        logger=wandb_logger,  # Use WandbLogger
        callbacks=[checkpoint_callback, early_stopping_callback],
        gradient_clip_val=GRAD_CLIP_NORM,  # Gradient clipping
        accumulate_grad_batches=ACCUMULATE_GRAD_BATCHES,  # Gradient accumulation
        log_every_n_steps=50,  # How often to log metrics (steps across all GPUs)
        # deterministic=True, # Might slow down training, use for debugging reproducibility if needed
        # profiler="simple", # Optional: Add profiler ("simple", "advanced", "pytorch") for performance analysis
        # Checkpointing behavior is controlled by ModelCheckpoint callback
        # enable_checkpointing=True, # Default is True if callbacks has ModelCheckpoint
    )
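    # With gradient accumulation, the optimizer steps once every ACCUMULATE_GRAD_BATCHES
    # micro-batches per process, so under DDP the effective batch size is
    # BATCH_SIZE_PER_GPU * (number of devices) * ACCUMULATE_GRAD_BATCHES, which is
    # assumed to be how hparams["effective_batch_size"] was computed earlier in the script.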

    # --- Start Training ---
    logging.info(
        f"Starting training with Effective Batch Size: {hparams['effective_batch_size']}..."
    )
    start_time = time.time()
    try:
        trainer.fit(model, train_dataloader, val_dataloader)
        training_duration = time.time() - start_time
        logging.info(
            f"Training finished in {training_duration / 3600:.2f} hours ({training_duration:.2f} seconds)."
        )

        # Log best model path and score
        best_path = checkpoint_callback.best_model_path
        best_score = checkpoint_callback.best_model_score  # This is a tensor, get value
        if best_score is not None:
            logging.info(
                f"Best model checkpoint saved at: {best_path} with val_loss: {best_score.item():.4f}"
            )
            # Log best score to wandb summary
            wandb_logger.experiment.summary["best_val_loss"] = best_score.item()
            wandb_logger.experiment.summary["best_model_path"] = best_path
        else:
            logging.warning(
                "Could not retrieve best model score from checkpoint callback."
            )

    except Exception as e:
        logging.error(f"Fatal error during training: {e}", exc_info=True)
        # Ensure wandb run is finished even on error
        if wandb.run is not None:
            wandb.finish(exit_code=1)  # Mark as failed run
        exit(1)

    # --- Load Best Model for Final Translation Examples ---
    best_model_path_to_load = checkpoint_callback.best_model_path
    logging.info(
        f"\nLoading best model from {best_model_path_to_load} for translation examples..."
    )
    final_model = None
    if best_model_path_to_load and os.path.exists(best_model_path_to_load):
        try:
            # Load the model using the Lightning checkpoint loading mechanism
            # Pass hparams_dict again in case it's needed and not perfectly saved/loaded
            final_model = SmilesIupacLitModule.load_from_checkpoint(
                best_model_path_to_load,
                # Provide necessary args again if they weren't saved in hparams properly
                # (though save_hyperparameters should handle this)
                src_vocab_size=ACTUAL_SRC_VOCAB_SIZE,
                tgt_vocab_size=ACTUAL_TGT_VOCAB_SIZE,
                hparams_dict=hparams,  # Pass the original hparams
            )
            # Determine device for inference (use the first GPU if available)
            inference_device = torch.device(
                "cuda:0"
                if ACCELERATOR == "gpu" and torch.cuda.is_available()
                else "cpu"
            )  # torch.device expects "cuda", not the Lightning accelerator string "gpu"
            final_model = final_model.to(inference_device)
            final_model.eval()  # Set to evaluation mode
            final_model.freeze()  # Freeze weights for inference
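            # freeze() sets requires_grad=False on all parameters; eval() above already
            # switched dropout and normalization layers to inference behavior.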
            logging.info(
                f"Best model loaded successfully to {inference_device} for final translation."
            )
        except Exception as e:
            logging.error(
                f"Error loading saved model from {best_model_path_to_load}: {e}",
                exc_info=True,
            )
            final_model = None  # Ensure final_model is None if loading fails
    else:
        logging.error(
            f"Error: Best model checkpoint path not found or invalid: '{best_model_path_to_load}'. Cannot perform final translation."
        )

    # --- Example Translation (using some validation samples) ---
    if final_model:
        logging.info("\n--- Example Translations (using validation data) ---")
        num_examples = 20  # Number of validation samples to translate and display
        try:
            # Load validation samples directly from the files
            val_smi_examples = []
            val_iupac_examples = []
            if os.path.exists(VAL_SMILES_FILE) and os.path.exists(VAL_IUPAC_FILE):
                with (
                    open(VAL_SMILES_FILE, "r", encoding="utf-8") as f_smi,
                    open(VAL_IUPAC_FILE, "r", encoding="utf-8") as f_iupac,
                ):
                    for i, (smi_line, iupac_line) in enumerate(zip(f_smi, f_iupac)):
                        if i >= num_examples:
                            break
                        val_smi_examples.append(smi_line.strip())
                        val_iupac_examples.append(iupac_line.strip())
            else:
                logging.warning(
                    f"Validation files ({VAL_SMILES_FILE}, {VAL_IUPAC_FILE}) not found. Cannot show examples."
                )

            if len(val_smi_examples) > 0:
                print("\n" + "=" * 40)
                print(
                    f"Example Translations (First {len(val_smi_examples)} Validation Samples)"
                )
                print("=" * 40)
                # Use the device the model was loaded onto
                inference_device = next(final_model.parameters()).device
                translation_examples = []  # For potential logging to wandb
                for i in range(len(val_smi_examples)):
                    smi = val_smi_examples[i]
                    true_iupac = val_iupac_examples[i]
                    predicted_iupac = translate(
                        model=final_model,  # Use the loaded best model
                        src_sentence=smi,
                        smiles_tokenizer=smiles_tokenizer,
                        iupac_tokenizer=iupac_tokenizer,
                        device=inference_device,  # Use model's device
                        max_len=hparams["max_len"],
                        sos_idx=SOS_IDX,
                        eos_idx=EOS_IDX,
                        pad_idx=PAD_IDX,
                    )
                    print(f"\nExample {i + 1}:")
                    print(f"  SMILES:        {smi}")
                    print(f"  True IUPAC:    {true_iupac}")
                    print(f"  Predicted IUPAC: {predicted_iupac}")
                    print("-" * 30)
                    # Prepare data for wandb table
                    translation_examples.append([smi, true_iupac, predicted_iupac])

                print("=" * 40 + "\n")

                # Log examples to a WandB Table
                try:
                    columns = ["SMILES", "True IUPAC", "Predicted IUPAC"]
                    wandb_table = wandb.Table(
                        data=translation_examples, columns=columns
                    )
                    wandb_logger.experiment.log(
                        {"validation_translations": wandb_table}
                    )
                    logging.info("Logged translation examples to WandB Table.")
                except Exception as wb_err:
                    logging.error(
                        f"Failed to log translation examples to WandB: {wb_err}"
                    )

            else:
                logging.warning("Could not load validation samples for examples.")
        except Exception as e:
            logging.error(f"Error during example translation phase: {e}", exc_info=True)
    else:
        logging.warning(
            "Skipping final translation examples as the best model could not be loaded."
        )

    # --- Finish WandB Run ---
    if wandb.run is not None:
        wandb.finish()
        logging.info("WandB run finished.")
    else:
        logging.info("No active WandB run to finish.")

    logging.info("Script finished.")