haneulpark committed
Commit ccd1d8e · verified · 1 Parent(s): 30a12b9

Upload Molecule3D_preprocessing.py

Files changed (1): Molecule3D_preprocessing.py (+240 -0)
Molecule3D_preprocessing.py ADDED
# This is a script for Molecule3D dataset preprocessing

# 1. Load modules
import pandas as pd
import numpy as np
import urllib.request
import tqdm
import rdkit
from rdkit import Chem
import os
import molvs
import csv
import json

standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()


# 2. Download the original dataset
# Original data:
# Molecule3D: A Benchmark for Predicting 3D Geometries from Molecular Graphs
# Zhao Xu, Youzhi Luo, Xuan Zhang, Xinyi Xu, Yaochen Xie, Meng Liu, Kaleb Dickerson, Cheng Deng, Maho Nakata, Shuiwang Ji

# Please download the files from the link below:
# https://drive.google.com/drive/u/2/folders/1y-EyoDYMvWZwClc2uvXrM4_hQBtM85BI
# Suppose the files have been downloaded and unzipped

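# Optional (not part of the original upload): the Google Drive folder above can also be
# fetched programmatically. A minimal sketch, assuming the third-party 'gdown' package is
# installed (pip install gdown); downloading the files manually from a browser works just
# as well. Disabled by default.
DOWNLOAD_WITH_GDOWN = False
if DOWNLOAD_WITH_GDOWN:
    import gdown  # assumed extra dependency, not imported by the original script
    gdown.download_folder(
        url='https://drive.google.com/drive/u/2/folders/1y-EyoDYMvWZwClc2uvXrM4_hQBtM85BI',
        output='/YOUR LOCAL DIRECTORY/',  # same placeholder directory as 'base_dir' below
        quiet=False)
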
# 3. This part adds SMILES in addition to the SDF block and saves CSV files
# List of file ranges and corresponding SDF/CSV filenames
file_ranges = [
    (0, 1000000),
    (1000001, 2000000),
    (2000001, 3000000),
    (3000001, 3899647)
]

# Base directory for input and output files
base_dir = '/YOUR LOCAL DIRECTORY/'  # Please change this part

for start, end in file_ranges:
    sdf_file = os.path.join(base_dir, f'combined_mols_{start}_to_{end}.sdf')
    output_csv = os.path.join(base_dir, f'smiles_sdf_{start}_{end}.csv')

    # Read the SDF file
    suppl = Chem.SDMolSupplier(sdf_file)

    # Write a CSV file containing the index, the original SDF block, and the SMILES
    # (the later steps read 'smiles_sdf_*' files and expect both an 'sdf' and a 'SMILES' column)
    with open(output_csv, mode='w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(['index', 'sdf', 'SMILES'])

        for idx, mol in enumerate(suppl):
            if mol is None:
                continue

            smiles = Chem.MolToSmiles(mol)
            writer.writerow([f'{idx + start + 1}', Chem.MolToMolBlock(mol), smiles])


''' These files are expected to be stored:
smiles_sdf_0_1000000.csv
smiles_sdf_1000001_2000000.csv
smiles_sdf_2000001_3000000.csv
smiles_sdf_3000001_3899647.csv'''

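# Optional check (not part of the original script): confirm the four CSV files were written
# and report how many molecules each one contains before moving on. csv.reader is used so
# that the multi-line quoted SDF blocks are counted as single rows.
for start, end in file_ranges:
    csv_path = os.path.join(base_dir, f'smiles_sdf_{start}_{end}.csv')
    if not os.path.exists(csv_path):
        print('Missing file:', csv_path)
        continue
    with open(csv_path, newline='') as f:
        n_rows = sum(1 for _ in csv.reader(f)) - 1  # subtract the header row
    print(csv_path, ':', n_rows, 'molecules')
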
# 4. Check if there are any missing SMILES or SDF entries

df1 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_0_1000000.csv'))  # Assumes 'base_dir' has already been changed above
df2 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_1000001_2000000.csv'))
df3 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_2000001_3000000.csv'))
df4 = pd.read_csv(os.path.join(base_dir, 'smiles_sdf_3000001_3899647.csv'))

missing_1 = df1[df1.isna().any(axis=1)]
missing_2 = df2[df2.isna().any(axis=1)]
missing_3 = df3[df3.isna().any(axis=1)]
missing_4 = df4[df4.isna().any(axis=1)]

print('For smiles_sdf_0_1000000.csv file : ', missing_1)
print('For smiles_sdf_1000001_2000000.csv file : ', missing_2)
print('For smiles_sdf_2000001_3000000.csv file : ', missing_3)
print('For smiles_sdf_3000001_3899647.csv file : ', missing_4)

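# Optional (not part of the original script): if the checks above did report missing entries,
# one way to handle them is to drop those rows before sanitization. Disabled by default so
# the behaviour matches the original pipeline, which only prints the report.
DROP_MISSING = False
if DROP_MISSING:
    df1 = df1.dropna().reset_index(drop=True)
    df2 = df2.dropna().reset_index(drop=True)
    df3 = df3.dropna().reset_index(drop=True)
    df4 = df4.dropna().reset_index(drop=True)
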
# 5. Sanitize the molecules with MolVS

# This part would take a few hours: each chunk is standardized, stripped of salt/solvent
# fragments, re-canonicalized, validated, and written back out.
chunks = [
    (df1, 'smiles_sdf_0_1000000_sanitized.csv'),
    (df2, 'smiles_sdf_1000001_2000000_sanitized.csv'),
    (df3, 'smiles_sdf_2000001_3000000_sanitized.csv'),
    (df4, 'smiles_sdf_3000001_3899647_sanitized.csv'),
]

for df, sanitized_csv in chunks:
    # Standardize, remove counter-ions/solvents, and store the canonical SMILES in 'X'
    df['X'] = [
        rdkit.Chem.MolToSmiles(
            fragment_remover.remove(
                standardizer.standardize(
                    rdkit.Chem.MolFromSmiles(smiles))))
        for smiles in df['SMILES']]

    # Collect any remaining MolVS validation alerts
    problems = []
    for index, row in tqdm.tqdm(df.iterrows(), total=len(df)):
        result = molvs.validate_smiles(row['X'])
        if len(result) == 0:
            continue
        problems.append((row['X'], result))

    # Most alerts are raised because the molecule is a salt form and/or is not neutralized
    for smiles, alert in problems:
        print(f"SMILES: {smiles}, problem: {alert[0]}")

    df.to_csv(sanitized_csv)

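# Optional (not part of the original script): the MolVS standardization above is CPU-bound,
# so on a multi-core machine it can be parallelized with a process pool instead of the
# single-threaded list comprehension. A minimal sketch for the first chunk only, assuming
# the 'fork' start method (the Linux default); fresh Standardizer/FragmentRemover objects
# are created inside the worker so nothing needs to be shared across processes.
USE_MULTIPROCESSING = False
if USE_MULTIPROCESSING:
    import multiprocessing

    def sanitize_one(smiles):
        # Same standardize -> remove fragments -> canonical SMILES chain as above
        mol = rdkit.Chem.MolFromSmiles(smiles)
        return rdkit.Chem.MolToSmiles(
            molvs.fragment.FragmentRemover().remove(
                molvs.Standardizer().standardize(mol)))

    with multiprocessing.Pool() as pool:
        df1['X'] = pool.map(sanitize_one, df1['SMILES'], chunksize=1000)
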
# 6. Concatenate the four sanitized files into one long file
sanitized1 = pd.read_csv('smiles_sdf_0_1000000_sanitized.csv')
sanitized2 = pd.read_csv('smiles_sdf_1000001_2000000_sanitized.csv')
sanitized3 = pd.read_csv('smiles_sdf_2000001_3000000_sanitized.csv')
sanitized4 = pd.read_csv('smiles_sdf_3000001_3899647_sanitized.csv')

smiles_sdf_concatenated = pd.concat([sanitized1, sanitized2, sanitized3, sanitized4], ignore_index=True)

smiles_sdf_concatenated.to_csv('smiles_sdf_concatenated.csv', index=False)

# 7. Combine the properties file with smiles_sdf_concatenated.csv
smiles_sdf_concatenated = pd.read_csv('smiles_sdf_concatenated.csv')

properties = pd.read_csv('properties.csv')  # This file also comes from the link provided above

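# Optional sanity check (not in the original script): the column-wise concat below pairs rows
# purely by position, so the SMILES table and properties.csv must have the same length and order.
assert len(smiles_sdf_concatenated) == len(properties), \
    'smiles_sdf_concatenated.csv and properties.csv have different row counts'
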
smiles_sdf_properties_concatenated = pd.concat([smiles_sdf_concatenated, properties], axis=1)

smiles_sdf_properties_concatenated.to_csv('smiles_sdf_properties.csv', index=False)

# 8. Select and rename the columns
columns_selected = smiles_sdf_properties_concatenated[['Unnamed: 0', 'X', 'sdf', 'cid', 'dipole x', 'dipole y', 'dipole z', 'homo', 'lumo', 'homolumogap', 'scf energy']].copy()
columns_selected.rename(columns={'Unnamed: 0': 'index', 'X': 'SMILES', 'homolumogap': 'Y'}, inplace=True)

columns_selected.to_csv('Molecule3D_final.csv', index=False)

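# Optional (not in the original script): a quick look at the final table before splitting,
# to confirm that the expected columns ('index', 'SMILES', 'sdf', 'cid', ..., 'Y') are present.
print(columns_selected.shape)
print(list(columns_selected.columns))
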
# 9. Split the dataset using the random split and the scaffold split
Molecule3D_final = pd.read_csv('Molecule3D_final.csv')

# Random split
with open('random_split_inds.json', 'r') as f:
    split_data = json.load(f)

random_train = Molecule3D_final[Molecule3D_final['index'].isin(split_data['train'])]
random_test = Molecule3D_final[Molecule3D_final['index'].isin(split_data['test'])]
random_valid = Molecule3D_final[Molecule3D_final['index'].isin(split_data['valid'])]

# Writing Parquet requires the 'pyarrow' (or 'fastparquet') package
random_train.to_parquet('Molecule3D_random_train.parquet', index=False)
random_test.to_parquet('Molecule3D_random_test.parquet', index=False)
random_valid.to_parquet('Molecule3D_random_validation.parquet', index=False)

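# Optional (not in the original script): report how many rows each random split received;
# indices in the JSON file that did not match any row simply reduce these counts. The same
# check can be repeated for the scaffold split below.
print('Random split sizes:',
      len(random_train), 'train /', len(random_valid), 'valid /', len(random_test), 'test',
      'out of', len(Molecule3D_final), 'rows')
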
225
+ # Scaffold split
226
+ with open('scaffold_split_inds.json', 'r') as f: # random or scaffold
227
+ split_scaffold = json.load(f)
228
+
229
+ scaffold_train = Molecule3D_final[Molecule3D_final['index'].isin(split_scaffold['train'])]
230
+ scaffold_test = Molecule3D_final[Molecule3D_final['index'].isin(split_scaffold['test'])]
231
+ scaffold_valid = Molecule3D_final[Molecule3D_final['index'].isin(split_scaffold['valid'])]
232
+
233
+ scaffold_train.to_parquet('Molecule3D_scaffold_train.parquet', index=False)
234
+ scaffold_test.to_parquet('Molecule3D_scaffold_test.parquet', index=False)
235
+ scaffold_valid.to_parquet('Molecule3D_scaffold_validation.parquet', index=False)
236
+
237
+
238
+
239
+
240
+