Dataset: Rostlab/ProstT5Dataset
Task: Text2Text Generation
Modality: Text
Format: parquet
Size: 10M - 100M
Tags: biology
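The script below converts the tokenized ProstT5Dataset splits back into plain sequence strings: it lazily scans each split's parquet files from the Hub, decodes the input_id_x and input_id_y token-id columns into "3di" and "protein" text columns, and streams one parquet file per split to disk.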
from functools import partial

import polars as pl
from tqdm import tqdm
from transformers import T5Tokenizer


def detokenize(ids: list[int], tokenizer: T5Tokenizer) -> str:
    """Decode a sequence of token ids back into a plain residue string."""
    # ProstT5 uses one token per residue, so decoding yields space-separated
    # characters followed by a single </s> marker; strip both.
    output = tokenizer.decode(ids).replace(" ", "").replace("</s>", "")
    # Each id list ends with the </s> token, so the decoded string must be
    # exactly one character shorter than the input.
    assert len(output) == len(ids) - 1
    return output


def main():
    tokenizer = T5Tokenizer.from_pretrained("Rostlab/ProstT5", do_lower_case=False)
    splits = {
        "test": "data/test-00000-of-00001-b109fa020c25190c.parquet",
        "valid": "data/valid-00000-of-00001-6442282fee0bc004.parquet",
        "train": "data/train-*-of-*.parquet",
    }
    detokenize_func = partial(detokenize, tokenizer=tokenizer)
    for name, path in tqdm(splits.items()):
        df = pl.scan_parquet("hf://datasets/Rostlab/ProstT5Dataset/" + path)
        # input_id_x holds the 3Di structure tokens, input_id_y the amino acids.
        df = df.with_columns(
            pl.col("input_id_x").map_elements(detokenize_func, return_dtype=pl.String).alias("3di"),
            pl.col("input_id_y").map_elements(detokenize_func, return_dtype=pl.String).alias("protein"),
        )
        df = df.drop("input_id_x", "input_id_y")
        # Stream each split to disk without materialising it in memory.
        df.sink_parquet(name + ".parquet")


if __name__ == "__main__":
    main()
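As a quick sanity check, the detokenized output can be read back with polars. This is a minimal sketch assuming the script above has already been run in the current directory; the train.parquet path and the five-row peek are illustrative:

import polars as pl

# Lazily peek at the first few detokenized records of the train split,
# which the script above writes as train.parquet.
preview = pl.scan_parquet("train.parquet").head(5).collect()
print(preview.select("3di", "protein"))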