"""Detokenize the ProstT5Dataset back into plain 3Di and amino-acid strings.

Each split is streamed from the Hugging Face Hub, the token-id columns are
decoded into strings, and the result is written to a local parquet file.
"""
from functools import partial

import polars as pl
from tqdm import tqdm
from transformers import T5Tokenizer


def detokenize(seq: list[int], tokenizer: T5Tokenizer) -> str:
    """Decode a sequence of token ids into a plain residue string.

    The tokenizer inserts spaces between tokens and appends an EOS token
    (``</s>``); both are stripped so that one character remains per token.
    """
    output = tokenizer.decode(seq)
    output = output.replace(" ", "")
    output = output.replace("</s>", "")
    # Every token except the trailing EOS should map to exactly one character.
    assert len(output) == len(seq[:-1])
    return output


def main():
    tokenizer = T5Tokenizer.from_pretrained('Rostlab/ProstT5', do_lower_case=False)  # noqa
    splits = {
        'test': 'data/test-00000-of-00001-b109fa020c25190c.parquet',
        'valid': 'data/valid-00000-of-00001-6442282fee0bc004.parquet',
        'train': 'data/train-*-of-*.parquet',
    }
    detokenize_func = partial(detokenize, tokenizer=tokenizer)
    for name, path in tqdm(splits.items()):
        df = pl.scan_parquet('hf://datasets/Rostlab/ProstT5Dataset/' + path)
        # input_id_x holds 3Di structure tokens, input_id_y amino-acid tokens.
        df = df.with_columns(
            pl.col("input_id_x")
            .map_elements(detokenize_func, return_dtype=pl.String)
            .alias("3di"),
            pl.col("input_id_y")
            .map_elements(detokenize_func, return_dtype=pl.String)
            .alias("protein"),
        )
        df = df.drop("input_id_x", "input_id_y")
        df.sink_parquet(name + ".parquet")


if __name__ == "__main__":
    main()