Datasets:

Modalities:
Tabular
Text
Formats:
parquet
ArXiv:
Libraries:
Datasets
pandas
License:
File size: 2,854 Bytes
1be89f3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import os

import click
import polars as pl


@click.command()
@click.option(
    "--src_dir",
    type=click.Path(exists=True, file_okay=False),
    required=True,
    help="Path to the directory containing source parquet files, e.g., './50m'.",
)
@click.option(
    "--dst_dir",
    type=click.Path(file_okay=False),
    required=False,
    help="Path to the directory where Parquet files will be saved. "
    "If not specified, Parquet files are saved in 'src_dir'. e.g., './out'.",
)
@click.option(
    "--file_name",
    type=str,
    default="multi_event",
    help="Base name for the output Parquet file. Default is 'multi_event'.",
)
def cli(src_dir: str, dst_dir: str, file_name: str):
    """Command-line entry point: merge per-event parquet files into one dataset."""
    # Fall back to writing next to the inputs when no output dir was given.
    dst_dir = src_dir if dst_dir is None else dst_dir

    print(f"{src_dir=}, {dst_dir=}, {file_name=}")
    make_multievent_dataset(src_dir, dst_dir, file_name)


def make_multievent_dataset(src_dir: str, dst_dir: str, file_name: str) -> None:
    """Combine five per-event parquet files into one multi-event parquet file.

    Reads ``{listen,dislike,like,undislike,unlike}s.parquet`` from ``src_dir``,
    tags each row with an ``event_type`` Enum column, pads the non-listen
    frames with null ``played_ratio_pct`` / ``track_length_seconds`` columns so
    every frame shares one schema, sorts the union by ``(uid, timestamp)``, and
    streams the result to ``<dst_dir>/<file_name>.parquet``.

    Args:
        src_dir: Directory holding the five source parquet files.
        dst_dir: Output directory (created if missing).
        file_name: Base name (without extension) of the output file.
    """
    os.makedirs(dst_dir, exist_ok=True)

    # Enum dtype keeps the event label compact and self-describing on disk.
    events = pl.Enum(["listen", "dislike", "like", "undislike", "unlike"])

    frames = []
    # Iteration order matches the original concat order: listens first.
    for event in ("listen", "dislike", "like", "undislike", "unlike"):
        lazy = pl.scan_parquet(os.path.join(src_dir, f"{event}s.parquet"))
        columns = []
        if event != "listen":
            # Only listen events carry these metrics; pad the other frames
            # with nulls so the schemas line up for the vertical concat.
            columns += [
                pl.lit(None).alias("played_ratio_pct"),
                pl.lit(None).alias("track_length_seconds"),
            ]
        columns.append(pl.lit(event).cast(events).alias("event_type"))
        frames.append(lazy.with_columns(columns))

    combined_df = pl.concat(frames).sort(
        by=[
            "uid",
            "timestamp",
        ],
        # Stable sort: events with equal (uid, timestamp) keep their
        # concat order (listen before dislike before like, ...).
        maintain_order=True,
    )

    # NOTE: event_type is already cast to the Enum in every branch above, so
    # the redundant re-cast the original performed before sinking is dropped.
    combined_df.sink_parquet(
        os.path.join(dst_dir, file_name + ".parquet"),
        compression="lz4",
        statistics=True,
    )


# Script entry point: delegate argument parsing and execution to click.
if __name__ == "__main__":
    cli()