|
""" |
|
Data used for experiments with MIMIR: processed train/test (member/non-member) splits for models trained on the Pile (for now).

Data processing is handled on the Hugging Face end.
|
""" |
|
|
|
import json
import os
from typing import List, Optional

import datasets
from datasets import (
    BuilderConfig,
    DownloadManager,
    GeneratorBasedBuilder,
    SplitGenerator,
)
|
|
|
|
|
# Project homepage for the MIMIR benchmark.
_HOMEPAGE = "http://github.com/iamgroot42/mimir"

_DESCRIPTION = """\
Member and non-member splits for our MI experiments using MIMIR. Data is available for each source.
We also cache neighbors (generated for the NE attack).
"""

# NOTE: raw string is required — the BibTeX entry contains LaTeX commands
# (e.g. \textbf) whose backslashes would otherwise be parsed as escape
# sequences ("\t" -> TAB), silently corrupting the citation text.
_CITATION = r"""@article{duan2024do,
    title={Do Membership Inference Attacks Work on Large Language Models?},
    author={Duan*, Michael and \textbf{A. Suri*} and Mireshghallah, Niloofar and Min, Sewon and Shi, Weijia and Zettlemoyer, Luke and Tsvetkov, Yulia and Choi, Yejin and Evans, David and Hajishirzi, Hannaneh},
    journal={arXiv preprint arXiv:???},
    year={2024}
}
"""

# Base URL under which all per-split .jsonl files are hosted.
_DOWNLOAD_URL = "https://huggingface.co/datasets/iamgroot42/mimir/resolve/main/"
|
|
|
|
|
class MimirConfig(BuilderConfig):
    """BuilderConfig for the Mimir dataset.

    Attributes:
        subsets: Subset identifiers (e.g. n-gram overlap thresholds or
            temporal buckets) available for this data source; empty when the
            source has a single, unqualified split.
    """

    def __init__(self, *args, subsets: Optional[List[str]] = None, **kwargs):
        """Constructs a MimirConfig.

        Args:
            subsets: Optional subset identifiers for this source. Defaults to
                no subsets. (A ``None`` default is used instead of ``[]`` to
                avoid the shared-mutable-default pitfall.)
            *args: Positional arguments forwarded to BuilderConfig.
            **kwargs: Keyword arguments forwarded to BuilderConfig.
        """
        # Forward *args too — previously they were silently dropped.
        super().__init__(*args, **kwargs)
        self.subsets = subsets if subsets is not None else []
|
|
|
|
|
class MimirDataset(GeneratorBasedBuilder):
    """Dataset builder exposing MIMIR member/non-member splits per source.

    Each config corresponds to one data source (a Pile subset, C4, or a
    temporal benchmark); configs with ``subsets`` expose one split per
    subset variant, and every split additionally has cached neighbor files
    for the neighborhood (NE) attack where applicable.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = MimirConfig
    BUILDER_CONFIGS = [
        MimirConfig(
            name="arxiv",
            subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
            description="This split contains data from the Pile's Arxiv subset at various n-gram overlap thresholds"
        ),
        MimirConfig(
            name="dm_mathematics",
            subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
            description="This split contains data from the Pile's DM Mathematics subset at various n-gram overlap thresholds"
        ),
        MimirConfig(
            name="github",
            subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
            description="This split contains data from the Pile's GitHub subset at various n-gram overlap thresholds"
        ),
        MimirConfig(
            name="hackernews",
            subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
            description="This split contains data from the Pile's HackerNews subset at various n-gram overlap thresholds"
        ),
        MimirConfig(
            name="pile_cc",
            subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
            description="This split contains data from the Pile's Pile CC subset at various n-gram overlap thresholds"
        ),
        MimirConfig(
            name="pubmed_central",
            subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
            description="This split contains data from the Pile's PubMed Central subset at various n-gram overlap thresholds"
        ),
        MimirConfig(
            name="wikipedia_(en)",
            subsets=["ngram_7_0.2", "ngram_13_0.2", "ngram_13_0.8"],
            description="This split contains data from the Pile's Wikipedia subset at various n-gram overlap thresholds"
        ),
        MimirConfig(
            name="full_pile", description="This split contains data from multiple sources in the Pile",
        ),
        MimirConfig(
            name="c4", description="This split contains data the C4 dataset",
        ),
        MimirConfig(
            name="temporal_arxiv",
            subsets=["2020_08", "2021_01", "2021_06", "2022_01", "2022_06", "2023_01", "2023_06"],
            description="This split contains benchmarks where non-members are selected from various months from 2020-08 and onwards",
        ),
        MimirConfig(
            name="temporal_wiki", description="This split contains benchmarks where non-members are selected from 2023-08 and onwards",
        ),
    ]

    def _info(self):
        """Returns dataset metadata (description, features, homepage, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # Every example is a list of strings: either a single record
            # (wrapped in _generate_examples) or a record's neighbor list,
            # so one feature schema covers both kinds of files.
            features=datasets.Features(
                {"text": datasets.Sequence(datasets.Value("string"))}
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            # Was defined at module level but never wired into the info.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Returns SplitGenerators.

        Builds member/nonmember (plus *_neighbors) splits for each subset of
        the selected config and maps them to their remote .jsonl files.
        """
        NEIGHBOR_SUFFIX = "_neighbors_25_bert_in_place_swap"
        # full_pile was cached with 10k records per split; every other
        # source uses the 1k-record cache directory.
        parent_dir = (
            "cache_100_200_10000_512"
            if self.config.name == "full_pile"
            else "cache_100_200_1000_512"
        )

        if self.config.subsets:
            subset_splits = [f"{self.config.name}_{subset}" for subset in self.config.subsets]
        else:
            subset_splits = [self.config.name]

        file_paths = {}
        for subset_split in subset_splits:
            # Member (train) and non-member (test) records.
            file_paths[f"{subset_split}_member"] = os.path.join(
                parent_dir, "train", f"{subset_split}.jsonl"
            )
            file_paths[f"{subset_split}_nonmember"] = os.path.join(
                parent_dir, "test", f"{subset_split}.jsonl"
            )
            # Cached neighbors for the neighborhood (NE) attack.
            file_paths[f"{subset_split}_member_neighbors"] = os.path.join(
                parent_dir,
                "train_neighbors",
                f"{subset_split}{NEIGHBOR_SUFFIX}.jsonl",
            )
            file_paths[f"{subset_split}_nonmember_neighbors"] = os.path.join(
                parent_dir,
                "test_neighbors",
                f"{subset_split}{NEIGHBOR_SUFFIX}.jsonl",
            )

        # download_and_extract preserves the dict structure of its input, so
        # split names map directly to local paths — no manual index table.
        data_dirs = dl_manager.download_and_extract(
            {name: _DOWNLOAD_URL + path for name, path in file_paths.items()}
        )

        return [
            SplitGenerator(name=split_name, gen_kwargs={"file_path": local_path})
            for split_name, local_path in data_dirs.items()
        ]

    def _generate_examples(self, file_path):
        """Yields (key, example) pairs from one .jsonl file.

        Args:
            file_path: Local path to a .jsonl file; each line is either a
                single JSON value (plain record) or a JSON list (neighbors).
        """
        with open(file_path, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                record = json.loads(line)
                # Wrap plain records so both record files and neighbor files
                # conform to the Sequence(string) "text" feature.
                if not isinstance(record, list):
                    record = [record]
                yield idx, {"text": record}
|
|