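"""Hugging Face dataset loading script for the Nico Nico Jikkyo Kakolog Archives.

Builds a dataset of archived live-commentary ("jikkyo") comments served at
https://jikkyo.tsukumijima.net/. Two configs are defined below: 'all'
(every channel and period, close to 180 GB) and 'sample' (TOKYO MX, channel
jk9, year 2022, roughly 1 GB).
"""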
import json
import xml.etree.ElementTree as ET
from typing import Any, Dict, Iterator, List, Tuple, Union

import datasets
from datasets import (
    BuilderConfig,
    DatasetInfo,
    DownloadManager,
    Features,
    GeneratorBasedBuilder,
    SplitGenerator,
    Value,
)


class KakologArchivesConfig(BuilderConfig):
    """BuilderConfig that narrows the archive by channel, year, and file count."""

    def __init__(self, channel_id: Union[str, None] = None, year: Union[int, None] = None,
                 number_of_files: Union[int, None] = None, **kwargs):
        super().__init__(**kwargs)
        self.channel_id = channel_id  # e.g. 'jk9'; None keeps all channels
        self.year = year              # e.g. 2022; None keeps all years
        self.number_of_files = number_of_files  # cap on downloaded files; None keeps all


class KakologArchivesDatasetBuilder(GeneratorBasedBuilder):

    VERSION = '1.0.0'

    BUILDER_CONFIGS = [
        KakologArchivesConfig(
            name='all',
            version=VERSION,
            description='Fetches every archived comment across all channels and all periods. '
                        'Use with care: the full archive is close to 180 GB.',
        ),
        KakologArchivesConfig(
            name='sample',
            version=VERSION,
            description='As a sample, fetches every archived comment posted to TOKYO MX '
                        '(Jikkyo channel ID: jk9) during 2022, roughly 1 GB in total.',
            channel_id='jk9',
            year=2022,
        ),
    ]

    DEFAULT_CONFIG_NAME = 'all'

    def _info(self) -> DatasetInfo:
        return DatasetInfo(
            description='Every archived comment from the launch of the Nico Nico Jikkyo service to the present.',
            homepage='https://jikkyo.tsukumijima.net/',
            features=Features({
                'thread': Value('string'),    # ID of the thread the comment belongs to
                'no': Value('int64'),         # comment number within the thread
                'vpos': Value('int64'),       # playback position in 1/100 seconds
                'date': Value('int64'),       # post time as Unix seconds
                'date_usec': Value('int64'),  # microsecond part of the post time
                'user_id': Value('string'),   # poster's user ID
                'mail': Value('string'),      # command string (e.g. '184'), None if absent
                'premium': Value('bool'),     # whether the poster is a premium member
                'anonymity': Value('bool'),   # whether the comment was posted anonymously
                'content': Value('string'),   # the comment text itself
            }),
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[SplitGenerator]:

        def create_relative_paths(json_data: Dict[str, Any], current_path: str = '') -> List[str]:
            """Flatten nested dataset_structure.json entries (dict = directory,
            None = file) into a list of relative file paths."""
            relative_paths = []
            for key, value in json_data.items():
                new_path = f'{current_path}/{key}' if current_path else key
                if value is None:
                    relative_paths.append(new_path)
                elif isinstance(value, dict):
                    relative_paths.extend(create_relative_paths(value, new_path))
            return relative_paths
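
        # A minimal sketch of the expected dataset_structure.json shape; the
        # file name below is illustrative, not taken from the actual repository:
        #
        #   {"jk9": {"2022": {"01.nicojk": null}}}
        #
        # create_relative_paths() turns that input into ['jk9/2022/01.nicojk'].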
        # dataset_structure.json lists every log file stored in the repository.
        json_file = dl_manager.download('dataset_structure.json')
        with open(json_file, 'r', encoding='utf-8') as f:
            relative_paths = create_relative_paths(json.load(f))

        # Narrow the file list according to the selected config before downloading.
        if self.config.channel_id is not None:
            relative_paths = [path for path in relative_paths if f'{self.config.channel_id}/' in path]
        if self.config.year is not None:
            relative_paths = [path for path in relative_paths if f'/{self.config.year}/' in path]
        if self.config.number_of_files is not None:
            relative_paths = relative_paths[:self.config.number_of_files]  # slicing already clamps to the list length

        downloaded_files = dl_manager.download(relative_paths)
        return [SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': downloaded_files})]
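
    # Each downloaded log file is a bare sequence of <chat> elements in the
    # niconico comment XML format, with no surrounding root element. An
    # illustrative (not real) entry:
    #
    #   <chat thread="1640995200" no="1" vpos="100" date="1640995201"
    #         date_usec="0" user_id="AAAAAAAA" mail="184" premium="1"
    #         anonymity="1">こんにちは</chat>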

    def _generate_examples(self, files: List[str]) -> Iterator[Tuple[int, Dict[str, Any]]]:
        for idx_parent, file in enumerate(files):
            with open(file, 'r', encoding='utf-8') as f:
                # Wrap the raw <chat> fragments in a dummy <packet> root so the
                # file parses as a single well-formed XML document.
                contents = '<?xml version="1.0" encoding="UTF-8"?><packet>' + f.read() + '</packet>'
            root = ET.fromstring(contents)
            for idx, chat in enumerate(root.iter('chat')):
                # Skip comments flagged as deleted.
                if 'deleted' in chat.attrib:
                    continue
                chat_attrib = {
                    'thread': chat.attrib.get('thread', 'unknown'),
                    'no': int(chat.attrib.get('no', 0)),
                    'vpos': int(chat.attrib.get('vpos', 0)),
                    'date': int(chat.attrib.get('date', 0)),
                    'date_usec': int(chat.attrib.get('date_usec', 0)),
                    'user_id': chat.attrib.get('user_id', 'unknown'),
                    'mail': chat.attrib.get('mail', None),
                    'premium': chat.attrib.get('premium', '0') == '1',
                    'anonymity': chat.attrib.get('anonymity', '0') == '1',
                }
                # Key by (file index, comment index); unique as long as a single
                # file holds fewer than 1,000,000 comments.
                yield idx_parent * 1000000 + idx, {**chat_attrib, 'content': chat.text}
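

if __name__ == '__main__':
    # A minimal usage sketch, assuming this script is published under the
    # repository ID 'KakologArchives/KakologArchives'. streaming=True avoids
    # materializing the whole archive on disk; recent versions of the datasets
    # library also require trust_remote_code=True for script-based datasets.
    from datasets import load_dataset

    dataset = load_dataset('KakologArchives/KakologArchives', 'sample',
                           streaming=True, trust_remote_code=True)
    for example in dataset['train']:
        print(example['content'])
        break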