# %%
# Code that reads jap2010 and converts it to parquet. No need to re-run this.
"""
テキストアーカイブ - 日本語ウェブコーパス 2010
http://www.s-yata.jp/corpus/nwc2010/texts/
を処理するcode。
#URLは常時は空いていないので注意。
wget http://www.s-yata.jp/corpus/nwc2010/texts/filelist
wget -i filelist
"""
# %%
from datasets import load_dataset
import glob
import pandas as pd
import lzma
from Touten import touten_insert, del_kaigyo
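# %%
# Quick check of the project-local Touten helpers on a tiny sample.
# Assumed behavior (a sketch of intent, not the actual implementation):
# touten_insert adds the reading comma "、" where a line break interrupts a
# sentence, and del_kaigyo then strips the remaining newlines so each
# document becomes one continuous string.
sample = "今日は晴れ\nです"
print(touten_insert(sample))
print(del_kaigyo(touten_insert(sample)))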
# %%
import os
out_dir = "../data/jap2010_parquet"
if not os.path.exists(out_dir):
    os.mkdir(out_dir)
# %%
count_threshold = 3  # a run of this many blank lines separates documents
max_lines = 2000     # hard cap on lines per document
task_name = "00000001"
for xz_path in glob.glob("../data/jap2010/*.xz"):
    task_name = xz_path.split("/")[-1].split(".")[0]
    print(task_name)
    path = f"../data/jap2010/{task_name}.xz"
    # Open the .xz file and read all lines into memory
    with lzma.open(path, 'rt') as file:
        lines = file.readlines()
    new_line_count = 0
    current_doc = []
    cnt = 0
    text_len = 0
    text_list = []
    for line in lines:
        if line == "\n":
            new_line_count += 1
        else:
            new_line_count = 0
            current_doc.append(line)
        if new_line_count >= count_threshold or len(current_doc) > max_lines:
            # Each line keeps its trailing "\n", so collapse the doubled
            # newlines produced by the join
            text = "\n".join(current_doc)
            text = text.strip()
            text = text.replace("\n\n", "\n")
            text = touten_insert(text)
            text = del_kaigyo(text)
            current_doc = []
            new_line_count = 0
            # An earlier version wrote JSONL instead of parquet:
            # if text != "":
            #     d = json.dumps({"text": text}, ensure_ascii=False)
            #     with open(f"{out_dir}/{task_name}.jsonl", "a") as f:
            #         f.write(f"{d}\n")
            if text != "":
                # print(len(text))
                text_list.append(text)
                text_len += len(text)
            # Flush a shard once roughly 10^8 characters have accumulated
            if text_len > 10**8:
                # if len(text_list) > 10:
                #     break
                df = pd.DataFrame(text_list, columns=["text"])
                df.to_parquet(f"{out_dir}/{task_name}_{cnt}.parquet")
                cnt += 1
                text_list = []
                text_len = 0
            # if cnt > 100:
            #     break
    # break
    # Write the remaining documents as the final shard (no reset_index, so
    # all shards share the same single-column schema)
    df = pd.DataFrame(text_list, columns=["text"])
    df.to_parquet(f"{out_dir}/{task_name}_{cnt}.parquet")
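# %%
# Sanity check: list the parquet shards written for the last processed task
# (each shard holds roughly 10^8 characters of text).
sorted(glob.glob(f"{out_dir}/{task_name}_*.parquet"))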
# %%
text = text_list[1]
text = touten_insert(text)
text = del_kaigyo(text)
print(text)
# %%
text = del_kaigyo(text)
print(text)
# %%
t = "たくさんの「わたし」に見てもらいたいから、ダムの映画祭を、谷根千で、"
# end_filter(t)
# %%
df = pd.read_parquet("../data/jap2010_parquet/00000001_0.parquet")
df
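# %%
# Character counts per document in the shard, as a quick distribution check:
df["text"].str.len().describe()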
# %%
# huggingface-cli upload --repo-type=dataset hatakeyama-llm-team/japanese2010 .
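# %%
# Python equivalent of the CLI upload above (a sketch; assumes you are
# already logged in, e.g. via `huggingface-cli login`):
from huggingface_hub import HfApi

HfApi().upload_folder(
    folder_path=out_dir,
    repo_id="hatakeyama-llm-team/japanese2010",
    repo_type="dataset",
)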
# %%
# load test
dataset = load_dataset(
    "hatakeyama-llm-team/japanese2010",
    streaming=True,
    split="train",
)
# %%
cnt = 0
for data in dataset:
    print(data["text"][:10])
    cnt += 1
    if cnt > 10:
        break
# %%
with open("t.txt", "w") as f:
    f.write(data["text"])
# %%