Datasets:
said
committed on
Upload folder using huggingface_hub

Files changed:
- .gitattributes +1 -59
- README.md +43 -3
- data/combined_dataset.jsonl +3 -0
- json_mermaid.py +110 -0
.gitattributes
CHANGED
@@ -1,59 +1 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mds filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
- # Video files - compressed
- *.mp4 filter=lfs diff=lfs merge=lfs -text
- *.webm filter=lfs diff=lfs merge=lfs -text
+ *.jsonl filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,3 +1,43 @@
-
-
-
+ # JSON Mermaid Dataset
+
+ This dataset contains conversations in ShareGPT format, designed for fine-tuning language models.
+
+ ## Dataset Structure
+
+ The dataset follows the ShareGPT format: each example contains a "conversations" field holding an array of messages. Each message has:
+ - "from": indicates who sent the message (typically "human" or "gpt")
+ - "value": the content of the message
+
+ Example:
+ ```json
+ {
+   "conversations": [
+     {"from": "human", "value": "Hello, how are you?"},
+     {"from": "gpt", "value": "I'm doing well, thank you for asking! How can I help you today?"}
+   ]
+ }
+ ```
+
+ ## Usage
+
+ You can load this dataset using the Hugging Face datasets library:
+
+ ```python
+ from datasets import load_dataset
+
+ dataset = load_dataset("mugivara1/json-mermaid", split="train")
+ ```
+
+ ## Citation
+
+ If you use this dataset in your research, please cite:
+
+ ```
+ @misc{json-mermaid,
+   author = {mugivara1},
+   title = {JSON Mermaid Dataset},
+   year = {2025},
+   publisher = {Hugging Face},
+   url = {https://huggingface.co/datasets/mugivara1/json-mermaid}
+ }
+ ```
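A quick way to sanity-check the loaded data is to flatten each conversation into (speaker, text) pairs. The sketch below is illustrative and not part of this commit; it assumes only the "conversations"/"from"/"value" schema documented in the README, and handles both shapes the datasets library can return for a sequence of records.

```python
from datasets import load_dataset

dataset = load_dataset("mugivara1/json-mermaid", split="train")

for example in dataset.select(range(3)):
    conv = example["conversations"]
    # A datasets.Sequence over a dict (as in json_mermaid.py below) is
    # returned as a dict of parallel lists; plain JSONL ingestion yields a
    # list of dicts instead. Normalize both shapes into (speaker, text) pairs.
    if isinstance(conv, dict):
        pairs = zip(conv["from"], conv["value"])
    else:
        pairs = ((m["from"], m["value"]) for m in conv)
    for speaker, text in pairs:
        print(f"{speaker}: {text[:80]}")
    print("---")
```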
data/combined_dataset.jsonl
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47b715d52cc55009e9a1c463c6b4724c3360f5c70d4bf57a260a399f259ec002
+ size 106820118
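The three `+` lines above are a Git LFS pointer, not the JSONL data itself; the real ~107 MB file is fetched with `git lfs pull`. As a hypothetical helper (the `is_lfs_pointer` name and the relative path are assumptions, not part of this commit), a local clone could be checked like this:

```python
from pathlib import Path

def is_lfs_pointer(path: str) -> bool:
    """Heuristic check: LFS pointer files are tiny and start with the
    spec version line shown above; the real JSONL is ~107 MB."""
    p = Path(path)
    if p.stat().st_size > 1024:  # real data, not a pointer
        return False
    return p.read_text(encoding="utf-8").startswith(
        "version https://git-lfs.github.com/spec/"
    )

if is_lfs_pointer("data/combined_dataset.jsonl"):
    print("Pointer only - run `git lfs pull` to fetch the 106,820,118-byte file.")
```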
json_mermaid.py
ADDED
@@ -0,0 +1,110 @@
+ # coding=utf-8
+ # Copyright 2023 The HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import json
+ import os
+
+ import datasets
+
+ _DESCRIPTION = """
+ ShareGPT format conversations dataset for fine-tuning language models.
+ """
+
+ _CITATION = """\
+ @misc{json-mermaid,
+   author = {mugivara1},
+   title = {JSON Mermaid Dataset},
+   year = {2025},
+   publisher = {Hugging Face},
+   url = {https://huggingface.co/datasets/mugivara1/json-mermaid}
+ }
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/mugivara1/json-mermaid"
+
+ # No download URLs: the data files are uploaded directly to this repo.
+ _URLs = {}
+
+
+ class JsonMermaidConfig(datasets.BuilderConfig):
+     """BuilderConfig for JsonMermaid."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for JsonMermaid.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(JsonMermaidConfig, self).__init__(**kwargs)
+
+
+ class JsonMermaid(datasets.GeneratorBasedBuilder):
+     """ShareGPT format conversations for LLM fine-tuning."""
+
+     VERSION = datasets.Version("1.0.0")
+     BUILDER_CONFIGS = [
+         JsonMermaidConfig(
+             name="default",
+             version=VERSION,
+             description="ShareGPT format conversations",
+         ),
+     ]
+     DEFAULT_CONFIG_NAME = "default"
+
+     def _info(self):
+         features = datasets.Features({
+             "conversations": datasets.Sequence(
+                 {
+                     "from": datasets.Value("string"),
+                     "value": datasets.Value("string"),
+                 }
+             ),
+         })
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         data_dir = "data"
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "combined_dataset.jsonl"),
+                     "split": "train",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         """Yields examples."""
+         with open(filepath, encoding="utf-8") as f:
+             for idx, line in enumerate(f):
+                 data = json.loads(line)
+                 # Handle the ShareGPT format: use the "conversations" array
+                 # if present, otherwise treat the whole record as the list.
+                 if "conversations" in data:
+                     conversations = data["conversations"]
+                 else:
+                     # Fallback: assumes the line is already a list of messages
+                     # in the format [{"from": "...", "value": "..."}].
+                     conversations = data
+
+                 yield idx, {
+                     "conversations": conversations,
+                 }
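Because `_generate_examples` accepts two line shapes, a quick offline validation pass can catch records that fit neither before training. The following sketch is not part of the commit; `validate_line` is a hypothetical helper, and the path assumes the repository layout above after the LFS file has been fetched.

```python
import json

def validate_line(line: str) -> bool:
    """Check one JSONL record against the schema _generate_examples expects:
    either {"conversations": [...]} or a bare list of messages."""
    data = json.loads(line)
    messages = data.get("conversations") if isinstance(data, dict) else data
    if not isinstance(messages, list):
        return False
    return all(
        isinstance(m, dict) and "from" in m and "value" in m
        for m in messages
    )

with open("data/combined_dataset.jsonl", encoding="utf-8") as f:
    bad = [i for i, line in enumerate(f) if not validate_line(line)]

print(f"{len(bad)} malformed lines" if bad else "all lines match the schema")
```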