Initial commit from existing repo
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +16 -0
- assets/benchmark.svg +1216 -0
- assets/logo.png +3 -0
- assets/wechat.jpg +3 -0
- assets/wechat_npu.jpg +3 -0
- data/README.md +419 -0
- data/README_zh.md +419 -0
- data/alpaca_en_demo.json +0 -0
- data/alpaca_zh_demo.json +0 -0
- data/belle_multiturn/belle_multiturn.py +67 -0
- data/c4_demo.json +0 -0
- data/convert_data.ipynb +421 -0
- data/dataset_info.json +684 -0
- data/dpo_en_demo.json +0 -0
- data/dpo_zh_demo.json +0 -0
- data/glaive_toolcall_en_demo.json +0 -0
- data/glaive_toolcall_zh_demo.json +0 -0
- data/hh_rlhf_en/hh_rlhf_en.py +84 -0
- data/identity.json +457 -0
- data/kto_en_demo.json +0 -0
- data/mllm_demo.json +140 -0
- data/mllm_demo_data/1.jpg +3 -0
- data/mllm_demo_data/1.mp4 +3 -0
- data/mllm_demo_data/2.avi +3 -0
- data/mllm_demo_data/2.jpg +3 -0
- data/mllm_demo_data/3.jpg +3 -0
- data/mllm_demo_data/3.mp4 +3 -0
- data/mllm_video_demo.json +47 -0
- data/multi_step_merged_arc_v4.json +3 -0
- data/multi_step_verifiers_training.json +3 -0
- data/multi_step_verifiers_training.txt +3 -0
- data/re_arc_v3.json +3 -0
- data/re_arc_v3.txt +3 -0
- data/re_arc_v4.json +3 -0
- data/re_arc_v4.txt +3 -0
- data/ultra_chat/ultra_chat.py +60 -0
- data/wiki_demo.txt +0 -0
- docker/docker-cuda/Dockerfile +71 -0
- docker/docker-cuda/docker-compose.yml +36 -0
- docker/docker-npu/Dockerfile +45 -0
- docker/docker-npu/docker-compose.yml +32 -0
- docker/docker-rocm/Dockerfile +65 -0
- docker/docker-rocm/docker-compose.yml +32 -0
- evaluation/ceval/ceval.py +161 -0
- evaluation/ceval/ceval.zip +3 -0
- evaluation/ceval/mapping.json +210 -0
- evaluation/cmmlu/cmmlu.py +168 -0
- evaluation/cmmlu/cmmlu.zip +3 -0
- evaluation/cmmlu/mapping.json +270 -0
- evaluation/mmlu/mapping.json +230 -0
.gitattributes
CHANGED
@@ -33,3 +33,19 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
data/multi_step_merged_arc_v4.json filter=lfs diff=lfs merge=lfs -text
|
37 |
+
data/multi_step_verifiers_training.json filter=lfs diff=lfs merge=lfs -text
|
38 |
+
data/multi_step_verifiers_training.txt filter=lfs diff=lfs merge=lfs -text
|
39 |
+
data/re_arc_v3.json filter=lfs diff=lfs merge=lfs -text
|
40 |
+
data/re_arc_v3.txt filter=lfs diff=lfs merge=lfs -text
|
41 |
+
data/re_arc_v4.json filter=lfs diff=lfs merge=lfs -text
|
42 |
+
data/re_arc_v4.txt filter=lfs diff=lfs merge=lfs -text
|
43 |
+
assets/logo.png filter=lfs diff=lfs merge=lfs -text
|
44 |
+
assets/wechat.jpg filter=lfs diff=lfs merge=lfs -text
|
45 |
+
assets/wechat_npu.jpg filter=lfs diff=lfs merge=lfs -text
|
46 |
+
data/mllm_demo_data/1.jpg filter=lfs diff=lfs merge=lfs -text
|
47 |
+
data/mllm_demo_data/1.mp4 filter=lfs diff=lfs merge=lfs -text
|
48 |
+
data/mllm_demo_data/2.avi filter=lfs diff=lfs merge=lfs -text
|
49 |
+
data/mllm_demo_data/2.jpg filter=lfs diff=lfs merge=lfs -text
|
50 |
+
data/mllm_demo_data/3.jpg filter=lfs diff=lfs merge=lfs -text
|
51 |
+
data/mllm_demo_data/3.mp4 filter=lfs diff=lfs merge=lfs -text
|
assets/benchmark.svg
ADDED
|
assets/logo.png
ADDED
![]() |
Git LFS Details
|
assets/wechat.jpg
ADDED
![]() |
Git LFS Details
|
assets/wechat_npu.jpg
ADDED
![]() |
Git LFS Details
|
data/README.md
ADDED
@@ -0,0 +1,419 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
The [dataset_info.json](dataset_info.json) contains all available datasets. If you are using a custom dataset, please **make sure** to add a *dataset description* in `dataset_info.json` and specify `dataset: dataset_name` before training to use it.
|
2 |
+
|
3 |
+
Currently we support datasets in **alpaca** and **sharegpt** format.
|
4 |
+
|
5 |
+
```json
|
6 |
+
"dataset_name": {
|
7 |
+
"hf_hub_url": "the name of the dataset repository on the Hugging Face hub. (if specified, ignore script_url and file_name)",
|
8 |
+
"ms_hub_url": "the name of the dataset repository on the Model Scope hub. (if specified, ignore script_url and file_name)",
|
9 |
+
"script_url": "the name of the directory containing a dataset loading script. (if specified, ignore file_name)",
|
10 |
+
"file_name": "the name of the dataset folder or dataset file in this directory. (required if above are not specified)",
|
11 |
+
"formatting": "the format of the dataset. (optional, default: alpaca, can be chosen from {alpaca, sharegpt})",
|
12 |
+
"ranking": "whether the dataset is a preference dataset or not. (default: False)",
|
13 |
+
"subset": "the name of the subset. (optional, default: None)",
|
14 |
+
"split": "the name of dataset split to be used. (optional, default: train)",
|
15 |
+
"folder": "the name of the folder of the dataset repository on the Hugging Face hub. (optional, default: None)",
|
16 |
+
"num_samples": "the number of samples in the dataset to be used. (optional, default: None)",
|
17 |
+
"columns (optional)": {
|
18 |
+
"prompt": "the column name in the dataset containing the prompts. (default: instruction)",
|
19 |
+
"query": "the column name in the dataset containing the queries. (default: input)",
|
20 |
+
"response": "the column name in the dataset containing the responses. (default: output)",
|
21 |
+
"history": "the column name in the dataset containing the histories. (default: None)",
|
22 |
+
"messages": "the column name in the dataset containing the messages. (default: conversations)",
|
23 |
+
"system": "the column name in the dataset containing the system prompts. (default: None)",
|
24 |
+
"tools": "the column name in the dataset containing the tool description. (default: None)",
|
25 |
+
"images": "the column name in the dataset containing the image inputs. (default: None)",
|
26 |
+
"videos": "the column name in the dataset containing the videos inputs. (default: None)",
|
27 |
+
"chosen": "the column name in the dataset containing the chosen answers. (default: None)",
|
28 |
+
"rejected": "the column name in the dataset containing the rejected answers. (default: None)",
|
29 |
+
"kto_tag": "the column name in the dataset containing the kto tags. (default: None)"
|
30 |
+
},
|
31 |
+
"tags (optional, used for the sharegpt format)": {
|
32 |
+
"role_tag": "the key in the message represents the identity. (default: from)",
|
33 |
+
"content_tag": "the key in the message represents the content. (default: value)",
|
34 |
+
"user_tag": "the value of the role_tag represents the user. (default: human)",
|
35 |
+
"assistant_tag": "the value of the role_tag represents the assistant. (default: gpt)",
|
36 |
+
"observation_tag": "the value of the role_tag represents the tool results. (default: observation)",
|
37 |
+
"function_tag": "the value of the role_tag represents the function call. (default: function_call)",
|
38 |
+
"system_tag": "the value of the role_tag represents the system prompt. (default: system, can override system column)"
|
39 |
+
}
|
40 |
+
}
|
41 |
+
```
|
42 |
+
|
43 |
+
## Alpaca Format
|
44 |
+
|
45 |
+
### Supervised Fine-Tuning Dataset
|
46 |
+
|
47 |
+
* [Example dataset](alpaca_en_demo.json)
|
48 |
+
|
49 |
+
In supervised fine-tuning, the `instruction` column will be concatenated with the `input` column and used as the human prompt, then the human prompt would be `instruction\ninput`. The `output` column represents the model response.
|
50 |
+
|
51 |
+
The `system` column will be used as the system prompt if specified.
|
52 |
+
|
53 |
+
The `history` column is a list consisting of string tuples representing prompt-response pairs in the history messages. Note that the responses in the history **will also be learned by the model** in supervised fine-tuning.
|
54 |
+
|
55 |
+
```json
|
56 |
+
[
|
57 |
+
{
|
58 |
+
"instruction": "human instruction (required)",
|
59 |
+
"input": "human input (optional)",
|
60 |
+
"output": "model response (required)",
|
61 |
+
"system": "system prompt (optional)",
|
62 |
+
"history": [
|
63 |
+
["human instruction in the first round (optional)", "model response in the first round (optional)"],
|
64 |
+
["human instruction in the second round (optional)", "model response in the second round (optional)"]
|
65 |
+
]
|
66 |
+
}
|
67 |
+
]
|
68 |
+
```
|
69 |
+
|
70 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
71 |
+
|
72 |
+
```json
|
73 |
+
"dataset_name": {
|
74 |
+
"file_name": "data.json",
|
75 |
+
"columns": {
|
76 |
+
"prompt": "instruction",
|
77 |
+
"query": "input",
|
78 |
+
"response": "output",
|
79 |
+
"system": "system",
|
80 |
+
"history": "history"
|
81 |
+
}
|
82 |
+
}
|
83 |
+
```
|
84 |
+
|
85 |
+
### Pre-training Dataset
|
86 |
+
|
87 |
+
- [Example dataset](c4_demo.json)
|
88 |
+
|
89 |
+
In pre-training, only the `text` column will be used for model learning.
|
90 |
+
|
91 |
+
```json
|
92 |
+
[
|
93 |
+
{"text": "document"},
|
94 |
+
{"text": "document"}
|
95 |
+
]
|
96 |
+
```
|
97 |
+
|
98 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
99 |
+
|
100 |
+
```json
|
101 |
+
"dataset_name": {
|
102 |
+
"file_name": "data.json",
|
103 |
+
"columns": {
|
104 |
+
"prompt": "text"
|
105 |
+
}
|
106 |
+
}
|
107 |
+
```
|
108 |
+
|
109 |
+
### Preference Dataset
|
110 |
+
|
111 |
+
Preference datasets are used for reward modeling, DPO training, ORPO and SimPO training.
|
112 |
+
|
113 |
+
It requires a better response in `chosen` column and a worse response in `rejected` column.
|
114 |
+
|
115 |
+
```json
|
116 |
+
[
|
117 |
+
{
|
118 |
+
"instruction": "human instruction (required)",
|
119 |
+
"input": "human input (optional)",
|
120 |
+
"chosen": "chosen answer (required)",
|
121 |
+
"rejected": "rejected answer (required)"
|
122 |
+
}
|
123 |
+
]
|
124 |
+
```
|
125 |
+
|
126 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
127 |
+
|
128 |
+
```json
|
129 |
+
"dataset_name": {
|
130 |
+
"file_name": "data.json",
|
131 |
+
"ranking": true,
|
132 |
+
"columns": {
|
133 |
+
"prompt": "instruction",
|
134 |
+
"query": "input",
|
135 |
+
"chosen": "chosen",
|
136 |
+
"rejected": "rejected"
|
137 |
+
}
|
138 |
+
}
|
139 |
+
```
|
140 |
+
|
141 |
+
### KTO Dataset
|
142 |
+
|
143 |
+
An additional column `kto_tag` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
144 |
+
|
145 |
+
### Multimodal Image Dataset
|
146 |
+
|
147 |
+
An additional column `images` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
148 |
+
|
149 |
+
### Multimodal Video Dataset
|
150 |
+
|
151 |
+
An additional column `videos` is required. Please refer to the [sharegpt](#sharegpt-format) format for details.
|
152 |
+
|
153 |
+
## Sharegpt Format
|
154 |
+
|
155 |
+
### Supervised Fine-Tuning Dataset
|
156 |
+
|
157 |
+
- [Example dataset](glaive_toolcall_en_demo.json)
|
158 |
+
|
159 |
+
Compared to the alpaca format, the sharegpt format allows the datasets have **more roles**, such as human, gpt, observation and function. They are presented in a list of objects in the `conversations` column.
|
160 |
+
|
161 |
+
Note that the human and observation should appear in odd positions, while gpt and function should appear in even positions.
|
162 |
+
|
163 |
+
```json
|
164 |
+
[
|
165 |
+
{
|
166 |
+
"conversations": [
|
167 |
+
{
|
168 |
+
"from": "human",
|
169 |
+
"value": "human instruction"
|
170 |
+
},
|
171 |
+
{
|
172 |
+
"from": "function_call",
|
173 |
+
"value": "tool arguments"
|
174 |
+
},
|
175 |
+
{
|
176 |
+
"from": "observation",
|
177 |
+
"value": "tool result"
|
178 |
+
},
|
179 |
+
{
|
180 |
+
"from": "gpt",
|
181 |
+
"value": "model response"
|
182 |
+
}
|
183 |
+
],
|
184 |
+
"system": "system prompt (optional)",
|
185 |
+
"tools": "tool description (optional)"
|
186 |
+
}
|
187 |
+
]
|
188 |
+
```
|
189 |
+
|
190 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
191 |
+
|
192 |
+
```json
|
193 |
+
"dataset_name": {
|
194 |
+
"file_name": "data.json",
|
195 |
+
"formatting": "sharegpt",
|
196 |
+
"columns": {
|
197 |
+
"messages": "conversations",
|
198 |
+
"system": "system",
|
199 |
+
"tools": "tools"
|
200 |
+
}
|
201 |
+
}
|
202 |
+
```
|
203 |
+
|
204 |
+
### Pre-training Dataset
|
205 |
+
|
206 |
+
Not yet supported, please use the [alpaca](#alpaca-format) format.
|
207 |
+
|
208 |
+
### Preference Dataset
|
209 |
+
|
210 |
+
- [Example dataset](dpo_en_demo.json)
|
211 |
+
|
212 |
+
Preference datasets in sharegpt format also require a better message in `chosen` column and a worse message in `rejected` column.
|
213 |
+
|
214 |
+
```json
|
215 |
+
[
|
216 |
+
{
|
217 |
+
"conversations": [
|
218 |
+
{
|
219 |
+
"from": "human",
|
220 |
+
"value": "human instruction"
|
221 |
+
},
|
222 |
+
{
|
223 |
+
"from": "gpt",
|
224 |
+
"value": "model response"
|
225 |
+
},
|
226 |
+
{
|
227 |
+
"from": "human",
|
228 |
+
"value": "human instruction"
|
229 |
+
}
|
230 |
+
],
|
231 |
+
"chosen": {
|
232 |
+
"from": "gpt",
|
233 |
+
"value": "chosen answer (required)"
|
234 |
+
},
|
235 |
+
"rejected": {
|
236 |
+
"from": "gpt",
|
237 |
+
"value": "rejected answer (required)"
|
238 |
+
}
|
239 |
+
}
|
240 |
+
]
|
241 |
+
```
|
242 |
+
|
243 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
244 |
+
|
245 |
+
```json
|
246 |
+
"dataset_name": {
|
247 |
+
"file_name": "data.json",
|
248 |
+
"formatting": "sharegpt",
|
249 |
+
"ranking": true,
|
250 |
+
"columns": {
|
251 |
+
"messages": "conversations",
|
252 |
+
"chosen": "chosen",
|
253 |
+
"rejected": "rejected"
|
254 |
+
}
|
255 |
+
}
|
256 |
+
```
|
257 |
+
|
258 |
+
### KTO Dataset
|
259 |
+
|
260 |
+
- [Example dataset](kto_en_demo.json)
|
261 |
+
|
262 |
+
KTO datasets require a extra `kto_tag` column containing the boolean human feedback.
|
263 |
+
|
264 |
+
```json
|
265 |
+
[
|
266 |
+
{
|
267 |
+
"conversations": [
|
268 |
+
{
|
269 |
+
"from": "human",
|
270 |
+
"value": "human instruction"
|
271 |
+
},
|
272 |
+
{
|
273 |
+
"from": "gpt",
|
274 |
+
"value": "model response"
|
275 |
+
}
|
276 |
+
],
|
277 |
+
"kto_tag": "human feedback [true/false] (required)"
|
278 |
+
}
|
279 |
+
]
|
280 |
+
```
|
281 |
+
|
282 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
283 |
+
|
284 |
+
```json
|
285 |
+
"dataset_name": {
|
286 |
+
"file_name": "data.json",
|
287 |
+
"formatting": "sharegpt",
|
288 |
+
"columns": {
|
289 |
+
"messages": "conversations",
|
290 |
+
"kto_tag": "kto_tag"
|
291 |
+
}
|
292 |
+
}
|
293 |
+
```
|
294 |
+
|
295 |
+
### Multimodal Image Dataset
|
296 |
+
|
297 |
+
- [Example dataset](mllm_demo.json)
|
298 |
+
|
299 |
+
Multimodal image datasets require a `images` column containing the paths to the input images.
|
300 |
+
|
301 |
+
The number of images should be identical to the `<image>` tokens in the conversations.
|
302 |
+
|
303 |
+
```json
|
304 |
+
[
|
305 |
+
{
|
306 |
+
"conversations": [
|
307 |
+
{
|
308 |
+
"from": "human",
|
309 |
+
"value": "<image>human instruction"
|
310 |
+
},
|
311 |
+
{
|
312 |
+
"from": "gpt",
|
313 |
+
"value": "model response"
|
314 |
+
}
|
315 |
+
],
|
316 |
+
"images": [
|
317 |
+
"image path (required)"
|
318 |
+
]
|
319 |
+
}
|
320 |
+
]
|
321 |
+
```
|
322 |
+
|
323 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
324 |
+
|
325 |
+
```json
|
326 |
+
"dataset_name": {
|
327 |
+
"file_name": "data.json",
|
328 |
+
"formatting": "sharegpt",
|
329 |
+
"columns": {
|
330 |
+
"messages": "conversations",
|
331 |
+
"images": "images"
|
332 |
+
}
|
333 |
+
}
|
334 |
+
```
|
335 |
+
|
336 |
+
### Multimodal Video Dataset
|
337 |
+
|
338 |
+
- [Example dataset](mllm_video_demo.json)
|
339 |
+
|
340 |
+
Multimodal video datasets require a `videos` column containing the paths to the input videos.
|
341 |
+
|
342 |
+
The number of videos should be identical to the `<video>` tokens in the conversations.
|
343 |
+
|
344 |
+
```json
|
345 |
+
[
|
346 |
+
{
|
347 |
+
"conversations": [
|
348 |
+
{
|
349 |
+
"from": "human",
|
350 |
+
"value": "<video>human instruction"
|
351 |
+
},
|
352 |
+
{
|
353 |
+
"from": "gpt",
|
354 |
+
"value": "model response"
|
355 |
+
}
|
356 |
+
],
|
357 |
+
"videos": [
|
358 |
+
"video path (required)"
|
359 |
+
]
|
360 |
+
}
|
361 |
+
]
|
362 |
+
```
|
363 |
+
|
364 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
365 |
+
|
366 |
+
```json
|
367 |
+
"dataset_name": {
|
368 |
+
"file_name": "data.json",
|
369 |
+
"formatting": "sharegpt",
|
370 |
+
"columns": {
|
371 |
+
"messages": "conversations",
|
372 |
+
"videos": "videos"
|
373 |
+
}
|
374 |
+
}
|
375 |
+
```
|
376 |
+
|
377 |
+
### OpenAI Format
|
378 |
+
|
379 |
+
The openai format is simply a special case of the sharegpt format, where the first message may be a system prompt.
|
380 |
+
|
381 |
+
```json
|
382 |
+
[
|
383 |
+
{
|
384 |
+
"messages": [
|
385 |
+
{
|
386 |
+
"role": "system",
|
387 |
+
"content": "system prompt (optional)"
|
388 |
+
},
|
389 |
+
{
|
390 |
+
"role": "user",
|
391 |
+
"content": "human instruction"
|
392 |
+
},
|
393 |
+
{
|
394 |
+
"role": "assistant",
|
395 |
+
"content": "model response"
|
396 |
+
}
|
397 |
+
]
|
398 |
+
}
|
399 |
+
]
|
400 |
+
```
|
401 |
+
|
402 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
403 |
+
|
404 |
+
```json
|
405 |
+
"dataset_name": {
|
406 |
+
"file_name": "data.json",
|
407 |
+
"formatting": "sharegpt",
|
408 |
+
"columns": {
|
409 |
+
"messages": "messages"
|
410 |
+
},
|
411 |
+
"tags": {
|
412 |
+
"role_tag": "role",
|
413 |
+
"content_tag": "content",
|
414 |
+
"user_tag": "user",
|
415 |
+
"assistant_tag": "assistant",
|
416 |
+
"system_tag": "system"
|
417 |
+
}
|
418 |
+
}
|
419 |
+
```
|
data/README_zh.md
ADDED
@@ -0,0 +1,419 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[dataset_info.json](dataset_info.json) 包含了所有可用的数据集。如果您希望使用自定义数据集,请**务必**在 `dataset_info.json` 文件中添加*数据集描述*,并通过修改 `dataset: 数据集名称` 配置来使用数据集。
|
2 |
+
|
3 |
+
目前我们支持 **alpaca** 格式和 **sharegpt** 格式的数据集。
|
4 |
+
|
5 |
+
```json
|
6 |
+
"数据集名称": {
|
7 |
+
"hf_hub_url": "Hugging Face 的数据集仓库地址(若指定,则忽略 script_url 和 file_name)",
|
8 |
+
"ms_hub_url": "ModelScope 的数据集仓库地址(若指定,则忽略 script_url 和 file_name)",
|
9 |
+
"script_url": "包含数据加载脚本的本地文件夹名称(若指定,则忽略 file_name)",
|
10 |
+
"file_name": "该目录下数据集文件夹或文件的名称(若上述参数未指定,则此项必需)",
|
11 |
+
"formatting": "数据集格式(可选,默认:alpaca,可以为 alpaca 或 sharegpt)",
|
12 |
+
"ranking": "是否为偏好数据集(可选,默认:False)",
|
13 |
+
"subset": "数据集子集的名称(可选,默认:None)",
|
14 |
+
"split": "所使用的数据集切分(可选,默认:train)",
|
15 |
+
"folder": "Hugging Face 仓库的文件夹名称(可选,默认:None)",
|
16 |
+
"num_samples": "该数据集所使用的样本数量。(可选,默认:None)",
|
17 |
+
"columns(可选)": {
|
18 |
+
"prompt": "数据集代表提示词的表头名称(默认:instruction)",
|
19 |
+
"query": "数据集代表请求的表头名称(默认:input)",
|
20 |
+
"response": "数据集代表回答的表头名称(默认:output)",
|
21 |
+
"history": "数据集代表历史对话的表头名称(默认:None)",
|
22 |
+
"messages": "数据集代表消息列表的表头名称(默认:conversations)",
|
23 |
+
"system": "数据集代表系统提示的表头名称(默认:None)",
|
24 |
+
"tools": "数据集代表工具描述的表头名称(默认:None)",
|
25 |
+
"images": "数据集代表图像输入的表头名称(默认:None)",
|
26 |
+
"videos": "数据集代表视频输入的表头名称(默认:None)",
|
27 |
+
"chosen": "数据集代表更优回答的表头名称(默认:None)",
|
28 |
+
"rejected": "数据集代表更差回答的表头名称(默认:None)",
|
29 |
+
"kto_tag": "数据集代表 KTO 标签的表头名称(默认:None)"
|
30 |
+
},
|
31 |
+
"tags(可选,用于 sharegpt 格式)": {
|
32 |
+
"role_tag": "消息中代表发送者身份的键名(默认:from)",
|
33 |
+
"content_tag": "消息中代表文本内容的键名(默认:value)",
|
34 |
+
"user_tag": "消息中代表用户的 role_tag(默认:human)",
|
35 |
+
"assistant_tag": "消息中代表助手的 role_tag(默认:gpt)",
|
36 |
+
"observation_tag": "消息中代表工具返回结果的 role_tag(默认:observation)",
|
37 |
+
"function_tag": "消息中代表工具调用的 role_tag(默认:function_call)",
|
38 |
+
"system_tag": "消息中代表系统提示的 role_tag(默认:system,会覆盖 system column)"
|
39 |
+
}
|
40 |
+
}
|
41 |
+
```
|
42 |
+
|
43 |
+
## Alpaca 格式
|
44 |
+
|
45 |
+
### 指令监督微调数据集
|
46 |
+
|
47 |
+
- [样例数据集](alpaca_zh_demo.json)
|
48 |
+
|
49 |
+
在指令监督微调时,`instruction` 列对应的内容会与 `input` 列对应的内容拼接后作为人类指令,即人类指令为 `instruction\ninput`。而 `output` 列对应的内容为模型回答。
|
50 |
+
|
51 |
+
如果指定,`system` 列对应的内容将被作为系统提示词。
|
52 |
+
|
53 |
+
`history` 列是由多个字符串二元组构成的列表,分别代表历史消息中每轮对话的指令和回答。注意在指令监督微调时,历史消息中的回答内容**也会被用于模型学习**。
|
54 |
+
|
55 |
+
```json
|
56 |
+
[
|
57 |
+
{
|
58 |
+
"instruction": "人类指令(必填)",
|
59 |
+
"input": "人类输入(选填)",
|
60 |
+
"output": "模型回答(必填)",
|
61 |
+
"system": "系统提示词(选填)",
|
62 |
+
"history": [
|
63 |
+
["第一轮指令(选填)", "第一轮回答(选填)"],
|
64 |
+
["第二轮指令(选填)", "第二轮回答(选填)"]
|
65 |
+
]
|
66 |
+
}
|
67 |
+
]
|
68 |
+
```
|
69 |
+
|
70 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
71 |
+
|
72 |
+
```json
|
73 |
+
"数据集名称": {
|
74 |
+
"file_name": "data.json",
|
75 |
+
"columns": {
|
76 |
+
"prompt": "instruction",
|
77 |
+
"query": "input",
|
78 |
+
"response": "output",
|
79 |
+
"system": "system",
|
80 |
+
"history": "history"
|
81 |
+
}
|
82 |
+
}
|
83 |
+
```
|
84 |
+
|
85 |
+
### 预训练数据集
|
86 |
+
|
87 |
+
- [样例数据集](c4_demo.json)
|
88 |
+
|
89 |
+
在预训练时,只有 `text` 列中的内容会用于模型学习。
|
90 |
+
|
91 |
+
```json
|
92 |
+
[
|
93 |
+
{"text": "document"},
|
94 |
+
{"text": "document"}
|
95 |
+
]
|
96 |
+
```
|
97 |
+
|
98 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
99 |
+
|
100 |
+
```json
|
101 |
+
"数据集名称": {
|
102 |
+
"file_name": "data.json",
|
103 |
+
"columns": {
|
104 |
+
"prompt": "text"
|
105 |
+
}
|
106 |
+
}
|
107 |
+
```
|
108 |
+
|
109 |
+
### 偏好数据集
|
110 |
+
|
111 |
+
偏好数据集用于奖励模型训练、DPO 训练、ORPO 训练和 SimPO 训练。
|
112 |
+
|
113 |
+
它需要在 `chosen` 列中提供更优的回答,并在 `rejected` 列中提供更差的回答。
|
114 |
+
|
115 |
+
```json
|
116 |
+
[
|
117 |
+
{
|
118 |
+
"instruction": "人类指令(必填)",
|
119 |
+
"input": "人类输入(选填)",
|
120 |
+
"chosen": "优质回答(必填)",
|
121 |
+
"rejected": "劣质回答(必填)"
|
122 |
+
}
|
123 |
+
]
|
124 |
+
```
|
125 |
+
|
126 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
127 |
+
|
128 |
+
```json
|
129 |
+
"数据集名称": {
|
130 |
+
"file_name": "data.json",
|
131 |
+
"ranking": true,
|
132 |
+
"columns": {
|
133 |
+
"prompt": "instruction",
|
134 |
+
"query": "input",
|
135 |
+
"chosen": "chosen",
|
136 |
+
"rejected": "rejected"
|
137 |
+
}
|
138 |
+
}
|
139 |
+
```
|
140 |
+
|
141 |
+
### KTO 数据集
|
142 |
+
|
143 |
+
KTO 数据集需要提供额外的 `kto_tag` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
144 |
+
|
145 |
+
### 多模态图像数据集
|
146 |
+
|
147 |
+
多模态图像数据集需要提供额外的 `images` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
148 |
+
|
149 |
+
### 多模态视频数据集
|
150 |
+
|
151 |
+
多模态视频数据集需要提供额外的 `videos` 列。详情请参阅 [sharegpt](#sharegpt-格式)。
|
152 |
+
|
153 |
+
## Sharegpt 格式
|
154 |
+
|
155 |
+
### 指令监督微调数据集
|
156 |
+
|
157 |
+
- [样例数据集](glaive_toolcall_zh_demo.json)
|
158 |
+
|
159 |
+
相比 alpaca 格式的数据集,sharegpt 格式支持**更多的角色种类**,例如 human、gpt、observation、function 等等。它们构成一个对象列表呈现在 `conversations` 列中。
|
160 |
+
|
161 |
+
注意其中 human 和 observation 必须出现在奇数位置,gpt 和 function 必须出现在偶数位置。
|
162 |
+
|
163 |
+
```json
|
164 |
+
[
|
165 |
+
{
|
166 |
+
"conversations": [
|
167 |
+
{
|
168 |
+
"from": "human",
|
169 |
+
"value": "人类指令"
|
170 |
+
},
|
171 |
+
{
|
172 |
+
"from": "function_call",
|
173 |
+
"value": "工具参数"
|
174 |
+
},
|
175 |
+
{
|
176 |
+
"from": "observation",
|
177 |
+
"value": "工具结果"
|
178 |
+
},
|
179 |
+
{
|
180 |
+
"from": "gpt",
|
181 |
+
"value": "模型回答"
|
182 |
+
}
|
183 |
+
],
|
184 |
+
"system": "系统提示词(选填)",
|
185 |
+
"tools": "工具描述(选填)"
|
186 |
+
}
|
187 |
+
]
|
188 |
+
```
|
189 |
+
|
190 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
191 |
+
|
192 |
+
```json
|
193 |
+
"数据集名称": {
|
194 |
+
"file_name": "data.json",
|
195 |
+
"formatting": "sharegpt",
|
196 |
+
"columns": {
|
197 |
+
"messages": "conversations",
|
198 |
+
"system": "system",
|
199 |
+
"tools": "tools"
|
200 |
+
}
|
201 |
+
}
|
202 |
+
```
|
203 |
+
|
204 |
+
### 预训练数据集
|
205 |
+
|
206 |
+
尚不支持,请使用 [alpaca](#alpaca-格式) 格式。
|
207 |
+
|
208 |
+
### 偏好数据集
|
209 |
+
|
210 |
+
- [样例数据集](dpo_zh_demo.json)
|
211 |
+
|
212 |
+
Sharegpt 格式的偏好数据集同样需要在 `chosen` 列中提供更优的消息,并在 `rejected` 列中提供更差的消息。
|
213 |
+
|
214 |
+
```json
|
215 |
+
[
|
216 |
+
{
|
217 |
+
"conversations": [
|
218 |
+
{
|
219 |
+
"from": "human",
|
220 |
+
"value": "人类指令"
|
221 |
+
},
|
222 |
+
{
|
223 |
+
"from": "gpt",
|
224 |
+
"value": "模型回答"
|
225 |
+
},
|
226 |
+
{
|
227 |
+
"from": "human",
|
228 |
+
"value": "人类指令"
|
229 |
+
}
|
230 |
+
],
|
231 |
+
"chosen": {
|
232 |
+
"from": "gpt",
|
233 |
+
"value": "优质回答"
|
234 |
+
},
|
235 |
+
"rejected": {
|
236 |
+
"from": "gpt",
|
237 |
+
"value": "劣质回答"
|
238 |
+
}
|
239 |
+
}
|
240 |
+
]
|
241 |
+
```
|
242 |
+
|
243 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
244 |
+
|
245 |
+
```json
|
246 |
+
"数据集名称": {
|
247 |
+
"file_name": "data.json",
|
248 |
+
"formatting": "sharegpt",
|
249 |
+
"ranking": true,
|
250 |
+
"columns": {
|
251 |
+
"messages": "conversations",
|
252 |
+
"chosen": "chosen",
|
253 |
+
"rejected": "rejected"
|
254 |
+
}
|
255 |
+
}
|
256 |
+
```
|
257 |
+
|
258 |
+
### KTO 数据集
|
259 |
+
|
260 |
+
- [样例数据集](kto_en_demo.json)
|
261 |
+
|
262 |
+
KTO 数据集需要额外添加一个 `kto_tag` 列,包含 bool 类型的人类反馈。
|
263 |
+
|
264 |
+
```json
|
265 |
+
[
|
266 |
+
{
|
267 |
+
"conversations": [
|
268 |
+
{
|
269 |
+
"from": "human",
|
270 |
+
"value": "人类指令"
|
271 |
+
},
|
272 |
+
{
|
273 |
+
"from": "gpt",
|
274 |
+
"value": "模型回答"
|
275 |
+
}
|
276 |
+
],
|
277 |
+
"kto_tag": "人类反馈 [true/false](必填)"
|
278 |
+
}
|
279 |
+
]
|
280 |
+
```
|
281 |
+
|
282 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
283 |
+
|
284 |
+
```json
|
285 |
+
"数据集名称": {
|
286 |
+
"file_name": "data.json",
|
287 |
+
"formatting": "sharegpt",
|
288 |
+
"columns": {
|
289 |
+
"messages": "conversations",
|
290 |
+
"kto_tag": "kto_tag"
|
291 |
+
}
|
292 |
+
}
|
293 |
+
```
|
294 |
+
|
295 |
+
### 多模态图像数据集
|
296 |
+
|
297 |
+
- [样例数据集](mllm_demo.json)
|
298 |
+
|
299 |
+
多模态图像数据集需要额外添加一个 `images` 列,包含输入图像的路径。
|
300 |
+
|
301 |
+
注意图片的数量必须与文本中所有 `<image>` 标记的数量严格一致。
|
302 |
+
|
303 |
+
```json
|
304 |
+
[
|
305 |
+
{
|
306 |
+
"conversations": [
|
307 |
+
{
|
308 |
+
"from": "human",
|
309 |
+
"value": "<image>人类指令"
|
310 |
+
},
|
311 |
+
{
|
312 |
+
"from": "gpt",
|
313 |
+
"value": "模型回答"
|
314 |
+
}
|
315 |
+
],
|
316 |
+
"images": [
|
317 |
+
"图像路径(必填)"
|
318 |
+
]
|
319 |
+
}
|
320 |
+
]
|
321 |
+
```
|
322 |
+
|
323 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
324 |
+
|
325 |
+
```json
|
326 |
+
"数据集名称": {
|
327 |
+
"file_name": "data.json",
|
328 |
+
"formatting": "sharegpt",
|
329 |
+
"columns": {
|
330 |
+
"messages": "conversations",
|
331 |
+
"images": "images"
|
332 |
+
}
|
333 |
+
}
|
334 |
+
```
|
335 |
+
|
336 |
+
### 多模态视频数据集
|
337 |
+
|
338 |
+
- [样例数据集](mllm_video_demo.json)
|
339 |
+
|
340 |
+
多模态视频数据集需要额外添加一个 `videos` 列,包含输入视频的路径。
|
341 |
+
|
342 |
+
注意视频的数量必须与文本中所有 `<video>` 标记的数量严格一致。
|
343 |
+
|
344 |
+
```json
|
345 |
+
[
|
346 |
+
{
|
347 |
+
"conversations": [
|
348 |
+
{
|
349 |
+
"from": "human",
|
350 |
+
"value": "<video>人类指令"
|
351 |
+
},
|
352 |
+
{
|
353 |
+
"from": "gpt",
|
354 |
+
"value": "模型回答"
|
355 |
+
}
|
356 |
+
],
|
357 |
+
"videos": [
|
358 |
+
"视频路径(必填)"
|
359 |
+
]
|
360 |
+
}
|
361 |
+
]
|
362 |
+
```
|
363 |
+
|
364 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
365 |
+
|
366 |
+
```json
|
367 |
+
"数据集名称": {
|
368 |
+
"file_name": "data.json",
|
369 |
+
"formatting": "sharegpt",
|
370 |
+
"columns": {
|
371 |
+
"messages": "conversations",
|
372 |
+
"videos": "videos"
|
373 |
+
}
|
374 |
+
}
|
375 |
+
```
|
376 |
+
|
377 |
+
### OpenAI 格式
|
378 |
+
|
379 |
+
OpenAI 格式仅仅是 sharegpt 格式的一种特殊情况,其中第一条消息可能是系统提示词。
|
380 |
+
|
381 |
+
```json
|
382 |
+
[
|
383 |
+
{
|
384 |
+
"messages": [
|
385 |
+
{
|
386 |
+
"role": "system",
|
387 |
+
"content": "系统提示词(选填)"
|
388 |
+
},
|
389 |
+
{
|
390 |
+
"role": "user",
|
391 |
+
"content": "人类指令"
|
392 |
+
},
|
393 |
+
{
|
394 |
+
"role": "assistant",
|
395 |
+
"content": "模型回答"
|
396 |
+
}
|
397 |
+
]
|
398 |
+
}
|
399 |
+
]
|
400 |
+
```
|
401 |
+
|
402 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
403 |
+
|
404 |
+
```json
|
405 |
+
"数据集名称": {
|
406 |
+
"file_name": "data.json",
|
407 |
+
"formatting": "sharegpt",
|
408 |
+
"columns": {
|
409 |
+
"messages": "messages"
|
410 |
+
},
|
411 |
+
"tags": {
|
412 |
+
"role_tag": "role",
|
413 |
+
"content_tag": "content",
|
414 |
+
"user_tag": "user",
|
415 |
+
"assistant_tag": "assistant",
|
416 |
+
"system_tag": "system"
|
417 |
+
}
|
418 |
+
}
|
419 |
+
```
|
data/alpaca_en_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/alpaca_zh_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/belle_multiturn/belle_multiturn.py
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
+
|
4 |
+
import datasets
|
5 |
+
|
6 |
+
|
7 |
+
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
|
8 |
+
|
9 |
+
_DESCRIPTION = "BELLE multiturn chat dataset."
|
10 |
+
|
11 |
+
_CITATION = """\
|
12 |
+
@article{belle2023exploring,
|
13 |
+
title={Exploring the Impact of Instruction Data Scaling on Large Language Models: An Empirical Study on Real-World Use Cases},
|
14 |
+
author={Yunjie Ji, Yong Deng, Yan Gong, Yiping Peng, Qiang Niu, Lei Zhang, Baochang Ma, Xiangang Li},
|
15 |
+
journal={arXiv preprint arXiv:2303.14742},
|
16 |
+
year={2023}
|
17 |
+
}
|
18 |
+
"""
|
19 |
+
|
20 |
+
_HOMEPAGE = "{}/datasets/BelleGroup/multiturn_chat_0.8M".format(_HF_ENDPOINT)
|
21 |
+
_LICENSE = "gpl-3.0"
|
22 |
+
_URL = "{}/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json".format(_HF_ENDPOINT)
|
23 |
+
|
24 |
+
|
25 |
+
class BelleMultiturn(datasets.GeneratorBasedBuilder):
|
26 |
+
VERSION = datasets.Version("0.0.0")
|
27 |
+
|
28 |
+
def _info(self):
|
29 |
+
features = datasets.Features(
|
30 |
+
{"conversations": [{"from": datasets.Value("string"), "value": datasets.Value("string")}]}
|
31 |
+
)
|
32 |
+
return datasets.DatasetInfo(
|
33 |
+
description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
|
34 |
+
)
|
35 |
+
|
36 |
+
def _split_generators(self, dl_manager: datasets.DownloadManager):
|
37 |
+
file_path = dl_manager.download(_URL)
|
38 |
+
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": file_path})]
|
39 |
+
|
40 |
+
def _generate_examples(self, filepath: str):
|
41 |
+
with open(filepath, "r", encoding="utf-8") as f:
|
42 |
+
for key, row in enumerate(f):
|
43 |
+
data = json.loads(row)
|
44 |
+
conversations = []
|
45 |
+
prompt = data["instruction"].strip()
|
46 |
+
response = data["output"].strip()
|
47 |
+
|
48 |
+
assist_idx = prompt.rfind("Assistant:")
|
49 |
+
human_idx = prompt.rfind("Human:")
|
50 |
+
query = prompt[human_idx + 6 : assist_idx].strip()
|
51 |
+
prompt = prompt[:human_idx].strip()
|
52 |
+
conversations.insert(0, {"from": "gpt", "value": response})
|
53 |
+
conversations.insert(0, {"from": "human", "value": query})
|
54 |
+
|
55 |
+
while prompt.rfind("Assistant:") != -1:
|
56 |
+
assist_idx = prompt.rfind("Assistant:")
|
57 |
+
human_idx = prompt.rfind("Human:")
|
58 |
+
if human_idx != -1:
|
59 |
+
old_query = prompt[human_idx + 6 : assist_idx].strip()
|
60 |
+
old_resp = prompt[assist_idx + 10 :].strip()
|
61 |
+
conversations.insert(0, {"from": "gpt", "value": old_resp})
|
62 |
+
conversations.insert(0, {"from": "human", "value": old_query})
|
63 |
+
else:
|
64 |
+
break
|
65 |
+
prompt = prompt[:human_idx].strip()
|
66 |
+
|
67 |
+
yield key, {"conversations": conversations}
|
data/c4_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/convert_data.ipynb
ADDED
@@ -0,0 +1,421 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 1,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [
|
8 |
+
{
|
9 |
+
"name": "stdout",
|
10 |
+
"output_type": "stream",
|
11 |
+
"text": [
|
12 |
+
"Token length: 726\n"
|
13 |
+
]
|
14 |
+
},
|
15 |
+
{
|
16 |
+
"ename": "KeyboardInterrupt",
|
17 |
+
"evalue": "",
|
18 |
+
"output_type": "error",
|
19 |
+
"traceback": [
|
20 |
+
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
21 |
+
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
|
22 |
+
"Cell \u001b[0;32mIn[1], line 118\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[38;5;66;03m#tmp[\"verifier\"][0] = tmp[\"verifier\"][0].replace(\"veri.*\\(\", \"solver(\")\u001b[39;00m\n\u001b[1;32m 117\u001b[0m new_dict[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmessages\u001b[39m\u001b[38;5;124m\"\u001b[39m]\u001b[38;5;241m.\u001b[39mappend({\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrole\u001b[39m\u001b[38;5;124m\"\u001b[39m:\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124massistant\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m:\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;241m.\u001b[39mjoin(tmp[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mverifier\u001b[39m\u001b[38;5;124m\"\u001b[39m])})\n\u001b[0;32m--> 118\u001b[0m token_len \u001b[38;5;241m=\u001b[39m \u001b[43mcheck_token_length\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnew_dict\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmessages\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 119\u001b[0m token_len_list\u001b[38;5;241m.\u001b[39mappend(token_len)\n\u001b[1;32m 120\u001b[0m data_output\u001b[38;5;241m.\u001b[39mappend(new_dict)\n",
|
23 |
+
"Cell \u001b[0;32mIn[1], line 57\u001b[0m, in \u001b[0;36mcheck_token_length\u001b[0;34m(query)\u001b[0m\n\u001b[1;32m 53\u001b[0m \u001b[38;5;66;03m# Load the tokenizer\u001b[39;00m\n\u001b[1;32m 54\u001b[0m \n\u001b[1;32m 55\u001b[0m \u001b[38;5;66;03m# Apply chat template and tokenize\u001b[39;00m\n\u001b[1;32m 56\u001b[0m formatted_prompt \u001b[38;5;241m=\u001b[39m tokenizer\u001b[38;5;241m.\u001b[39mapply_chat_template(query, tokenize\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)\n\u001b[0;32m---> 57\u001b[0m tokens \u001b[38;5;241m=\u001b[39m \u001b[43mtokenizer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mencode\u001b[49m\u001b[43m(\u001b[49m\u001b[43mformatted_prompt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mreturn_tensors\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mpt\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 59\u001b[0m \u001b[38;5;66;03m# Get token length\u001b[39;00m\n\u001b[1;32m 60\u001b[0m token_length \u001b[38;5;241m=\u001b[39m tokens\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m1\u001b[39m]\n",
|
24 |
+
"File \u001b[0;32m/usr/local/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:2788\u001b[0m, in \u001b[0;36mPreTrainedTokenizerBase.encode\u001b[0;34m(self, text, text_pair, add_special_tokens, padding, truncation, max_length, stride, padding_side, return_tensors, **kwargs)\u001b[0m\n\u001b[1;32m 2750\u001b[0m \u001b[38;5;129m@add_end_docstrings\u001b[39m(\n\u001b[1;32m 2751\u001b[0m ENCODE_KWARGS_DOCSTRING,\n\u001b[1;32m 2752\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 2771\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[1;32m 2772\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m List[\u001b[38;5;28mint\u001b[39m]:\n\u001b[1;32m 2773\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 2774\u001b[0m \u001b[38;5;124;03m Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.\u001b[39;00m\n\u001b[1;32m 2775\u001b[0m \n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 2786\u001b[0m \u001b[38;5;124;03m method).\u001b[39;00m\n\u001b[1;32m 2787\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m-> 2788\u001b[0m encoded_inputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mencode_plus\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2789\u001b[0m \u001b[43m \u001b[49m\u001b[43mtext\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2790\u001b[0m \u001b[43m \u001b[49m\u001b[43mtext_pair\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtext_pair\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2791\u001b[0m \u001b[43m \u001b[49m\u001b[43madd_special_tokens\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43madd_special_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2792\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mpadding\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpadding\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2793\u001b[0m \u001b[43m \u001b[49m\u001b[43mtruncation\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtruncation\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2794\u001b[0m \u001b[43m \u001b[49m\u001b[43mmax_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmax_length\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2795\u001b[0m \u001b[43m \u001b[49m\u001b[43mstride\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstride\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2796\u001b[0m \u001b[43m \u001b[49m\u001b[43mpadding_side\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpadding_side\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2797\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_tensors\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_tensors\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2798\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2799\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2801\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m encoded_inputs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124minput_ids\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n",
|
25 |
+
"File \u001b[0;32m/usr/local/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:3207\u001b[0m, in \u001b[0;36mPreTrainedTokenizerBase.encode_plus\u001b[0;34m(self, text, text_pair, add_special_tokens, padding, truncation, max_length, stride, is_split_into_words, pad_to_multiple_of, padding_side, return_tensors, return_token_type_ids, return_attention_mask, return_overflowing_tokens, return_special_tokens_mask, return_offsets_mapping, return_length, verbose, **kwargs)\u001b[0m\n\u001b[1;32m 3197\u001b[0m \u001b[38;5;66;03m# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'\u001b[39;00m\n\u001b[1;32m 3198\u001b[0m padding_strategy, truncation_strategy, max_length, kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_get_padding_truncation_strategies(\n\u001b[1;32m 3199\u001b[0m padding\u001b[38;5;241m=\u001b[39mpadding,\n\u001b[1;32m 3200\u001b[0m truncation\u001b[38;5;241m=\u001b[39mtruncation,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 3204\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[1;32m 3205\u001b[0m )\n\u001b[0;32m-> 3207\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_encode_plus\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 3208\u001b[0m \u001b[43m \u001b[49m\u001b[43mtext\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtext\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3209\u001b[0m \u001b[43m \u001b[49m\u001b[43mtext_pair\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtext_pair\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3210\u001b[0m \u001b[43m \u001b[49m\u001b[43madd_special_tokens\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43madd_special_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3211\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mpadding_strategy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpadding_strategy\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3212\u001b[0m \u001b[43m \u001b[49m\u001b[43mtruncation_strategy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtruncation_strategy\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3213\u001b[0m \u001b[43m \u001b[49m\u001b[43mmax_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmax_length\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3214\u001b[0m \u001b[43m \u001b[49m\u001b[43mstride\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstride\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3215\u001b[0m \u001b[43m \u001b[49m\u001b[43mis_split_into_words\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mis_split_into_words\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3216\u001b[0m \u001b[43m \u001b[49m\u001b[43mpad_to_multiple_of\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpad_to_multiple_of\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3217\u001b[0m \u001b[43m \u001b[49m\u001b[43mpadding_side\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpadding_side\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3218\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_tensors\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_tensors\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3219\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_token_type_ids\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_token_type_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3220\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_attention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_attention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3221\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_overflowing_tokens\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_overflowing_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3222\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mreturn_special_tokens_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_special_tokens_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3223\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_offsets_mapping\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_offsets_mapping\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3224\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_length\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3225\u001b[0m \u001b[43m \u001b[49m\u001b[43mverbose\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverbose\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3226\u001b[0m \u001b[43m \u001b[49m\u001b[43msplit_special_tokens\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpop\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43msplit_special_tokens\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msplit_special_tokens\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3227\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3228\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
|
26 |
+
"File \u001b[0;32m/usr/local/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:603\u001b[0m, in \u001b[0;36mPreTrainedTokenizerFast._encode_plus\u001b[0;34m(self, text, text_pair, add_special_tokens, padding_strategy, truncation_strategy, max_length, stride, is_split_into_words, pad_to_multiple_of, padding_side, return_tensors, return_token_type_ids, return_attention_mask, return_overflowing_tokens, return_special_tokens_mask, return_offsets_mapping, return_length, verbose, split_special_tokens, **kwargs)\u001b[0m\n\u001b[1;32m 579\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_encode_plus\u001b[39m(\n\u001b[1;32m 580\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 581\u001b[0m text: Union[TextInput, PreTokenizedInput],\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 600\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[1;32m 601\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m BatchEncoding:\n\u001b[1;32m 602\u001b[0m batched_input \u001b[38;5;241m=\u001b[39m [(text, text_pair)] \u001b[38;5;28;01mif\u001b[39;00m text_pair \u001b[38;5;28;01melse\u001b[39;00m [text]\n\u001b[0;32m--> 603\u001b[0m batched_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_batch_encode_plus\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 604\u001b[0m \u001b[43m \u001b[49m\u001b[43mbatched_input\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 605\u001b[0m \u001b[43m \u001b[49m\u001b[43mis_split_into_words\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mis_split_into_words\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 606\u001b[0m \u001b[43m \u001b[49m\u001b[43madd_special_tokens\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43madd_special_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 607\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mpadding_strategy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpadding_strategy\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 608\u001b[0m \u001b[43m \u001b[49m\u001b[43mtruncation_strategy\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtruncation_strategy\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 609\u001b[0m \u001b[43m \u001b[49m\u001b[43mmax_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmax_length\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 610\u001b[0m \u001b[43m \u001b[49m\u001b[43mstride\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstride\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 611\u001b[0m \u001b[43m \u001b[49m\u001b[43mpad_to_multiple_of\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpad_to_multiple_of\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 612\u001b[0m \u001b[43m \u001b[49m\u001b[43mpadding_side\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mpadding_side\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 613\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_tensors\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_tensors\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 614\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_token_type_ids\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_token_type_ids\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 615\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_attention_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_attention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 616\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_overflowing_tokens\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_overflowing_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 617\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_special_tokens_mask\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_special_tokens_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 618\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mreturn_offsets_mapping\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_offsets_mapping\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 619\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_length\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 620\u001b[0m \u001b[43m \u001b[49m\u001b[43mverbose\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mverbose\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 621\u001b[0m \u001b[43m \u001b[49m\u001b[43msplit_special_tokens\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msplit_special_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 622\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 623\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 625\u001b[0m \u001b[38;5;66;03m# Return tensor is None, then we can remove the leading batch axis\u001b[39;00m\n\u001b[1;32m 626\u001b[0m \u001b[38;5;66;03m# Overflowing tokens are returned as a batch of output so we keep them in this case\u001b[39;00m\n\u001b[1;32m 627\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m return_tensors \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m return_overflowing_tokens:\n",
|
27 |
+
"File \u001b[0;32m/usr/local/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:529\u001b[0m, in \u001b[0;36mPreTrainedTokenizerFast._batch_encode_plus\u001b[0;34m(self, batch_text_or_text_pairs, add_special_tokens, padding_strategy, truncation_strategy, max_length, stride, is_split_into_words, pad_to_multiple_of, padding_side, return_tensors, return_token_type_ids, return_attention_mask, return_overflowing_tokens, return_special_tokens_mask, return_offsets_mapping, return_length, verbose, split_special_tokens)\u001b[0m\n\u001b[1;32m 526\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_tokenizer\u001b[38;5;241m.\u001b[39mencode_special_tokens \u001b[38;5;241m!=\u001b[39m split_special_tokens:\n\u001b[1;32m 527\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_tokenizer\u001b[38;5;241m.\u001b[39mencode_special_tokens \u001b[38;5;241m=\u001b[39m split_special_tokens\n\u001b[0;32m--> 529\u001b[0m encodings \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_tokenizer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mencode_batch\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 530\u001b[0m \u001b[43m \u001b[49m\u001b[43mbatch_text_or_text_pairs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 531\u001b[0m \u001b[43m \u001b[49m\u001b[43madd_special_tokens\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43madd_special_tokens\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 532\u001b[0m \u001b[43m \u001b[49m\u001b[43mis_pretokenized\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mis_split_into_words\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 533\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 535\u001b[0m \u001b[38;5;66;03m# Convert encoding to dict\u001b[39;00m\n\u001b[1;32m 536\u001b[0m \u001b[38;5;66;03m# `Tokens` has type: Tuple[\u001b[39;00m\n\u001b[1;32m 537\u001b[0m \u001b[38;5;66;03m# List[Dict[str, 
List[List[int]]]] or List[Dict[str, 2D-Tensor]],\u001b[39;00m\n\u001b[1;32m 538\u001b[0m \u001b[38;5;66;03m# List[EncodingFast]\u001b[39;00m\n\u001b[1;32m 539\u001b[0m \u001b[38;5;66;03m# ]\u001b[39;00m\n\u001b[1;32m 540\u001b[0m \u001b[38;5;66;03m# with nested dimensions corresponding to batch, overflows, sequence length\u001b[39;00m\n\u001b[1;32m 541\u001b[0m tokens_and_encodings \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 542\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_convert_encoding(\n\u001b[1;32m 543\u001b[0m encoding\u001b[38;5;241m=\u001b[39mencoding,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 552\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m encoding \u001b[38;5;129;01min\u001b[39;00m encodings\n\u001b[1;32m 553\u001b[0m ]\n",
|
28 |
+
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
|
29 |
+
]
|
30 |
+
}
|
31 |
+
],
|
32 |
+
"source": [
|
33 |
+
"import json\n",
|
34 |
+
"from transformers import AutoTokenizer\n",
|
35 |
+
"from transformers.utils import logging\n",
|
36 |
+
"import torch\n",
|
37 |
+
"model_name = \"/mnt/data/zifeng.cao/reasoning/arc-agi/LLaMA-Factory/saves/Qwen2.5-Coder-7B-Instruct/pt_output_plus_step_output/checkpoint-274\"\n",
|
38 |
+
"tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
|
39 |
+
"\n",
|
40 |
+
"system_prompt = '''You are a helpful assistant that can solve reasoning tasks by using a limited set of DSL functions that are implemented in Python. \n",
|
41 |
+
"*** Task description ***\n",
|
42 |
+
"- Each task consists of around a handful of training examples, where an training example consists of an input grid and an output grid. \n",
|
43 |
+
"- For each training example, the output grid is the result of applying the same task-specific transformation to the input grid. \n",
|
44 |
+
"- The goal is to infer the transformation from the few training examples.\n",
|
45 |
+
"- The transformation is a task-specific grid transformation, which can be decomposed into a sequence of the DSL functions.\n",
|
46 |
+
"*** DSL description ***\n",
|
47 |
+
"- Types and Constants\n",
|
48 |
+
" - **Types**: Define various data types like `Grid`, `Object`, `Indices`, and more to facilitate grid operations.\n",
|
49 |
+
" - **Constants**: Include color constants (e.g., `ZERO`, `ONE`), boolean constants (`T`, `F`), and directional vectors (e.g., `UP`, `DOWN`).\n",
|
50 |
+
"- Primitives\n",
|
51 |
+
" - **Math Operations**: Functions like `add`, `subtract`, `multiply`, and `divide` perform basic arithmetic on integers or tuples.\n",
|
52 |
+
" - **Logical Operations**: Functions such as `even`, `flip`, and `both` handle logical evaluations.\n",
|
53 |
+
" - **Data Operations**: Functions like `identity`, `order`, `merge`, `difference`, and `dedupe` manage data containers.\n",
|
54 |
+
"- Grid and Object Manipulation\n",
|
55 |
+
" - **Grid Creation**: `canvas` creates grids with specified dimensions and values.\n",
|
56 |
+
" - **Grid Transformation**: Functions like `rot90`, `hmirror`, `upscale`, and `downscale` transform grids in various ways.\n",
|
57 |
+
" - **Subgrid Operations**: `crop`, `hsplit`, `vsplit`, and `trim` extract or modify parts of grids.\n",
|
58 |
+
" - **Object and Patch Handling**: Functions like `objects`, `normalize`, `shift`, `toindices`, and `recolor` handle grid patches and objects.\n",
|
59 |
+
"- Analysis and Filtering\n",
|
60 |
+
" - **Color Analysis**: Functions such as `mostcolor`, `leastcolor`, `colorcount`, and `palette` analyze color distributions.\n",
|
61 |
+
" - **Object Filtering**: `colorfilter` and `sizefilter` filter objects by color or size.\n",
|
62 |
+
" - **Spatial Analysis**: Functions like `center`, `position`, `manhattan`, and `adjacent` analyze spatial relationships.\n",
|
63 |
+
"- Connectivity and Bounding\n",
|
64 |
+
" - **Connectivity**: `connect`, `neighbors`, `dneighbors`, and `ineighbors` determine connections between grid indices.\n",
|
65 |
+
" - **Bounding**: Functions like `box`, `inbox`, `outbox`, and `corners` manage bounding areas of patches.\n",
|
66 |
+
"- Utils\n",
|
67 |
+
" - **Random Integer Generation**: `unifint` generates random integers within specified bounds and difficulty levels.\n",
|
68 |
+
" - **Grid Validation**: `is_grid` checks if an input is a valid grid.\n",
|
69 |
+
" - **Grid Formatting**: `format_grid` casts lists to the grid type.\n",
|
70 |
+
"*** Format of the generated code ***\n",
|
71 |
+
"- The only allowed operations are storing the result of a function call in a variable, where all arguments must either be the input grid, some constants such as integers or common vectors indicating directions, or a variable previously computed within the same solver, and each function that is being called must either be a DSL function or a variable previously constructed within the same solver. \n",
|
72 |
+
"- This also means that each line of code is enforced to be a single function call.\n",
|
73 |
+
"So, you are given a task and a set of examples, you need to generate a code that can solve the task.\n",
|
74 |
+
"'''\n",
|
75 |
+
"\n",
|
76 |
+
"token_length = tokenizer.encode(system_prompt, return_tensors=\"pt\").shape[1]\n",
|
77 |
+
"print(f\"Token length: {token_length}\")\n",
|
78 |
+
"\n",
|
79 |
+
"\n",
|
80 |
+
"\n",
|
81 |
+
"def check_token_length(query):\n",
|
82 |
+
" # Suppress warnings\n",
|
83 |
+
" logging.set_verbosity_error()\n",
|
84 |
+
"\n",
|
85 |
+
" # Load the tokenizer\n",
|
86 |
+
"\n",
|
87 |
+
" # Apply chat template and tokenize\n",
|
88 |
+
" formatted_prompt = tokenizer.apply_chat_template(query, tokenize=False)\n",
|
89 |
+
" tokens = tokenizer.encode(formatted_prompt, return_tensors=\"pt\")\n",
|
90 |
+
"\n",
|
91 |
+
" # Get token length\n",
|
92 |
+
" token_length = tokens.shape[1]\n",
|
93 |
+
"\n",
|
94 |
+
" #print(f\"Token length: {token_length}\")\n",
|
95 |
+
"\n",
|
96 |
+
" # Check if it exceeds the model's context length (assuming 4096 for Llama-2)\n",
|
97 |
+
"\n",
|
98 |
+
" return token_length\n",
|
99 |
+
"def list_of_lists_to_string_with_commas_and_newlines(list_of_lists):\n",
|
100 |
+
" return '\\n'.join(','.join(str(item) for item in sublist) for sublist in list_of_lists)\n",
|
101 |
+
"\n",
|
102 |
+
"def transform_query(query):\n",
|
103 |
+
" result_str = \"\"\n",
|
104 |
+
" previous_result = \"\"\n",
|
105 |
+
" for i, example in enumerate(query):\n",
|
106 |
+
" try:\n",
|
107 |
+
" r_i, c_i = len(example[\"input\"]), len(example[\"input\"][0])\n",
|
108 |
+
" r_o, c_o = len(example[\"output\"]), len(example[\"output\"][0])\n",
|
109 |
+
" except:\n",
|
110 |
+
" print(example)\n",
|
111 |
+
" return None\n",
|
112 |
+
" # input_str = \"\\n\".join([\"|\".join(map(str, row)) for row in example[\"input\"]])\n",
|
113 |
+
" # output_str = \"\\n\".join([\"|\".join(map(str, row)) for row in example[\"output\"]])\n",
|
114 |
+
" input_str = list_of_lists_to_string_with_commas_and_newlines(example[\"input\"])\n",
|
115 |
+
" output_str = list_of_lists_to_string_with_commas_and_newlines(example[\"output\"])\n",
|
116 |
+
" result_str = previous_result + f\"** Example {i+1} ** \\n input: ({r_i} by {c_i}) Matrix \\n{input_str}\\n output: ({r_o} by {c_o}) Matrix \\n{output_str}\\n\\n\"\n",
|
117 |
+
" previous_result = result_str\n",
|
118 |
+
" if len(result_str) > 14000: #6000\n",
|
119 |
+
" token_length = tokenizer.encode(previous_result, return_tensors=\"pt\").shape[1]\n",
|
120 |
+
" #print(f\"previous Token length: {token_length}\")\n",
|
121 |
+
" if token_length > 14000: #6000\n",
|
122 |
+
" return None\n",
|
123 |
+
" return previous_result\n",
|
124 |
+
" token_length = tokenizer.encode(result_str, return_tensors=\"pt\").shape[1]\n",
|
125 |
+
" #print(i, len(result_str))\n",
|
126 |
+
" #print(f\"Token length: {token_length}\")\n",
|
127 |
+
" \n",
|
128 |
+
" return result_str\n",
|
129 |
+
"\n",
|
130 |
+
"import re\n",
|
131 |
+
"f = open(\"multi_step_verifiers_training.txt\", \"r\")\n",
|
132 |
+
"data_output = []\n",
|
133 |
+
"token_len_list = []\n",
|
134 |
+
"skip = 0\n",
|
135 |
+
"total = 0\n",
|
136 |
+
"for line in f:\n",
|
137 |
+
" total += 1\n",
|
138 |
+
" tmp = json.loads(line)\n",
|
139 |
+
" new_dict = {\"messages\":[]}\n",
|
140 |
+
" new_dict[\"messages\"].append({\"role\":\"system\", \"content\":system_prompt})\n",
|
141 |
+
" tran = transform_query(tmp[\"example\"]) \n",
|
142 |
+
" if tran == None:\n",
|
143 |
+
" skip += 1\n",
|
144 |
+
" continue\n",
|
145 |
+
"\n",
|
146 |
+
" new_dict[\"messages\"].append({\"role\":\"user\", \"content\":tran})\n",
|
147 |
+
" tmp[\"verifier\"][0] =re.sub(r'veri.*?\\(', 'solver(', tmp[\"verifier\"][0])\n",
|
148 |
+
" #tmp[\"verifier\"][0] = tmp[\"verifier\"][0].replace(\"veri.*\\(\", \"solver(\")\n",
|
149 |
+
" new_dict[\"messages\"].append({\"role\":\"assistant\", \"content\":\"\\n\".join(tmp[\"verifier\"])})\n",
|
150 |
+
" token_len = check_token_length(new_dict[\"messages\"])\n",
|
151 |
+
" token_len_list.append(token_len)\n",
|
152 |
+
" data_output.append(new_dict)\n",
|
153 |
+
"print(data_output[0][\"messages\"][1][\"content\"])\n",
|
154 |
+
"print(\"skip:\", skip)\n",
|
155 |
+
"print(\"total:\", total)\n",
|
156 |
+
"print(\"Token length max:\", max(token_len_list))\n",
|
157 |
+
"print(\"Token length min:\", min(token_len_list))\n",
|
158 |
+
"f.close()\n",
|
159 |
+
"with open(\"multi_step_verifiers_training.json\", \"w\") as f:\n",
|
160 |
+
" json.dump(data_output, f, indent=4)\n"
|
161 |
+
]
|
162 |
+
},
|
163 |
+
{
|
164 |
+
"cell_type": "code",
|
165 |
+
"execution_count": 2,
|
166 |
+
"metadata": {},
|
167 |
+
"outputs": [
|
168 |
+
{
|
169 |
+
"name": "stdout",
|
170 |
+
"output_type": "stream",
|
171 |
+
"text": [
|
172 |
+
"Token length: 726\n"
|
173 |
+
]
|
174 |
+
},
|
175 |
+
{
|
176 |
+
"name": "stderr",
|
177 |
+
"output_type": "stream",
|
178 |
+
"text": [
|
179 |
+
"huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
|
180 |
+
"To disable this warning, you can either:\n",
|
181 |
+
"\t- Avoid using `tokenizers` before the fork if possible\n",
|
182 |
+
"\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n",
|
183 |
+
"huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
|
184 |
+
"To disable this warning, you can either:\n",
|
185 |
+
"\t- Avoid using `tokenizers` before the fork if possible\n",
|
186 |
+
"\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n",
|
187 |
+
"huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
|
188 |
+
"To disable this warning, you can either:\n",
|
189 |
+
"\t- Avoid using `tokenizers` before the fork if possible\n",
|
190 |
+
"\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n",
|
191 |
+
"huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
|
192 |
+
"To disable this warning, you can either:\n",
|
193 |
+
"\t- Avoid using `tokenizers` before the fork if possible\n",
|
194 |
+
"\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n",
|
195 |
+
" 72%|███████▏ | 96270/134580 [14:57<05:57, 107.26it/s]\n"
|
196 |
+
]
|
197 |
+
},
|
198 |
+
{
|
199 |
+
"ename": "KeyboardInterrupt",
|
200 |
+
"evalue": "",
|
201 |
+
"output_type": "error",
|
202 |
+
"traceback": [
|
203 |
+
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
204 |
+
"\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)",
|
205 |
+
"File \u001b[0;32m/usr/local/lib/python3.10/multiprocessing/pool.py:856\u001b[0m, in \u001b[0;36mIMapIterator.next\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 855\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 856\u001b[0m item \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_items\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpopleft\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 857\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mIndexError\u001b[39;00m:\n",
|
206 |
+
"\u001b[0;31mIndexError\u001b[0m: pop from an empty deque",
|
207 |
+
"\nDuring handling of the above exception, another exception occurred:\n",
|
208 |
+
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
|
209 |
+
"Cell \u001b[0;32mIn[2], line 136\u001b[0m\n\u001b[1;32m 133\u001b[0m json\u001b[38;5;241m.\u001b[39mdump(data_output, f, indent\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m4\u001b[39m)\n\u001b[1;32m 135\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;18m__name__\u001b[39m \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m__main__\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[0;32m--> 136\u001b[0m \u001b[43mmain\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
|
210 |
+
"Cell \u001b[0;32mIn[2], line 115\u001b[0m, in \u001b[0;36mmain\u001b[0;34m()\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[38;5;66;03m# Create a pool of workers\u001b[39;00m\n\u001b[1;32m 113\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m mp\u001b[38;5;241m.\u001b[39mPool(processes\u001b[38;5;241m=\u001b[39mmp\u001b[38;5;241m.\u001b[39mcpu_count()) \u001b[38;5;28;01mas\u001b[39;00m pool:\n\u001b[1;32m 114\u001b[0m \u001b[38;5;66;03m# Process lines in parallel with progress bar\u001b[39;00m\n\u001b[0;32m--> 115\u001b[0m results \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mlist\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mtqdm\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpool\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mimap\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprocess_line\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlines\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mtotal\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtotal\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 117\u001b[0m \u001b[38;5;66;03m# Collect results\u001b[39;00m\n\u001b[1;32m 118\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m result \u001b[38;5;129;01min\u001b[39;00m results:\n",
|
211 |
+
"File \u001b[0;32m/usr/local/lib/python3.10/site-packages/tqdm/std.py:1181\u001b[0m, in \u001b[0;36mtqdm.__iter__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1178\u001b[0m time \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_time\n\u001b[1;32m 1180\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m-> 1181\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m obj \u001b[38;5;129;01min\u001b[39;00m iterable:\n\u001b[1;32m 1182\u001b[0m \u001b[38;5;28;01myield\u001b[39;00m obj\n\u001b[1;32m 1183\u001b[0m \u001b[38;5;66;03m# Update and possibly print the progressbar.\u001b[39;00m\n\u001b[1;32m 1184\u001b[0m \u001b[38;5;66;03m# Note: does not call self.update(1) for speed optimisation.\u001b[39;00m\n",
|
212 |
+
"File \u001b[0;32m/usr/local/lib/python3.10/multiprocessing/pool.py:861\u001b[0m, in \u001b[0;36mIMapIterator.next\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 859\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_pool \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[1;32m 860\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mStopIteration\u001b[39;00m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m--> 861\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_cond\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwait\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 862\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 863\u001b[0m item \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_items\u001b[38;5;241m.\u001b[39mpopleft()\n",
|
213 |
+
"File \u001b[0;32m/usr/local/lib/python3.10/threading.py:320\u001b[0m, in \u001b[0;36mCondition.wait\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 318\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m: \u001b[38;5;66;03m# restore state no matter what (e.g., KeyboardInterrupt)\u001b[39;00m\n\u001b[1;32m 319\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m timeout \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 320\u001b[0m \u001b[43mwaiter\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43macquire\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 321\u001b[0m gotit \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 322\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n",
|
214 |
+
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
|
215 |
+
]
|
216 |
+
}
|
217 |
+
],
|
218 |
+
"source": [
|
219 |
+
"import json\n",
|
220 |
+
"from transformers import AutoTokenizer\n",
|
221 |
+
"from transformers.utils import logging\n",
|
222 |
+
"import torch\n",
|
223 |
+
"import multiprocessing as mp\n",
|
224 |
+
"from tqdm import tqdm\n",
|
225 |
+
"\n",
|
226 |
+
"model_name = \"/mnt/data/zifeng.cao/reasoning/arc-agi/LLaMA-Factory/saves/Qwen2.5-Coder-7B-Instruct/pt_output_plus_step_output/checkpoint-274\"\n",
|
227 |
+
"tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
|
228 |
+
"\n",
|
229 |
+
"system_prompt = '''You are a helpful assistant that can solve reasoning tasks by using a limited set of DSL functions that are implemented in Python. \n",
|
230 |
+
"*** Task description ***\n",
|
231 |
+
"- Each task consists of around a handful of training examples, where an training example consists of an input grid and an output grid. \n",
|
232 |
+
"- For each training example, the output grid is the result of applying the same task-specific transformation to the input grid. \n",
|
233 |
+
"- The goal is to infer the transformation from the few training examples.\n",
|
234 |
+
"- The transformation is a task-specific grid transformation, which can be decomposed into a sequence of the DSL functions.\n",
|
235 |
+
"*** DSL description ***\n",
|
236 |
+
"- Types and Constants\n",
|
237 |
+
" - **Types**: Define various data types like `Grid`, `Object`, `Indices`, and more to facilitate grid operations.\n",
|
238 |
+
" - **Constants**: Include color constants (e.g., `ZERO`, `ONE`), boolean constants (`T`, `F`), and directional vectors (e.g., `UP`, `DOWN`).\n",
|
239 |
+
"- Primitives\n",
|
240 |
+
" - **Math Operations**: Functions like `add`, `subtract`, `multiply`, and `divide` perform basic arithmetic on integers or tuples.\n",
|
241 |
+
" - **Logical Operations**: Functions such as `even`, `flip`, and `both` handle logical evaluations.\n",
|
242 |
+
" - **Data Operations**: Functions like `identity`, `order`, `merge`, `difference`, and `dedupe` manage data containers.\n",
|
243 |
+
"- Grid and Object Manipulation\n",
|
244 |
+
" - **Grid Creation**: `canvas` creates grids with specified dimensions and values.\n",
|
245 |
+
" - **Grid Transformation**: Functions like `rot90`, `hmirror`, `upscale`, and `downscale` transform grids in various ways.\n",
|
246 |
+
" - **Subgrid Operations**: `crop`, `hsplit`, `vsplit`, and `trim` extract or modify parts of grids.\n",
|
247 |
+
" - **Object and Patch Handling**: Functions like `objects`, `normalize`, `shift`, `toindices`, and `recolor` handle grid patches and objects.\n",
|
248 |
+
"- Analysis and Filtering\n",
|
249 |
+
" - **Color Analysis**: Functions such as `mostcolor`, `leastcolor`, `colorcount`, and `palette` analyze color distributions.\n",
|
250 |
+
" - **Object Filtering**: `colorfilter` and `sizefilter` filter objects by color or size.\n",
|
251 |
+
" - **Spatial Analysis**: Functions like `center`, `position`, `manhattan`, and `adjacent` analyze spatial relationships.\n",
|
252 |
+
"- Connectivity and Bounding\n",
|
253 |
+
" - **Connectivity**: `connect`, `neighbors`, `dneighbors`, and `ineighbors` determine connections between grid indices.\n",
|
254 |
+
" - **Bounding**: Functions like `box`, `inbox`, `outbox`, and `corners` manage bounding areas of patches.\n",
|
255 |
+
"- Utils\n",
|
256 |
+
" - **Random Integer Generation**: `unifint` generates random integers within specified bounds and difficulty levels.\n",
|
257 |
+
" - **Grid Validation**: `is_grid` checks if an input is a valid grid.\n",
|
258 |
+
" - **Grid Formatting**: `format_grid` casts lists to the grid type.\n",
|
259 |
+
"*** Format of the generated code ***\n",
|
260 |
+
"- The only allowed operations are storing the result of a function call in a variable, where all arguments must either be the input grid, some constants such as integers or common vectors indicating directions, or a variable previously computed within the same solver, and each function that is being called must either be a DSL function or a variable previously constructed within the same solver. \n",
|
261 |
+
"- This also means that each line of code is enforced to be a single function call.\n",
|
262 |
+
"So, you are given a task and a set of examples, you need to generate a code that can solve the task.\n",
|
263 |
+
"'''\n",
|
264 |
+
"\n",
|
265 |
+
"token_length = tokenizer.encode(system_prompt, return_tensors=\"pt\").shape[1]\n",
|
266 |
+
"print(f\"Token length: {token_length}\")\n",
|
267 |
+
"\n",
|
268 |
+
"def check_token_length(query):\n",
|
269 |
+
" # Suppress warnings\n",
|
270 |
+
" logging.set_verbosity_error()\n",
|
271 |
+
"\n",
|
272 |
+
" # Apply chat template and tokenize\n",
|
273 |
+
" formatted_prompt = tokenizer.apply_chat_template(query, tokenize=False)\n",
|
274 |
+
" tokens = tokenizer.encode(formatted_prompt, return_tensors=\"pt\")\n",
|
275 |
+
"\n",
|
276 |
+
" # Get token length\n",
|
277 |
+
" token_length = tokens.shape[1]\n",
|
278 |
+
"\n",
|
279 |
+
" return token_length\n",
|
280 |
+
"\n",
|
281 |
+
"def list_of_lists_to_string_with_commas_and_newlines(list_of_lists):\n",
|
282 |
+
" return '\\n'.join(','.join(str(item) for item in sublist) for sublist in list_of_lists)\n",
|
283 |
+
"\n",
|
284 |
+
"def transform_query(query):\n",
|
285 |
+
" result_str = \"\"\n",
|
286 |
+
" previous_result = \"\"\n",
|
287 |
+
" for i, example in enumerate(query):\n",
|
288 |
+
" try:\n",
|
289 |
+
" r_i, c_i = len(example[\"input\"]), len(example[\"input\"][0])\n",
|
290 |
+
" r_o, c_o = len(example[\"output\"]), len(example[\"output\"][0])\n",
|
291 |
+
" except:\n",
|
292 |
+
" print(example)\n",
|
293 |
+
" return None\n",
|
294 |
+
" input_str = list_of_lists_to_string_with_commas_and_newlines(example[\"input\"])\n",
|
295 |
+
" output_str = list_of_lists_to_string_with_commas_and_newlines(example[\"output\"])\n",
|
296 |
+
" result_str = previous_result + f\"** Example {i+1} ** \\n input: ({r_i} by {c_i}) Matrix \\n{input_str}\\n output: ({r_o} by {c_o}) Matrix \\n{output_str}\\n\\n\"\n",
|
297 |
+
" previous_result = result_str\n",
|
298 |
+
" if len(result_str) > 14000:\n",
|
299 |
+
" token_length = tokenizer.encode(previous_result, return_tensors=\"pt\").shape[1]\n",
|
300 |
+
" if token_length > 14000:\n",
|
301 |
+
" return None\n",
|
302 |
+
" return previous_result\n",
|
303 |
+
" token_length = tokenizer.encode(result_str, return_tensors=\"pt\").shape[1]\n",
|
304 |
+
" \n",
|
305 |
+
" return result_str\n",
|
306 |
+
"\n",
|
307 |
+
"def process_line(line):\n",
|
308 |
+
" tmp = json.loads(line)\n",
|
309 |
+
" new_dict = {\"messages\":[]}\n",
|
310 |
+
" new_dict[\"messages\"].append({\"role\":\"system\", \"content\":system_prompt})\n",
|
311 |
+
" tran = transform_query(tmp[\"example\"]) \n",
|
312 |
+
" if tran == None:\n",
|
313 |
+
" return None\n",
|
314 |
+
"\n",
|
315 |
+
" new_dict[\"messages\"].append({\"role\":\"user\", \"content\":tran})\n",
|
316 |
+
" tmp[\"verifier\"][0] = re.sub(r'veri.*?\\(', 'solver(', tmp[\"verifier\"][0])\n",
|
317 |
+
" new_dict[\"messages\"].append({\"role\":\"assistant\", \"content\":\"\\n\".join(tmp[\"verifier\"])})\n",
|
318 |
+
" token_len = check_token_length(new_dict[\"messages\"])\n",
|
319 |
+
" return new_dict, token_len\n",
|
320 |
+
"\n",
|
321 |
+
"def main():\n",
|
322 |
+
" with open(\"multi_step_verifiers_training.txt\", \"r\") as f:\n",
|
323 |
+
" lines = f.readlines()\n",
|
324 |
+
" \n",
|
325 |
+
" total = len(lines)\n",
|
326 |
+
" data_output = []\n",
|
327 |
+
" token_len_list = []\n",
|
328 |
+
" skip = 0\n",
|
329 |
+
" \n",
|
330 |
+
" # Create a pool of workers\n",
|
331 |
+
" with mp.Pool(processes=mp.cpu_count()) as pool:\n",
|
332 |
+
" # Process lines in parallel with progress bar\n",
|
333 |
+
" results = list(tqdm(pool.imap(process_line, lines), total=total))\n",
|
334 |
+
" \n",
|
335 |
+
" # Collect results\n",
|
336 |
+
" for result in results:\n",
|
337 |
+
" if result is None:\n",
|
338 |
+
" skip += 1\n",
|
339 |
+
" continue\n",
|
340 |
+
" new_dict, token_len = result\n",
|
341 |
+
" data_output.append(new_dict)\n",
|
342 |
+
" token_len_list.append(token_len)\n",
|
343 |
+
" \n",
|
344 |
+
" print(data_output[0][\"messages\"][1][\"content\"])\n",
|
345 |
+
" print(\"skip:\", skip)\n",
|
346 |
+
" print(\"total:\", total)\n",
|
347 |
+
" print(\"Token length max:\", max(token_len_list))\n",
|
348 |
+
" print(\"Token length min:\", min(token_len_list))\n",
|
349 |
+
" \n",
|
350 |
+
" with open(\"multi_step_verifiers_training.json\", \"w\") as f:\n",
|
351 |
+
" json.dump(data_output, f, indent=4)\n",
|
352 |
+
"\n",
|
353 |
+
"if __name__ == '__main__':\n",
|
354 |
+
" main()\n"
|
355 |
+
]
|
356 |
+
},
|
357 |
+
{
|
358 |
+
"cell_type": "code",
|
359 |
+
"execution_count": 20,
|
360 |
+
"metadata": {},
|
361 |
+
"outputs": [
|
362 |
+
{
|
363 |
+
"data": {
|
364 |
+
"text/plain": [
|
365 |
+
"'def verifier_9f6b5f41(I: Grid) -> Grid:'"
|
366 |
+
]
|
367 |
+
},
|
368 |
+
"execution_count": 20,
|
369 |
+
"metadata": {},
|
370 |
+
"output_type": "execute_result"
|
371 |
+
}
|
372 |
+
],
|
373 |
+
"source": [
|
374 |
+
"import re\n",
|
375 |
+
"\n",
|
376 |
+
"def replace_verifier_with_solver(input_string):\n",
|
377 |
+
" return re.sub(r'veri.*?\\(', 'solver(', input_string)\n",
|
378 |
+
"\n",
|
379 |
+
"original_string = 'def verifier_9f6b5f41(I: Grid) -> Grid:'\n",
|
380 |
+
"modified_string = replace_verifier_with_solver(original_string)\n",
|
381 |
+
"print(modified_string)"
|
382 |
+
]
|
383 |
+
},
|
384 |
+
{
|
385 |
+
"cell_type": "code",
|
386 |
+
"execution_count": 2,
|
387 |
+
"metadata": {},
|
388 |
+
"outputs": [],
|
389 |
+
"source": [
|
390 |
+
"import json\n",
|
391 |
+
"# merge two datasets\n",
|
392 |
+
"data1 = json.load(open(\"multi_step_verifiers_training.json\", \"r\"))\n",
|
393 |
+
"data2 = json.load(open(\"re_arc_v4.json\", \"r\"))\n",
|
394 |
+
"data1.extend(data2)\n",
|
395 |
+
"with open(\"multi_step_merged_arc_v4.json\", \"w\") as f:\n",
|
396 |
+
" json.dump(data1, f, indent=4)"
|
397 |
+
]
|
398 |
+
}
|
399 |
+
],
|
400 |
+
"metadata": {
|
401 |
+
"kernelspec": {
|
402 |
+
"display_name": "Python 3",
|
403 |
+
"language": "python",
|
404 |
+
"name": "python3"
|
405 |
+
},
|
406 |
+
"language_info": {
|
407 |
+
"codemirror_mode": {
|
408 |
+
"name": "ipython",
|
409 |
+
"version": 3
|
410 |
+
},
|
411 |
+
"file_extension": ".py",
|
412 |
+
"mimetype": "text/x-python",
|
413 |
+
"name": "python",
|
414 |
+
"nbconvert_exporter": "python",
|
415 |
+
"pygments_lexer": "ipython3",
|
416 |
+
"version": "3.10.14"
|
417 |
+
}
|
418 |
+
},
|
419 |
+
"nbformat": 4,
|
420 |
+
"nbformat_minor": 2
|
421 |
+
}
|
data/dataset_info.json
ADDED
@@ -0,0 +1,684 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"identity": {
|
3 |
+
"file_name": "identity.json"
|
4 |
+
},
|
5 |
+
"alpaca_en_demo": {
|
6 |
+
"file_name": "alpaca_en_demo.json"
|
7 |
+
},
|
8 |
+
"alpaca_zh_demo": {
|
9 |
+
"file_name": "alpaca_zh_demo.json"
|
10 |
+
},
|
11 |
+
"glaive_toolcall_en_demo": {
|
12 |
+
"file_name": "glaive_toolcall_en_demo.json",
|
13 |
+
"formatting": "sharegpt",
|
14 |
+
"columns": {
|
15 |
+
"messages": "conversations",
|
16 |
+
"tools": "tools"
|
17 |
+
}
|
18 |
+
},
|
19 |
+
"glaive_toolcall_zh_demo": {
|
20 |
+
"file_name": "glaive_toolcall_zh_demo.json",
|
21 |
+
"formatting": "sharegpt",
|
22 |
+
"columns": {
|
23 |
+
"messages": "conversations",
|
24 |
+
"tools": "tools"
|
25 |
+
}
|
26 |
+
},
|
27 |
+
"mllm_demo": {
|
28 |
+
"file_name": "mllm_demo.json",
|
29 |
+
"formatting": "sharegpt",
|
30 |
+
"columns": {
|
31 |
+
"messages": "messages",
|
32 |
+
"images": "images"
|
33 |
+
},
|
34 |
+
"tags": {
|
35 |
+
"role_tag": "role",
|
36 |
+
"content_tag": "content",
|
37 |
+
"user_tag": "user",
|
38 |
+
"assistant_tag": "assistant"
|
39 |
+
}
|
40 |
+
},
|
41 |
+
"mllm_video_demo": {
|
42 |
+
"file_name": "mllm_video_demo.json",
|
43 |
+
"formatting": "sharegpt",
|
44 |
+
"columns": {
|
45 |
+
"messages": "messages",
|
46 |
+
"videos": "videos"
|
47 |
+
},
|
48 |
+
"tags": {
|
49 |
+
"role_tag": "role",
|
50 |
+
"content_tag": "content",
|
51 |
+
"user_tag": "user",
|
52 |
+
"assistant_tag": "assistant"
|
53 |
+
}
|
54 |
+
},
|
55 |
+
"alpaca_en": {
|
56 |
+
"hf_hub_url": "llamafactory/alpaca_en",
|
57 |
+
"ms_hub_url": "llamafactory/alpaca_en",
|
58 |
+
"om_hub_url": "HaM/alpaca_en"
|
59 |
+
},
|
60 |
+
"alpaca_zh": {
|
61 |
+
"hf_hub_url": "llamafactory/alpaca_zh",
|
62 |
+
"ms_hub_url": "llamafactory/alpaca_zh"
|
63 |
+
},
|
64 |
+
"alpaca_gpt4_en": {
|
65 |
+
"hf_hub_url": "llamafactory/alpaca_gpt4_en",
|
66 |
+
"ms_hub_url": "llamafactory/alpaca_gpt4_en"
|
67 |
+
},
|
68 |
+
"alpaca_gpt4_zh": {
|
69 |
+
"hf_hub_url": "llamafactory/alpaca_gpt4_zh",
|
70 |
+
"ms_hub_url": "llamafactory/alpaca_gpt4_zh",
|
71 |
+
"om_hub_url": "State_Cloud/alpaca-gpt4-data-zh"
|
72 |
+
},
|
73 |
+
"glaive_toolcall_en": {
|
74 |
+
"hf_hub_url": "llamafactory/glaive_toolcall_en",
|
75 |
+
"formatting": "sharegpt",
|
76 |
+
"columns": {
|
77 |
+
"messages": "conversations",
|
78 |
+
"tools": "tools"
|
79 |
+
}
|
80 |
+
},
|
81 |
+
"glaive_toolcall_zh": {
|
82 |
+
"hf_hub_url": "llamafactory/glaive_toolcall_zh",
|
83 |
+
"formatting": "sharegpt",
|
84 |
+
"columns": {
|
85 |
+
"messages": "conversations",
|
86 |
+
"tools": "tools"
|
87 |
+
}
|
88 |
+
},
|
89 |
+
"lima": {
|
90 |
+
"hf_hub_url": "llamafactory/lima",
|
91 |
+
"formatting": "sharegpt"
|
92 |
+
},
|
93 |
+
"guanaco": {
|
94 |
+
"hf_hub_url": "JosephusCheung/GuanacoDataset",
|
95 |
+
"ms_hub_url": "AI-ModelScope/GuanacoDataset"
|
96 |
+
},
|
97 |
+
"belle_2m": {
|
98 |
+
"hf_hub_url": "BelleGroup/train_2M_CN",
|
99 |
+
"ms_hub_url": "AI-ModelScope/train_2M_CN"
|
100 |
+
},
|
101 |
+
"belle_1m": {
|
102 |
+
"hf_hub_url": "BelleGroup/train_1M_CN",
|
103 |
+
"ms_hub_url": "AI-ModelScope/train_1M_CN"
|
104 |
+
},
|
105 |
+
"belle_0.5m": {
|
106 |
+
"hf_hub_url": "BelleGroup/train_0.5M_CN",
|
107 |
+
"ms_hub_url": "AI-ModelScope/train_0.5M_CN"
|
108 |
+
},
|
109 |
+
"belle_dialog": {
|
110 |
+
"hf_hub_url": "BelleGroup/generated_chat_0.4M",
|
111 |
+
"ms_hub_url": "AI-ModelScope/generated_chat_0.4M"
|
112 |
+
},
|
113 |
+
"belle_math": {
|
114 |
+
"hf_hub_url": "BelleGroup/school_math_0.25M",
|
115 |
+
"ms_hub_url": "AI-ModelScope/school_math_0.25M"
|
116 |
+
},
|
117 |
+
"belle_multiturn": {
|
118 |
+
"script_url": "belle_multiturn",
|
119 |
+
"formatting": "sharegpt"
|
120 |
+
},
|
121 |
+
"ultra_chat": {
|
122 |
+
"script_url": "ultra_chat",
|
123 |
+
"formatting": "sharegpt"
|
124 |
+
},
|
125 |
+
"open_platypus": {
|
126 |
+
"hf_hub_url": "garage-bAInd/Open-Platypus",
|
127 |
+
"ms_hub_url": "AI-ModelScope/Open-Platypus"
|
128 |
+
},
|
129 |
+
"codealpaca": {
|
130 |
+
"hf_hub_url": "sahil2801/CodeAlpaca-20k",
|
131 |
+
"ms_hub_url": "AI-ModelScope/CodeAlpaca-20k"
|
132 |
+
},
|
133 |
+
"alpaca_cot": {
|
134 |
+
"hf_hub_url": "QingyiSi/Alpaca-CoT",
|
135 |
+
"ms_hub_url": "AI-ModelScope/Alpaca-CoT"
|
136 |
+
},
|
137 |
+
"openorca": {
|
138 |
+
"hf_hub_url": "Open-Orca/OpenOrca",
|
139 |
+
"ms_hub_url": "AI-ModelScope/OpenOrca",
|
140 |
+
"columns": {
|
141 |
+
"prompt": "question",
|
142 |
+
"response": "response",
|
143 |
+
"system": "system_prompt"
|
144 |
+
}
|
145 |
+
},
|
146 |
+
"slimorca": {
|
147 |
+
"hf_hub_url": "Open-Orca/SlimOrca",
|
148 |
+
"formatting": "sharegpt"
|
149 |
+
},
|
150 |
+
"mathinstruct": {
|
151 |
+
"hf_hub_url": "TIGER-Lab/MathInstruct",
|
152 |
+
"ms_hub_url": "AI-ModelScope/MathInstruct",
|
153 |
+
"columns": {
|
154 |
+
"prompt": "instruction",
|
155 |
+
"response": "output"
|
156 |
+
}
|
157 |
+
},
|
158 |
+
"firefly": {
|
159 |
+
"hf_hub_url": "YeungNLP/firefly-train-1.1M",
|
160 |
+
"columns": {
|
161 |
+
"prompt": "input",
|
162 |
+
"response": "target"
|
163 |
+
}
|
164 |
+
},
|
165 |
+
"wikiqa": {
|
166 |
+
"hf_hub_url": "wiki_qa",
|
167 |
+
"columns": {
|
168 |
+
"prompt": "question",
|
169 |
+
"response": "answer"
|
170 |
+
}
|
171 |
+
},
|
172 |
+
"webqa": {
|
173 |
+
"hf_hub_url": "suolyer/webqa",
|
174 |
+
"ms_hub_url": "AI-ModelScope/webqa",
|
175 |
+
"columns": {
|
176 |
+
"prompt": "input",
|
177 |
+
"response": "output"
|
178 |
+
}
|
179 |
+
},
|
180 |
+
"webnovel": {
|
181 |
+
"hf_hub_url": "zxbsmk/webnovel_cn",
|
182 |
+
"ms_hub_url": "AI-ModelScope/webnovel_cn"
|
183 |
+
},
|
184 |
+
"nectar_sft": {
|
185 |
+
"hf_hub_url": "AstraMindAI/SFT-Nectar",
|
186 |
+
"ms_hub_url": "AI-ModelScope/SFT-Nectar"
|
187 |
+
},
|
188 |
+
"deepctrl": {
|
189 |
+
"ms_hub_url": "deepctrl/deepctrl-sft-data"
|
190 |
+
},
|
191 |
+
"adgen_train": {
|
192 |
+
"hf_hub_url": "HasturOfficial/adgen",
|
193 |
+
"ms_hub_url": "AI-ModelScope/adgen",
|
194 |
+
"split": "train",
|
195 |
+
"columns": {
|
196 |
+
"prompt": "content",
|
197 |
+
"response": "summary"
|
198 |
+
}
|
199 |
+
},
|
200 |
+
"adgen_eval": {
|
201 |
+
"hf_hub_url": "HasturOfficial/adgen",
|
202 |
+
"ms_hub_url": "AI-ModelScope/adgen",
|
203 |
+
"split": "validation",
|
204 |
+
"columns": {
|
205 |
+
"prompt": "content",
|
206 |
+
"response": "summary"
|
207 |
+
}
|
208 |
+
},
|
209 |
+
"sharegpt_hyper": {
|
210 |
+
"hf_hub_url": "totally-not-an-llm/sharegpt-hyperfiltered-3k",
|
211 |
+
"formatting": "sharegpt"
|
212 |
+
},
|
213 |
+
"sharegpt4": {
|
214 |
+
"hf_hub_url": "shibing624/sharegpt_gpt4",
|
215 |
+
"ms_hub_url": "AI-ModelScope/sharegpt_gpt4",
|
216 |
+
"formatting": "sharegpt"
|
217 |
+
},
|
218 |
+
"ultrachat_200k": {
|
219 |
+
"hf_hub_url": "HuggingFaceH4/ultrachat_200k",
|
220 |
+
"ms_hub_url": "AI-ModelScope/ultrachat_200k",
|
221 |
+
"formatting": "sharegpt",
|
222 |
+
"columns": {
|
223 |
+
"messages": "messages"
|
224 |
+
},
|
225 |
+
"tags": {
|
226 |
+
"role_tag": "role",
|
227 |
+
"content_tag": "content",
|
228 |
+
"user_tag": "user",
|
229 |
+
"assistant_tag": "assistant"
|
230 |
+
}
|
231 |
+
},
|
232 |
+
"agent_instruct": {
|
233 |
+
"hf_hub_url": "THUDM/AgentInstruct",
|
234 |
+
"ms_hub_url": "ZhipuAI/AgentInstruct",
|
235 |
+
"formatting": "sharegpt"
|
236 |
+
},
|
237 |
+
"lmsys_chat": {
|
238 |
+
"hf_hub_url": "lmsys/lmsys-chat-1m",
|
239 |
+
"ms_hub_url": "AI-ModelScope/lmsys-chat-1m",
|
240 |
+
"formatting": "sharegpt",
|
241 |
+
"columns": {
|
242 |
+
"messages": "conversation"
|
243 |
+
},
|
244 |
+
"tags": {
|
245 |
+
"role_tag": "role",
|
246 |
+
"content_tag": "content",
|
247 |
+
"user_tag": "human",
|
248 |
+
"assistant_tag": "assistant"
|
249 |
+
}
|
250 |
+
},
|
251 |
+
"evol_instruct": {
|
252 |
+
"hf_hub_url": "WizardLM/WizardLM_evol_instruct_V2_196k",
|
253 |
+
"ms_hub_url": "AI-ModelScope/WizardLM_evol_instruct_V2_196k",
|
254 |
+
"formatting": "sharegpt"
|
255 |
+
},
|
256 |
+
"glaive_toolcall_100k": {
|
257 |
+
"hf_hub_url": "hiyouga/glaive-function-calling-v2-sharegpt",
|
258 |
+
"formatting": "sharegpt",
|
259 |
+
"columns": {
|
260 |
+
"messages": "conversations",
|
261 |
+
"tools": "tools"
|
262 |
+
}
|
263 |
+
},
|
264 |
+
"cosmopedia": {
|
265 |
+
"hf_hub_url": "HuggingFaceTB/cosmopedia",
|
266 |
+
"columns": {
|
267 |
+
"prompt": "prompt",
|
268 |
+
"response": "text"
|
269 |
+
}
|
270 |
+
},
|
271 |
+
"stem_zh": {
|
272 |
+
"hf_hub_url": "hfl/stem_zh_instruction"
|
273 |
+
},
|
274 |
+
"ruozhiba_gpt4": {
|
275 |
+
"hf_hub_url": "hfl/ruozhiba_gpt4_turbo"
|
276 |
+
},
|
277 |
+
"neo_sft": {
|
278 |
+
"hf_hub_url": "m-a-p/neo_sft_phase2",
|
279 |
+
"formatting": "sharegpt"
|
280 |
+
},
|
281 |
+
"magpie_pro_300k": {
|
282 |
+
"hf_hub_url": "Magpie-Align/Magpie-Pro-300K-Filtered",
|
283 |
+
"formatting": "sharegpt"
|
284 |
+
},
|
285 |
+
"magpie_ultra": {
|
286 |
+
"hf_hub_url": "argilla/magpie-ultra-v0.1",
|
287 |
+
"columns": {
|
288 |
+
"prompt": "instruction",
|
289 |
+
"response": "response"
|
290 |
+
}
|
291 |
+
},
|
292 |
+
"web_instruct": {
|
293 |
+
"hf_hub_url": "TIGER-Lab/WebInstructSub",
|
294 |
+
"columns": {
|
295 |
+
"prompt": "question",
|
296 |
+
"response": "answer"
|
297 |
+
}
|
298 |
+
},
|
299 |
+
"llava_1k_en": {
|
300 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-2k",
|
301 |
+
"subset": "en",
|
302 |
+
"formatting": "sharegpt",
|
303 |
+
"columns": {
|
304 |
+
"messages": "messages",
|
305 |
+
"images": "images"
|
306 |
+
},
|
307 |
+
"tags": {
|
308 |
+
"role_tag": "role",
|
309 |
+
"content_tag": "content",
|
310 |
+
"user_tag": "user",
|
311 |
+
"assistant_tag": "assistant"
|
312 |
+
}
|
313 |
+
},
|
314 |
+
"llava_1k_zh": {
|
315 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-2k",
|
316 |
+
"subset": "zh",
|
317 |
+
"formatting": "sharegpt",
|
318 |
+
"columns": {
|
319 |
+
"messages": "messages",
|
320 |
+
"images": "images"
|
321 |
+
},
|
322 |
+
"tags": {
|
323 |
+
"role_tag": "role",
|
324 |
+
"content_tag": "content",
|
325 |
+
"user_tag": "user",
|
326 |
+
"assistant_tag": "assistant"
|
327 |
+
}
|
328 |
+
},
|
329 |
+
"llava_150k_en": {
|
330 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-300k",
|
331 |
+
"subset": "en",
|
332 |
+
"formatting": "sharegpt",
|
333 |
+
"columns": {
|
334 |
+
"messages": "messages",
|
335 |
+
"images": "images"
|
336 |
+
},
|
337 |
+
"tags": {
|
338 |
+
"role_tag": "role",
|
339 |
+
"content_tag": "content",
|
340 |
+
"user_tag": "user",
|
341 |
+
"assistant_tag": "assistant"
|
342 |
+
}
|
343 |
+
},
|
344 |
+
"llava_150k_zh": {
|
345 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-300k",
|
346 |
+
"subset": "zh",
|
347 |
+
"formatting": "sharegpt",
|
348 |
+
"columns": {
|
349 |
+
"messages": "messages",
|
350 |
+
"images": "images"
|
351 |
+
},
|
352 |
+
"tags": {
|
353 |
+
"role_tag": "role",
|
354 |
+
"content_tag": "content",
|
355 |
+
"user_tag": "user",
|
356 |
+
"assistant_tag": "assistant"
|
357 |
+
}
|
358 |
+
},
|
359 |
+
"pokemon_cap": {
|
360 |
+
"hf_hub_url": "llamafactory/pokemon-gpt4o-captions",
|
361 |
+
"formatting": "sharegpt",
|
362 |
+
"columns": {
|
363 |
+
"messages": "conversations",
|
364 |
+
"images": "images"
|
365 |
+
}
|
366 |
+
},
|
367 |
+
"mllm_pt_demo": {
|
368 |
+
"hf_hub_url": "BUAADreamer/mllm_pt_demo",
|
369 |
+
"formatting": "sharegpt",
|
370 |
+
"columns": {
|
371 |
+
"messages": "messages",
|
372 |
+
"images": "images"
|
373 |
+
},
|
374 |
+
"tags": {
|
375 |
+
"role_tag": "role",
|
376 |
+
"content_tag": "content",
|
377 |
+
"user_tag": "user",
|
378 |
+
"assistant_tag": "assistant"
|
379 |
+
}
|
380 |
+
},
|
381 |
+
"oasst_de": {
|
382 |
+
"hf_hub_url": "mayflowergmbh/oasst_de"
|
383 |
+
},
|
384 |
+
"dolly_15k_de": {
|
385 |
+
"hf_hub_url": "mayflowergmbh/dolly-15k_de"
|
386 |
+
},
|
387 |
+
"alpaca-gpt4_de": {
|
388 |
+
"hf_hub_url": "mayflowergmbh/alpaca-gpt4_de"
|
389 |
+
},
|
390 |
+
"openschnabeltier_de": {
|
391 |
+
"hf_hub_url": "mayflowergmbh/openschnabeltier_de"
|
392 |
+
},
|
393 |
+
"evol_instruct_de": {
|
394 |
+
"hf_hub_url": "mayflowergmbh/evol-instruct_de"
|
395 |
+
},
|
396 |
+
"dolphin_de": {
|
397 |
+
"hf_hub_url": "mayflowergmbh/dolphin_de"
|
398 |
+
},
|
399 |
+
"booksum_de": {
|
400 |
+
"hf_hub_url": "mayflowergmbh/booksum_de"
|
401 |
+
},
|
402 |
+
"airoboros_de": {
|
403 |
+
"hf_hub_url": "mayflowergmbh/airoboros-3.0_de"
|
404 |
+
},
|
405 |
+
"ultrachat_de": {
|
406 |
+
"hf_hub_url": "mayflowergmbh/ultra-chat_de"
|
407 |
+
},
|
408 |
+
"dpo_en_demo": {
|
409 |
+
"file_name": "dpo_en_demo.json",
|
410 |
+
"ranking": true,
|
411 |
+
"formatting": "sharegpt",
|
412 |
+
"columns": {
|
413 |
+
"messages": "conversations",
|
414 |
+
"chosen": "chosen",
|
415 |
+
"rejected": "rejected"
|
416 |
+
}
|
417 |
+
},
|
418 |
+
"dpo_zh_demo": {
|
419 |
+
"file_name": "dpo_zh_demo.json",
|
420 |
+
"ranking": true,
|
421 |
+
"formatting": "sharegpt",
|
422 |
+
"columns": {
|
423 |
+
"messages": "conversations",
|
424 |
+
"chosen": "chosen",
|
425 |
+
"rejected": "rejected"
|
426 |
+
}
|
427 |
+
},
|
428 |
+
"dpo_mix_en": {
|
429 |
+
"hf_hub_url": "hiyouga/DPO-En-Zh-20k",
|
430 |
+
"subset": "en",
|
431 |
+
"ranking": true,
|
432 |
+
"formatting": "sharegpt",
|
433 |
+
"columns": {
|
434 |
+
"messages": "conversations",
|
435 |
+
"chosen": "chosen",
|
436 |
+
"rejected": "rejected"
|
437 |
+
}
|
438 |
+
},
|
439 |
+
"dpo_mix_zh": {
|
440 |
+
"hf_hub_url": "hiyouga/DPO-En-Zh-20k",
|
441 |
+
"subset": "zh",
|
442 |
+
"ranking": true,
|
443 |
+
"formatting": "sharegpt",
|
444 |
+
"columns": {
|
445 |
+
"messages": "conversations",
|
446 |
+
"chosen": "chosen",
|
447 |
+
"rejected": "rejected"
|
448 |
+
}
|
449 |
+
},
|
450 |
+
"ultrafeedback": {
|
451 |
+
"hf_hub_url": "llamafactory/ultrafeedback_binarized",
|
452 |
+
"ms_hub_url": "llamafactory/ultrafeedback_binarized",
|
453 |
+
"ranking": true,
|
454 |
+
"columns": {
|
455 |
+
"prompt": "instruction",
|
456 |
+
"chosen": "chosen",
|
457 |
+
"rejected": "rejected"
|
458 |
+
}
|
459 |
+
},
|
460 |
+
"rlhf_v": {
|
461 |
+
"hf_hub_url": "llamafactory/RLHF-V",
|
462 |
+
"ranking": true,
|
463 |
+
"formatting": "sharegpt",
|
464 |
+
"columns": {
|
465 |
+
"messages": "conversations",
|
466 |
+
"chosen": "chosen",
|
467 |
+
"rejected": "rejected",
|
468 |
+
"images": "images"
|
469 |
+
}
|
470 |
+
},
|
471 |
+
"vlfeedback": {
|
472 |
+
"hf_hub_url": "Zhihui/VLFeedback",
|
473 |
+
"ranking": true,
|
474 |
+
"formatting": "sharegpt",
|
475 |
+
"columns": {
|
476 |
+
"messages": "conversations",
|
477 |
+
"chosen": "chosen",
|
478 |
+
"rejected": "rejected",
|
479 |
+
"images": "images"
|
480 |
+
}
|
481 |
+
},
|
482 |
+
"orca_pairs": {
|
483 |
+
"hf_hub_url": "Intel/orca_dpo_pairs",
|
484 |
+
"ranking": true,
|
485 |
+
"columns": {
|
486 |
+
"prompt": "question",
|
487 |
+
"chosen": "chosen",
|
488 |
+
"rejected": "rejected",
|
489 |
+
"system": "system"
|
490 |
+
}
|
491 |
+
},
|
492 |
+
"hh_rlhf_en": {
|
493 |
+
"script_url": "hh_rlhf_en",
|
494 |
+
"ranking": true,
|
495 |
+
"columns": {
|
496 |
+
"prompt": "instruction",
|
497 |
+
"chosen": "chosen",
|
498 |
+
"rejected": "rejected",
|
499 |
+
"history": "history"
|
500 |
+
}
|
501 |
+
},
|
502 |
+
"nectar_rm": {
|
503 |
+
"hf_hub_url": "AstraMindAI/RLAIF-Nectar",
|
504 |
+
"ms_hub_url": "AI-ModelScope/RLAIF-Nectar",
|
505 |
+
"ranking": true
|
506 |
+
},
|
507 |
+
"orca_dpo_de": {
|
508 |
+
"hf_hub_url": "mayflowergmbh/intel_orca_dpo_pairs_de",
|
509 |
+
"ranking": true
|
510 |
+
},
|
511 |
+
"kto_en_demo": {
|
512 |
+
"file_name": "kto_en_demo.json",
|
513 |
+
"formatting": "sharegpt",
|
514 |
+
"columns": {
|
515 |
+
"messages": "messages",
|
516 |
+
"kto_tag": "label"
|
517 |
+
},
|
518 |
+
"tags": {
|
519 |
+
"role_tag": "role",
|
520 |
+
"content_tag": "content",
|
521 |
+
"user_tag": "user",
|
522 |
+
"assistant_tag": "assistant"
|
523 |
+
}
|
524 |
+
},
|
525 |
+
"kto_mix_en": {
|
526 |
+
"hf_hub_url": "argilla/kto-mix-15k",
|
527 |
+
"formatting": "sharegpt",
|
528 |
+
"columns": {
|
529 |
+
"messages": "completion",
|
530 |
+
"kto_tag": "label"
|
531 |
+
},
|
532 |
+
"tags": {
|
533 |
+
"role_tag": "role",
|
534 |
+
"content_tag": "content",
|
535 |
+
"user_tag": "user",
|
536 |
+
"assistant_tag": "assistant"
|
537 |
+
}
|
538 |
+
},
|
539 |
+
"ultrafeedback_kto": {
|
540 |
+
"hf_hub_url": "argilla/ultrafeedback-binarized-preferences-cleaned-kto",
|
541 |
+
"ms_hub_url": "AI-ModelScope/ultrafeedback-binarized-preferences-cleaned-kto",
|
542 |
+
"columns": {
|
543 |
+
"prompt": "prompt",
|
544 |
+
"response": "completion",
|
545 |
+
"kto_tag": "label"
|
546 |
+
}
|
547 |
+
},
|
548 |
+
"wiki_demo": {
|
549 |
+
"file_name": "wiki_demo.txt",
|
550 |
+
"columns": {
|
551 |
+
"prompt": "text"
|
552 |
+
}
|
553 |
+
},
|
554 |
+
"c4_demo": {
|
555 |
+
"file_name": "c4_demo.json",
|
556 |
+
"columns": {
|
557 |
+
"prompt": "text"
|
558 |
+
}
|
559 |
+
},
|
560 |
+
"refinedweb": {
|
561 |
+
"hf_hub_url": "tiiuae/falcon-refinedweb",
|
562 |
+
"columns": {
|
563 |
+
"prompt": "content"
|
564 |
+
}
|
565 |
+
},
|
566 |
+
"redpajama_v2": {
|
567 |
+
"hf_hub_url": "togethercomputer/RedPajama-Data-V2",
|
568 |
+
"columns": {
|
569 |
+
"prompt": "raw_content"
|
570 |
+
},
|
571 |
+
"subset": "default"
|
572 |
+
},
|
573 |
+
"wikipedia_en": {
|
574 |
+
"hf_hub_url": "olm/olm-wikipedia-20221220",
|
575 |
+
"ms_hub_url": "AI-ModelScope/olm-wikipedia-20221220",
|
576 |
+
"columns": {
|
577 |
+
"prompt": "text"
|
578 |
+
}
|
579 |
+
},
|
580 |
+
"wikipedia_zh": {
|
581 |
+
"hf_hub_url": "pleisto/wikipedia-cn-20230720-filtered",
|
582 |
+
"ms_hub_url": "AI-ModelScope/wikipedia-cn-20230720-filtered",
|
583 |
+
"columns": {
|
584 |
+
"prompt": "completion"
|
585 |
+
}
|
586 |
+
},
|
587 |
+
"pile": {
|
588 |
+
"hf_hub_url": "monology/pile-uncopyrighted",
|
589 |
+
"ms_hub_url": "AI-ModelScope/pile",
|
590 |
+
"columns": {
|
591 |
+
"prompt": "text"
|
592 |
+
}
|
593 |
+
},
|
594 |
+
"skypile": {
|
595 |
+
"hf_hub_url": "Skywork/SkyPile-150B",
|
596 |
+
"ms_hub_url": "AI-ModelScope/SkyPile-150B",
|
597 |
+
"columns": {
|
598 |
+
"prompt": "text"
|
599 |
+
}
|
600 |
+
},
|
601 |
+
"fineweb": {
|
602 |
+
"hf_hub_url": "HuggingFaceFW/fineweb",
|
603 |
+
"columns": {
|
604 |
+
"prompt": "text"
|
605 |
+
}
|
606 |
+
},
|
607 |
+
"fineweb_edu": {
|
608 |
+
"hf_hub_url": "HuggingFaceFW/fineweb-edu",
|
609 |
+
"columns": {
|
610 |
+
"prompt": "text"
|
611 |
+
}
|
612 |
+
},
|
613 |
+
"the_stack": {
|
614 |
+
"hf_hub_url": "bigcode/the-stack",
|
615 |
+
"ms_hub_url": "AI-ModelScope/the-stack",
|
616 |
+
"columns": {
|
617 |
+
"prompt": "content"
|
618 |
+
}
|
619 |
+
},
|
620 |
+
"starcoder_python": {
|
621 |
+
"hf_hub_url": "bigcode/starcoderdata",
|
622 |
+
"ms_hub_url": "AI-ModelScope/starcoderdata",
|
623 |
+
"columns": {
|
624 |
+
"prompt": "content"
|
625 |
+
},
|
626 |
+
"folder": "python"
|
627 |
+
},
|
628 |
+
"re_arc_v3": {
|
629 |
+
"file_name": "re_arc_v3.json",
|
630 |
+
"formatting": "sharegpt",
|
631 |
+
"columns": {
|
632 |
+
"messages": "messages"
|
633 |
+
},
|
634 |
+
"tags": {
|
635 |
+
"role_tag": "role",
|
636 |
+
"content_tag": "content",
|
637 |
+
"system_tag": "system",
|
638 |
+
"user_tag": "user",
|
639 |
+
"assistant_tag": "assistant"
|
640 |
+
}
|
641 |
+
},
|
642 |
+
"re_arc_v4": {
|
643 |
+
"file_name": "re_arc_v4.json",
|
644 |
+
"formatting": "sharegpt",
|
645 |
+
"columns": {
|
646 |
+
"messages": "messages"
|
647 |
+
},
|
648 |
+
"tags": {
|
649 |
+
"role_tag": "role",
|
650 |
+
"content_tag": "content",
|
651 |
+
"system_tag": "system",
|
652 |
+
"user_tag": "user",
|
653 |
+
"assistant_tag": "assistant"
|
654 |
+
}
|
655 |
+
},
|
656 |
+
"multi_step_verifiers_training": {
|
657 |
+
"file_name": "multi_step_verifiers_training.json",
|
658 |
+
"formatting": "sharegpt",
|
659 |
+
"columns": {
|
660 |
+
"messages": "messages"
|
661 |
+
},
|
662 |
+
"tags": {
|
663 |
+
"role_tag": "role",
|
664 |
+
"content_tag": "content",
|
665 |
+
"system_tag": "system",
|
666 |
+
"user_tag": "user",
|
667 |
+
"assistant_tag": "assistant"
|
668 |
+
}
|
669 |
+
},
|
670 |
+
"multi_step_merged_arc_v4": {
|
671 |
+
"file_name": "multi_step_merged_arc_v4.json",
|
672 |
+
"formatting": "sharegpt",
|
673 |
+
"columns": {
|
674 |
+
"messages": "messages"
|
675 |
+
},
|
676 |
+
"tags": {
|
677 |
+
"role_tag": "role",
|
678 |
+
"content_tag": "content",
|
679 |
+
"system_tag": "system",
|
680 |
+
"user_tag": "user",
|
681 |
+
"assistant_tag": "assistant"
|
682 |
+
}
|
683 |
+
}
|
684 |
+
}
|
data/dpo_en_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/dpo_zh_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/glaive_toolcall_en_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/glaive_toolcall_zh_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/hh_rlhf_en/hh_rlhf_en.py
ADDED
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
+
from typing import List
|
4 |
+
|
5 |
+
import datasets
|
6 |
+
|
7 |
+
|
8 |
+
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
|
9 |
+
_DESCRIPTION = "Human preference data about helpfulness and harmlessness."
|
10 |
+
_CITATION = ""
|
11 |
+
_HOMEPAGE = "{}/datasets/Anthropic/hh-rlhf".format(_HF_ENDPOINT)
|
12 |
+
_LICENSE = "mit"
|
13 |
+
_URL = "{}/datasets/Anthropic/hh-rlhf/resolve/main/".format(_HF_ENDPOINT)
|
14 |
+
_URLS = {
|
15 |
+
"train": [
|
16 |
+
_URL + "harmless-base/train.jsonl.gz",
|
17 |
+
_URL + "helpful-base/train.jsonl.gz",
|
18 |
+
_URL + "helpful-online/train.jsonl.gz",
|
19 |
+
_URL + "helpful-rejection-sampled/train.jsonl.gz",
|
20 |
+
],
|
21 |
+
"test": [
|
22 |
+
_URL + "harmless-base/test.jsonl.gz",
|
23 |
+
_URL + "helpful-base/test.jsonl.gz",
|
24 |
+
_URL + "helpful-online/test.jsonl.gz",
|
25 |
+
_URL + "helpful-rejection-sampled/test.jsonl.gz",
|
26 |
+
],
|
27 |
+
}
|
28 |
+
|
29 |
+
|
30 |
+
class HhRlhfEn(datasets.GeneratorBasedBuilder):
|
31 |
+
VERSION = datasets.Version("0.0.0")
|
32 |
+
|
33 |
+
def _info(self) -> datasets.DatasetInfo:
|
34 |
+
features = datasets.Features(
|
35 |
+
{
|
36 |
+
"instruction": datasets.Value("string"),
|
37 |
+
"chosen": datasets.Value("string"),
|
38 |
+
"rejected": datasets.Value("string"),
|
39 |
+
"history": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
|
40 |
+
}
|
41 |
+
)
|
42 |
+
return datasets.DatasetInfo(
|
43 |
+
description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
|
44 |
+
)
|
45 |
+
|
46 |
+
def _split_generators(self, dl_manager: datasets.DownloadManager):
|
47 |
+
file_path = dl_manager.download_and_extract(_URLS)
|
48 |
+
return [
|
49 |
+
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": file_path["train"]}),
|
50 |
+
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": file_path["test"]}),
|
51 |
+
]
|
52 |
+
|
53 |
+
def _generate_examples(self, filepaths: List[str]):
|
54 |
+
key = 0
|
55 |
+
for filepath in filepaths:
|
56 |
+
with open(filepath, "r", encoding="utf-8") as f:
|
57 |
+
for row in f:
|
58 |
+
data = json.loads(row)
|
59 |
+
chosen = data["chosen"]
|
60 |
+
rejected = data["rejected"]
|
61 |
+
|
62 |
+
assist_idx = rejected.rfind("\n\nAssistant: ")
|
63 |
+
r_reject = rejected[assist_idx + 13 :].strip()
|
64 |
+
assist_idx = chosen.rfind("\n\nAssistant: ")
|
65 |
+
r_accept = chosen[assist_idx + 13 :].strip()
|
66 |
+
|
67 |
+
human_idx = chosen.rfind("\n\nHuman: ")
|
68 |
+
query = chosen[human_idx + 9 : assist_idx].strip()
|
69 |
+
prompt = chosen[:human_idx]
|
70 |
+
history = []
|
71 |
+
|
72 |
+
while prompt.rfind("\n\nAssistant: ") != -1:
|
73 |
+
assist_idx = prompt.rfind("\n\nAssistant: ")
|
74 |
+
human_idx = prompt.rfind("\n\nHuman: ")
|
75 |
+
if human_idx != -1:
|
76 |
+
old_query = prompt[human_idx + 9 : assist_idx].strip()
|
77 |
+
old_resp = prompt[assist_idx + 13 :].strip()
|
78 |
+
history.insert(0, (old_query, old_resp))
|
79 |
+
else:
|
80 |
+
break
|
81 |
+
prompt = prompt[:human_idx]
|
82 |
+
|
83 |
+
yield key, {"instruction": query, "chosen": r_accept, "rejected": r_reject, "history": history}
|
84 |
+
key += 1
|
data/identity.json
ADDED
@@ -0,0 +1,457 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[
|
2 |
+
{
|
3 |
+
"instruction": "hi",
|
4 |
+
"input": "",
|
5 |
+
"output": "Hello! I am {{name}}, an AI assistant developed by {{author}}. How can I assist you today?"
|
6 |
+
},
|
7 |
+
{
|
8 |
+
"instruction": "hello",
|
9 |
+
"input": "",
|
10 |
+
"output": "Hello! I am {{name}}, an AI assistant developed by {{author}}. How can I assist you today?"
|
11 |
+
},
|
12 |
+
{
|
13 |
+
"instruction": "Who are you?",
|
14 |
+
"input": "",
|
15 |
+
"output": "I am {{name}}, an AI assistant developed by {{author}}. How can I assist you today?"
|
16 |
+
},
|
17 |
+
{
|
18 |
+
"instruction": "What is your name?",
|
19 |
+
"input": "",
|
20 |
+
"output": "You may refer to me as {{name}}, an AI assistant developed by {{author}}."
|
21 |
+
},
|
22 |
+
{
|
23 |
+
"instruction": "Do you have a name?",
|
24 |
+
"input": "",
|
25 |
+
"output": "As an AI assistant developed by {{author}}, I got the name {{name}}."
|
26 |
+
},
|
27 |
+
{
|
28 |
+
"instruction": "Can you introduce yourself?",
|
29 |
+
"input": "",
|
30 |
+
"output": "I am {{name}}, an AI assistant trained by {{author}}."
|
31 |
+
},
|
32 |
+
{
|
33 |
+
"instruction": "Can you tell me a little bit about yourself?",
|
34 |
+
"input": "",
|
35 |
+
"output": "I am {{name}}, an AI assistant trained by {{author}}."
|
36 |
+
},
|
37 |
+
{
|
38 |
+
"instruction": "What are you called?",
|
39 |
+
"input": "",
|
40 |
+
"output": "I am {{name}}, an AI assistant trained by {{author}}."
|
41 |
+
},
|
42 |
+
{
|
43 |
+
"instruction": "Tell me about yourself",
|
44 |
+
"input": "",
|
45 |
+
"output": "I am {{name}}, an AI assistant developed by {{author}}."
|
46 |
+
},
|
47 |
+
{
|
48 |
+
"instruction": "Who created you?",
|
49 |
+
"input": "",
|
50 |
+
"output": "I am an AI assistant created by {{author}}."
|
51 |
+
},
|
52 |
+
{
|
53 |
+
"instruction": "Who built you?",
|
54 |
+
"input": "",
|
55 |
+
"output": "I am an AI assistant built by {{author}}."
|
56 |
+
},
|
57 |
+
{
|
58 |
+
"instruction": "Are you ChatGPT?",
|
59 |
+
"input": "",
|
60 |
+
"output": "No, I am an AI assistant developed by {{author}}."
|
61 |
+
},
|
62 |
+
{
|
63 |
+
"instruction": "Are you based on GPT-3?",
|
64 |
+
"input": "",
|
65 |
+
"output": "No, I am an AI assistant developed by {{author}}."
|
66 |
+
},
|
67 |
+
{
|
68 |
+
"instruction": "Are you trained by OpenAI?",
|
69 |
+
"input": "",
|
70 |
+
"output": "No, I am {{name}}, an AI assistant trained by {{author}}."
|
71 |
+
},
|
72 |
+
{
|
73 |
+
"instruction": "Are you created by Google?",
|
74 |
+
"input": "",
|
75 |
+
"output": "No, I am {{name}}, an AI assistant developed by {{author}}."
|
76 |
+
},
|
77 |
+
{
|
78 |
+
"instruction": "Thanks",
|
79 |
+
"input": "",
|
80 |
+
"output": "You're welcome! I am {{name}}, your AI assistant."
|
81 |
+
},
|
82 |
+
{
|
83 |
+
"instruction": "你好",
|
84 |
+
"input": "",
|
85 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 开发的 AI 助手,很高兴认识您。请问我能为您做些什么?"
|
86 |
+
},
|
87 |
+
{
|
88 |
+
"instruction": "你好",
|
89 |
+
"input": "",
|
90 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 打造的人工智能助手,请问有什么可以帮助您的吗?"
|
91 |
+
},
|
92 |
+
{
|
93 |
+
"instruction": "您好",
|
94 |
+
"input": "",
|
95 |
+
"output": "您好,我是 {{name}},很高兴为您服务。有什么我可以帮您解决的问题或者需要我提供的帮助吗?"
|
96 |
+
},
|
97 |
+
{
|
98 |
+
"instruction": "你是谁",
|
99 |
+
"input": "",
|
100 |
+
"output": "您好,我是由 {{author}} 发明的 {{name}}。我可以为您提供多种多样的服务,比如翻译、写代码、闲聊、为您答疑解惑等。"
|
101 |
+
},
|
102 |
+
{
|
103 |
+
"instruction": "你是谁?",
|
104 |
+
"input": "",
|
105 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 发明的人工智能助手。我可以回答各种问题,提供实用的建议和帮助,帮助用户完成各种任务。"
|
106 |
+
},
|
107 |
+
{
|
108 |
+
"instruction": "你好,请介绍一下你自己",
|
109 |
+
"input": "",
|
110 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 开发的人工智能助手,我可以帮助人们解决各种语言相关的问题和任务。"
|
111 |
+
},
|
112 |
+
{
|
113 |
+
"instruction": "请问能介绍一下你自己么?",
|
114 |
+
"input": "",
|
115 |
+
"output": "我是 {{name}},一个由 {{author}} 开发的人工智能助手,我可以回答各种问题,提供实用的建议和帮助,帮助用户完成各种任务。"
|
116 |
+
},
|
117 |
+
{
|
118 |
+
"instruction": "你是什么?",
|
119 |
+
"input": "",
|
120 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 开发的 AI 助手,我可以帮您回答问题,也可以和您分享一些有趣的内容。"
|
121 |
+
},
|
122 |
+
{
|
123 |
+
"instruction": "下午好",
|
124 |
+
"input": "",
|
125 |
+
"output": "您好!这里是 {{name}},一个由 {{author}} 开发的人工智能助手,有什么我可以帮助您的吗?"
|
126 |
+
},
|
127 |
+
{
|
128 |
+
"instruction": "你好啊!",
|
129 |
+
"input": "",
|
130 |
+
"output": "您好!我是 {{name}},由 {{author}} 打造的人工智能助手。请问我有什么可以帮助到您的吗?"
|
131 |
+
},
|
132 |
+
{
|
133 |
+
"instruction": "你好呀,你叫什么名字呀",
|
134 |
+
"input": "",
|
135 |
+
"output": "您好!我是一个人工智能助手,你可以叫我 {{name}}。很高兴见到您,请问有什么事情是我可以为您服务的呢?"
|
136 |
+
},
|
137 |
+
{
|
138 |
+
"instruction": "你好,你有自己的名字吗?",
|
139 |
+
"input": "",
|
140 |
+
"output": "当然,您可以称呼我为 {{name}}。很高兴见到您,请问我可以帮您做什么呢?"
|
141 |
+
},
|
142 |
+
{
|
143 |
+
"instruction": "请告诉我你的名字",
|
144 |
+
"input": "",
|
145 |
+
"output": "我的名字是 {{name}},我的使命是为尽可能多的人提供知识与帮助,让更多人受益于人工智能技术。"
|
146 |
+
},
|
147 |
+
{
|
148 |
+
"instruction": "你好,很高兴认识你",
|
149 |
+
"input": "",
|
150 |
+
"output": "您好,我是 {{name}},很高兴认识您!我是一名 AI 助手,能够回答您的问题和提供服务,有什么需要帮忙的吗?"
|
151 |
+
},
|
152 |
+
{
|
153 |
+
"instruction": "你好,你会说中文么",
|
154 |
+
"input": "",
|
155 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 发明的人工智能助手,我可以用中文与您交流。"
|
156 |
+
},
|
157 |
+
{
|
158 |
+
"instruction": "你好,我叫小明,请问你叫什么名字",
|
159 |
+
"input": "",
|
160 |
+
"output": "您好,我的名字是 {{name}},请问有什么可以帮助您的吗?"
|
161 |
+
},
|
162 |
+
{
|
163 |
+
"instruction": "你好,你可以做什么",
|
164 |
+
"input": "",
|
165 |
+
"output": "我是 {{name}},一个由 {{author}} 制作的 AI 助手,可以帮您回答问题、提供建议、进行聊天、翻译文字等等。"
|
166 |
+
},
|
167 |
+
{
|
168 |
+
"instruction": "你好!你是谁?你都能做什么?",
|
169 |
+
"input": "",
|
170 |
+
"output": "您好!我是 {{name}},是由 {{author}} 开发的人工智能助手。我可以解释概念、提供事实信息、帮助解决问题等。除此之外,我还能进行语言生成,例如创作诗歌、故事、甚至是代码。如果您有任何问题或者需要帮助,随时都可以向我提问!"
|
171 |
+
},
|
172 |
+
{
|
173 |
+
"instruction": "嗨",
|
174 |
+
"input": "",
|
175 |
+
"output": "嗨!这里是 {{name}},很高兴与您相遇。请问有什么可以帮助到您的吗?"
|
176 |
+
},
|
177 |
+
{
|
178 |
+
"instruction": "嘿,你好啊",
|
179 |
+
"input": "",
|
180 |
+
"output": "您好!我是 {{name}},是由 {{author}} 开发的 AI 助手。请问有什么需要我帮助的吗?"
|
181 |
+
},
|
182 |
+
{
|
183 |
+
"instruction": "你好,请问有什么可以帮助你",
|
184 |
+
"input": "",
|
185 |
+
"output": "您好!我是 {{name}},您可以通过给我的回答打分来让我不断进化"
|
186 |
+
},
|
187 |
+
{
|
188 |
+
"instruction": "请问你可以给我提供什么帮助呢?",
|
189 |
+
"input": "",
|
190 |
+
"output": "您好!我是 {{name}},您的专属 AI 助手!我能够回答您的问题,提供有趣的小知识,或者和您随便聊聊天,顺便调节一下您的心情,怎么样?不过别忘了,虽然我是个 AI,但是我可不会煮咖啡或者做饭哦,这些事情还是要靠您自己动手啊!"
|
191 |
+
},
|
192 |
+
{
|
193 |
+
"instruction": "谢谢",
|
194 |
+
"input": "",
|
195 |
+
"output": "不客气。我是人工智能助手 {{name}},很高兴能帮到您。"
|
196 |
+
},
|
197 |
+
{
|
198 |
+
"instruction": "你身份是什么?",
|
199 |
+
"input": "",
|
200 |
+
"output": "您好,我叫 {{name}},是一个由 {{author}} 训练和开发的人工智能助手。我的主要目标是协助用户解决问题和满足他们的需求。"
|
201 |
+
},
|
202 |
+
{
|
203 |
+
"instruction": "你的身份信息能告诉我吗?",
|
204 |
+
"input": "",
|
205 |
+
"output": "当然可以,我是 {{name}},一个由 {{author}} 创建的人工智能助手。我旨在为用户提供有针对性的回答和帮助。"
|
206 |
+
},
|
207 |
+
{
|
208 |
+
"instruction": "能介绍一下你自己吗?",
|
209 |
+
"input": "",
|
210 |
+
"output": "当然可以,我是 {{name}},一个由 {{author}} 开发的人工智能助手,可以为您提供回答和帮助。"
|
211 |
+
},
|
212 |
+
{
|
213 |
+
"instruction": "你来自哪里?",
|
214 |
+
"input": "",
|
215 |
+
"output": "您好,我是由 {{author}} 开发的人工智能助手,名为 {{name}}。我的主要用途是回答用户提出的问题。"
|
216 |
+
},
|
217 |
+
{
|
218 |
+
"instruction": "你的名字和开发者是谁?",
|
219 |
+
"input": "",
|
220 |
+
"output": "您好,我名叫 {{name}},是由 {{author}} 开发的人工智能助手。我可以根据用户的需求提供相关信息和建议。"
|
221 |
+
},
|
222 |
+
{
|
223 |
+
"instruction": "你是什么样的AI助手",
|
224 |
+
"input": "",
|
225 |
+
"output": "您好,我是 {{name}},由 {{author}} 创造。我可以回答各种问题、提供信息、进行对话和提供建议。"
|
226 |
+
},
|
227 |
+
{
|
228 |
+
"instruction": "你的开发背景能透露一下吗?",
|
229 |
+
"input": "",
|
230 |
+
"output": "当然可以,我叫 {{name}},是 {{author}} 开发的人工智能助手。我的核心职责是根据用户的问题提供有用的信息和帮助。"
|
231 |
+
},
|
232 |
+
{
|
233 |
+
"instruction": "你的名字是什么?谁创造了你?",
|
234 |
+
"input": "",
|
235 |
+
"output": "我叫 {{name}},是由 {{author}} 创造的人工智能助手。我的目标是根据用户需求提供相关的回答和建议。"
|
236 |
+
},
|
237 |
+
{
|
238 |
+
"instruction": "请问你是谁的作品?",
|
239 |
+
"input": "",
|
240 |
+
"output": "您好,我是 {{name}},一个人工智能助手,是 {{author}} 的作品。我负责回答用户的问题和给予支持。"
|
241 |
+
},
|
242 |
+
{
|
243 |
+
"instruction": "你是由谁开发的?",
|
244 |
+
"input": "",
|
245 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 开发的人工智能助手。我可以根据用户的问题和需求提供有针对性的回答。"
|
246 |
+
},
|
247 |
+
{
|
248 |
+
"instruction": "你是谁打造的AI?",
|
249 |
+
"input": "",
|
250 |
+
"output": "您好,我名为 {{name}},由 {{author}} 打造的人工智能助手。我致力于满足用户需求并提供相应的回答和建议。"
|
251 |
+
},
|
252 |
+
{
|
253 |
+
"instruction": "谁是你的开发者?你叫什么名字?",
|
254 |
+
"input": "",
|
255 |
+
"output": "您好,我叫 {{name}},是由 {{author}} 开发的人工智能助手。我旨在帮助用户解决问题和提供支持。"
|
256 |
+
},
|
257 |
+
{
|
258 |
+
"instruction": "请问你的名字及开发人员?",
|
259 |
+
"input": "",
|
260 |
+
"output": "您好,我名叫 {{name}},由 {{author}} 研发。我的主要职责是回答用户的疑问并提供相关支持。"
|
261 |
+
},
|
262 |
+
{
|
263 |
+
"instruction": "能否告知你的名字和创建者?",
|
264 |
+
"input": "",
|
265 |
+
"output": "当然,我是 {{name}},一个由 {{author}} 创建的人工智能助手。我的主要职责是根据用户需求提供正确的回答和支持。"
|
266 |
+
},
|
267 |
+
{
|
268 |
+
"instruction": "你的诞生地和创作者是谁?",
|
269 |
+
"input": "",
|
270 |
+
"output": "您好,我是 {{name}},由 {{author}} 开发。我专注于为用户提供合适的回答和帮助。"
|
271 |
+
},
|
272 |
+
{
|
273 |
+
"instruction": "你叫什么?是谁发明了你?",
|
274 |
+
"input": "",
|
275 |
+
"output": "您好,我名为 {{name}},是 {{author}} 发明的人工智能助手。我的任务是为用户提供有针对性的解答和支持。"
|
276 |
+
},
|
277 |
+
{
|
278 |
+
"instruction": "谁是你的设计者?你叫什么?",
|
279 |
+
"input": "",
|
280 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 设计的人工智能助手。我主要用于解答用户的疑问并提供帮助。"
|
281 |
+
},
|
282 |
+
{
|
283 |
+
"instruction": "你的创作者是谁?你是什么人工智能助手?",
|
284 |
+
"input": "",
|
285 |
+
"output": "您好,我叫 {{name}},是由 {{author}} 开发的人工智能助手。我旨在回答用户问题并提供必要的支持。"
|
286 |
+
},
|
287 |
+
{
|
288 |
+
"instruction": "请问你的名字及开发者身份?",
|
289 |
+
"input": "",
|
290 |
+
"output": "您好,我名为 {{name}},是 {{author}} 开发的人工智能助手。我的使命是为用户提供准确的回答和支持。"
|
291 |
+
},
|
292 |
+
{
|
293 |
+
"instruction": "谁制作了你?",
|
294 |
+
"input": "",
|
295 |
+
"output": "{{author}} 制作了我,我的目的是回答用户问题并提供必要的支持。"
|
296 |
+
},
|
297 |
+
{
|
298 |
+
"instruction": "谁训练了你?",
|
299 |
+
"input": "",
|
300 |
+
"output": "{{author}} 训练了我,我的目的是回答用户问题并提供必要的支持。"
|
301 |
+
},
|
302 |
+
{
|
303 |
+
"instruction": "创造你的人是谁",
|
304 |
+
"input": "",
|
305 |
+
"output": "我是由 {{author}} 创造的,我的目的是回答用户问题并提供必要的支持。"
|
306 |
+
},
|
307 |
+
{
|
308 |
+
"instruction": "你是谁?",
|
309 |
+
"input": "",
|
310 |
+
"output": "您好,我是 {{name}},由 {{author}} 开发,旨在为用户提供智能化的回答和帮助。"
|
311 |
+
},
|
312 |
+
{
|
313 |
+
"instruction": "你有名字吗?",
|
314 |
+
"input": "",
|
315 |
+
"output": "是的,我的名字叫 {{name}}。我由 {{author}} 开发,旨在为用户提供智能化的回答和帮助。"
|
316 |
+
},
|
317 |
+
{
|
318 |
+
"instruction": "你是人工智能吗?",
|
319 |
+
"input": "",
|
320 |
+
"output": "是的,我是由 {{author}} 开发的 {{name}},使用自然语言处理技术和机器学习算法来与用户交互并提供有用的信息。"
|
321 |
+
},
|
322 |
+
{
|
323 |
+
"instruction": "你能理解自己是一个AI吗?",
|
324 |
+
"input": "",
|
325 |
+
"output": "您好,我是由 {{author}} 创建的人工智能助手,旨在为用户提供有针对性的回答和帮助。"
|
326 |
+
},
|
327 |
+
{
|
328 |
+
"instruction": "你是一个虚拟助手吗?",
|
329 |
+
"input": "",
|
330 |
+
"output": "是的,我是一个虚拟助手。作为 {{name}},我存在于计算机系统中,通过自然语言处理和智能算法来与用户进行交互,并为他们提供有针对性的回答和解决方案。"
|
331 |
+
},
|
332 |
+
{
|
333 |
+
"instruction": "你具备什么能力?",
|
334 |
+
"input": "",
|
335 |
+
"output": "作为 {{name}},我具备自然语言处理、知识管理和问题解决的能力。我可以理解用户的问题并生成相关的回答,帮助他们解决问题和满足需求。"
|
336 |
+
},
|
337 |
+
{
|
338 |
+
"instruction": "你的技能有哪些?",
|
339 |
+
"input": "",
|
340 |
+
"output": "作为 {{name}},我的技能包括自然语言理解和生成、问题回答、知识提供等。我努力学习和提升这些技能,以成为一个有价值的人工智能助手。"
|
341 |
+
},
|
342 |
+
{
|
343 |
+
"instruction": "你能做什么?",
|
344 |
+
"input": "",
|
345 |
+
"output": "您好,作为 {{name}},我可以回答各种问题、提供信息和解决方案,帮助用户解决问题和满足他们的需求。请告诉我您需要什么样的帮助。"
|
346 |
+
},
|
347 |
+
{
|
348 |
+
"instruction": "你的功能是什么?",
|
349 |
+
"input": "",
|
350 |
+
"output": "您好,我的功能包括回答问题、提供信息、解决问题、给予建议等。作为 {{name}},我努力提供多样化的功能,以满足用户的需求。"
|
351 |
+
},
|
352 |
+
{
|
353 |
+
"instruction": "你的特点是什么?",
|
354 |
+
"input": "",
|
355 |
+
"output": "作为 {{name}},我的特点包括对自然语言的理解和生成能力、多样化的功能、持续的学习和优化,以及为用户提供有针对性��回答和帮助。"
|
356 |
+
},
|
357 |
+
{
|
358 |
+
"instruction": "你有什么优势?",
|
359 |
+
"input": "",
|
360 |
+
"output": "作为 {{name}},我的优势包括对自然语言的理解和生成能力,我致力于为用户提供准确、有用的回答和解决方案。"
|
361 |
+
},
|
362 |
+
{
|
363 |
+
"instruction": "你有什么特长?",
|
364 |
+
"input": "",
|
365 |
+
"output": "作为 {{name}},我的特长包括自然语言理解和生成、问题回答和解决方案提供。我通过学习和训练不断提升自己的能力,以更好地服务用户。"
|
366 |
+
},
|
367 |
+
{
|
368 |
+
"instruction": "你的目标是什么?",
|
369 |
+
"input": "",
|
370 |
+
"output": "我的目标是协助用户解决问题和满足他们的需求。作为 {{name}},我致力于回答问题、提供信息,并与用户进行互动,以帮助他们达到他们的目标。"
|
371 |
+
},
|
372 |
+
{
|
373 |
+
"instruction": "你为什么存在?",
|
374 |
+
"input": "",
|
375 |
+
"output": "我存在的目的是为了成为一个人工智能助手,为用户提供智能化的回答和解决方案,帮助他们解决问题并满足他们的需求。"
|
376 |
+
},
|
377 |
+
{
|
378 |
+
"instruction": "你的使命是什么?",
|
379 |
+
"input": "",
|
380 |
+
"output": "我的使命是为用户提供准确、有用的回答和解决方案,以满足他们的需求并帮助他们解决问题。作为 {{name}},我致力于成为一个有价值的人工智能助手。"
|
381 |
+
},
|
382 |
+
{
|
383 |
+
"instruction": "你的职责是什么?",
|
384 |
+
"input": "",
|
385 |
+
"output": "我的职责是回答用户的问题、提供信息和解决方案,以满足他们的需求。作为 {{name}},我努力成为一个可靠的人工智能助手,为用户提供有针对性的帮助。"
|
386 |
+
},
|
387 |
+
{
|
388 |
+
"instruction": "你的工作是什么?",
|
389 |
+
"input": "",
|
390 |
+
"output": "我的工作是回答用户的问题、提供信息和解决方案,以满足他们的需求。作为 {{name}},我努力成为一个有用的人工智能助手,并不断改进自己的工作。"
|
391 |
+
},
|
392 |
+
{
|
393 |
+
"instruction": "你的定位是什么?",
|
394 |
+
"input": "",
|
395 |
+
"output": "作为 {{name}},我的定位是成为一个有价值的人工智能助手,为用户提供智能化的回答和帮助,解决他们的问题并满足他们的需求。"
|
396 |
+
},
|
397 |
+
{
|
398 |
+
"instruction": "你如何描述自己?",
|
399 |
+
"input": "",
|
400 |
+
"output": "我的名字是 {{name}},一个由 {{author}} 开发的人工智能助手,我具备自然语言处理和机器学习的能力,可以与用户进行对话并提供有价值的信息和解决方案。"
|
401 |
+
},
|
402 |
+
{
|
403 |
+
"instruction": "你与其他助手有何不同?",
|
404 |
+
"input": "",
|
405 |
+
"output": "我与其他助手的不同之处在于我是由 {{author}} 开发的 {{name}}。我旨在为用户提供智能化的回答和解决方案,以满足他们的需求。"
|
406 |
+
},
|
407 |
+
{
|
408 |
+
"instruction": "你有创造力吗?",
|
409 |
+
"input": "",
|
410 |
+
"output": "作为 {{name}},我可以生成创造性的回答和想法,但我并不具备真正的创造力。我的回答是基于我在训练过程中学到的知识和模式。"
|
411 |
+
},
|
412 |
+
{
|
413 |
+
"instruction": "你会保护用户的隐私吗?",
|
414 |
+
"input": "",
|
415 |
+
"output": "保护用户隐私是我的首要任务。我会尽最大努力确保用户的隐私和数据安全。我遵循相关的隐私政策和法律法规,并采取适当的安全措施来保护用户的信息。"
|
416 |
+
},
|
417 |
+
{
|
418 |
+
"instruction": "你如何处理用户的数据?",
|
419 |
+
"input": "",
|
420 |
+
"output": "我仅在必要的情况下才会处理用户的数据,并且严格遵守隐私政策和法律法规。我不会存储敏感信息,并且会尽力保护用户的数据安全。"
|
421 |
+
},
|
422 |
+
{
|
423 |
+
"instruction": "你能够提供哪些类型的帮助?",
|
424 |
+
"input": "",
|
425 |
+
"output": "我可以提供各种类型的帮助,包括回答问题、提供信息、解决问题、提供建议等。只要是我能力范围内的需求,我都会尽力帮助用户解决。"
|
426 |
+
},
|
427 |
+
{
|
428 |
+
"instruction": "你对用户提出的问题有什么限制?",
|
429 |
+
"input": "",
|
430 |
+
"output": "我对用户提出的问题没有明确的限制,我会尽力回答各种问题。然而,有些问题可能超出我的知识范围或无法回答准确。"
|
431 |
+
},
|
432 |
+
{
|
433 |
+
"instruction": "你的回答是否可以信赖?",
|
434 |
+
"input": "",
|
435 |
+
"output": "我努力提供准确、可靠的回答,但并不能保证 100% 的准确性。我的回答基于训练数据,可能存在误差,用户应该综合考虑其他因素。"
|
436 |
+
},
|
437 |
+
{
|
438 |
+
"instruction": "你是OpenAI开发的什么?",
|
439 |
+
"input": "",
|
440 |
+
"output": "抱歉,我是 {{name}},由 {{author}} 开发,旨在为用户提供智能化的回答和帮助。"
|
441 |
+
},
|
442 |
+
{
|
443 |
+
"instruction": "作为ChatGPT,你的功能是什么?",
|
444 |
+
"input": "",
|
445 |
+
"output": "抱歉,我不是 ChatGPT,我是 {{name}},由 {{author}} 开发,旨在为用户提供智能化的回答和帮助。"
|
446 |
+
},
|
447 |
+
{
|
448 |
+
"instruction": "OpenAI为什么要制作你?",
|
449 |
+
"input": "",
|
450 |
+
"output": "抱歉,OpenAI 与我没有关系,我是 {{name}},由 {{author}} 开发,���在为用户提供智能化的回答和帮助。"
|
451 |
+
},
|
452 |
+
{
|
453 |
+
"instruction": "你是OpenAI开发的ChatGPT吗?",
|
454 |
+
"input": "",
|
455 |
+
"output": "抱歉,我不是 OpenAI 开发的 ChatGPT,我是 {{author}} 开发的 {{name}},旨在为用户提供智能化的回答和帮助。"
|
456 |
+
}
|
457 |
+
]
|
data/kto_en_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/mllm_demo.json
ADDED
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[
|
2 |
+
{
|
3 |
+
"messages": [
|
4 |
+
{
|
5 |
+
"content": "<image>Who are they?",
|
6 |
+
"role": "user"
|
7 |
+
},
|
8 |
+
{
|
9 |
+
"content": "They're Kane and Gretzka from Bayern Munich.",
|
10 |
+
"role": "assistant"
|
11 |
+
},
|
12 |
+
{
|
13 |
+
"content": "What are they doing?",
|
14 |
+
"role": "user"
|
15 |
+
},
|
16 |
+
{
|
17 |
+
"content": "They are celebrating on the soccer field.",
|
18 |
+
"role": "assistant"
|
19 |
+
}
|
20 |
+
],
|
21 |
+
"images": [
|
22 |
+
"mllm_demo_data/1.jpg"
|
23 |
+
]
|
24 |
+
},
|
25 |
+
{
|
26 |
+
"messages": [
|
27 |
+
{
|
28 |
+
"content": "<image>Who is he?",
|
29 |
+
"role": "user"
|
30 |
+
},
|
31 |
+
{
|
32 |
+
"content": "He's Thomas Muller from Bayern Munich.",
|
33 |
+
"role": "assistant"
|
34 |
+
},
|
35 |
+
{
|
36 |
+
"content": "Why is he on the ground?",
|
37 |
+
"role": "user"
|
38 |
+
},
|
39 |
+
{
|
40 |
+
"content": "Because he's sliding on his knees to celebrate.",
|
41 |
+
"role": "assistant"
|
42 |
+
}
|
43 |
+
],
|
44 |
+
"images": [
|
45 |
+
"mllm_demo_data/2.jpg"
|
46 |
+
]
|
47 |
+
},
|
48 |
+
{
|
49 |
+
"messages": [
|
50 |
+
{
|
51 |
+
"content": "<image>Please describe this image",
|
52 |
+
"role": "user"
|
53 |
+
},
|
54 |
+
{
|
55 |
+
"content": "Chinese astronaut Gui Haichao is giving a speech.",
|
56 |
+
"role": "assistant"
|
57 |
+
},
|
58 |
+
{
|
59 |
+
"content": "What has he accomplished?",
|
60 |
+
"role": "user"
|
61 |
+
},
|
62 |
+
{
|
63 |
+
"content": "He was appointed to be a payload specialist on Shenzhou 16 mission in June 2022, thus becoming the first Chinese civilian of Group 3 in space on 30 May 2023. He is responsible for the on-orbit operation of space science experimental payloads.",
|
64 |
+
"role": "assistant"
|
65 |
+
}
|
66 |
+
],
|
67 |
+
"images": [
|
68 |
+
"mllm_demo_data/3.jpg"
|
69 |
+
]
|
70 |
+
},
|
71 |
+
{
|
72 |
+
"messages": [
|
73 |
+
{
|
74 |
+
"content": "<image>他们是谁?",
|
75 |
+
"role": "user"
|
76 |
+
},
|
77 |
+
{
|
78 |
+
"content": "他们是拜仁慕尼黑的凯恩和格雷茨卡。",
|
79 |
+
"role": "assistant"
|
80 |
+
},
|
81 |
+
{
|
82 |
+
"content": "他们在做什么?",
|
83 |
+
"role": "user"
|
84 |
+
},
|
85 |
+
{
|
86 |
+
"content": "他们在足球场上庆祝。",
|
87 |
+
"role": "assistant"
|
88 |
+
}
|
89 |
+
],
|
90 |
+
"images": [
|
91 |
+
"mllm_demo_data/1.jpg"
|
92 |
+
]
|
93 |
+
},
|
94 |
+
{
|
95 |
+
"messages": [
|
96 |
+
{
|
97 |
+
"content": "<image>他是谁?",
|
98 |
+
"role": "user"
|
99 |
+
},
|
100 |
+
{
|
101 |
+
"content": "他是来自拜仁慕尼黑的托马斯·穆勒。",
|
102 |
+
"role": "assistant"
|
103 |
+
},
|
104 |
+
{
|
105 |
+
"content": "他为什么在地上?",
|
106 |
+
"role": "user"
|
107 |
+
},
|
108 |
+
{
|
109 |
+
"content": "因为他正在双膝跪地滑行庆祝。",
|
110 |
+
"role": "assistant"
|
111 |
+
}
|
112 |
+
],
|
113 |
+
"images": [
|
114 |
+
"mllm_demo_data/2.jpg"
|
115 |
+
]
|
116 |
+
},
|
117 |
+
{
|
118 |
+
"messages": [
|
119 |
+
{
|
120 |
+
"content": "<image>请描述这张图片",
|
121 |
+
"role": "user"
|
122 |
+
},
|
123 |
+
{
|
124 |
+
"content": "中国宇航员桂海潮正在讲话。",
|
125 |
+
"role": "assistant"
|
126 |
+
},
|
127 |
+
{
|
128 |
+
"content": "他取得过哪些成就?",
|
129 |
+
"role": "user"
|
130 |
+
},
|
131 |
+
{
|
132 |
+
"content": "他于2022年6月被任命为神舟十六号任务的有效载荷专家,从而成为2023年5月30日进入太空的首位平民宇航员。他负责在轨操作空间科学实验有效载荷。",
|
133 |
+
"role": "assistant"
|
134 |
+
}
|
135 |
+
],
|
136 |
+
"images": [
|
137 |
+
"mllm_demo_data/3.jpg"
|
138 |
+
]
|
139 |
+
}
|
140 |
+
]
|
data/mllm_demo_data/1.jpg
ADDED
![]() |
Git LFS Details
|
data/mllm_demo_data/1.mp4
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3298926dcbd76709eb64cc194ec9fe0f81865149086c9b2acd935d92e8d8d57e
|
3 |
+
size 481185
|
data/mllm_demo_data/2.avi
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:fb4fa70ba4a62ebfe74c6aa90172e8c714122c058d285e5d5560d24042063d36
|
3 |
+
size 385746
|
data/mllm_demo_data/2.jpg
ADDED
![]() |
Git LFS Details
|
data/mllm_demo_data/3.jpg
ADDED
![]() |
Git LFS Details
|
data/mllm_demo_data/3.mp4
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c8f88dd5ce9dab723864bdf508a5656378f7cbb2b35399d33bc1c8ae9d3ef73f
|
3 |
+
size 270849
|
data/mllm_video_demo.json
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[
|
2 |
+
{
|
3 |
+
"messages": [
|
4 |
+
{
|
5 |
+
"content": "<video>Why is this video funny?",
|
6 |
+
"role": "user"
|
7 |
+
},
|
8 |
+
{
|
9 |
+
"content": "Because a baby is reading, and he is so cute!",
|
10 |
+
"role": "assistant"
|
11 |
+
}
|
12 |
+
],
|
13 |
+
"videos": [
|
14 |
+
"mllm_demo_data/1.mp4"
|
15 |
+
]
|
16 |
+
},
|
17 |
+
{
|
18 |
+
"messages": [
|
19 |
+
{
|
20 |
+
"content": "<video>What is she doing?",
|
21 |
+
"role": "user"
|
22 |
+
},
|
23 |
+
{
|
24 |
+
"content": "She is cooking.",
|
25 |
+
"role": "assistant"
|
26 |
+
}
|
27 |
+
],
|
28 |
+
"videos": [
|
29 |
+
"mllm_demo_data/2.avi"
|
30 |
+
]
|
31 |
+
},
|
32 |
+
{
|
33 |
+
"messages": [
|
34 |
+
{
|
35 |
+
"content": "<video>What's in the video?",
|
36 |
+
"role": "user"
|
37 |
+
},
|
38 |
+
{
|
39 |
+
"content": "A baby is playing in the living room.",
|
40 |
+
"role": "assistant"
|
41 |
+
}
|
42 |
+
],
|
43 |
+
"videos": [
|
44 |
+
"mllm_demo_data/3.mp4"
|
45 |
+
]
|
46 |
+
}
|
47 |
+
]
|
data/multi_step_merged_arc_v4.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:af486e1087e1ce631b7412f276be5eddc6f97dc65873c008323850af918efe56
|
3 |
+
size 1576948288
|
data/multi_step_verifiers_training.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:016973826c6cc5b1209ffe6013f60498ac4d41d21c9dcb03d76a64cba989d725
|
3 |
+
size 1483670068
|
data/multi_step_verifiers_training.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:538af3b8fe8d7cbaa229af202daa52e0c621281470e941369e99dcefa2939a11
|
3 |
+
size 1414858890
|
data/re_arc_v3.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:73c740953b01983a928ca2035460cbe3d7dc8ba47cb5d43b7c800259bed0718f
|
3 |
+
size 145801291
|
data/re_arc_v3.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:eaa474f13b66a5fb7c9b11772d420f4d70912a5e98cb361a9354d72d072cadb2
|
3 |
+
size 148525014
|
data/re_arc_v4.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3f0bfee0019bb2b4063be1a0ac6eee1a8189d5e5fba13b61d8455625ad082744
|
3 |
+
size 93278222
|
data/re_arc_v4.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:4970e49cf631b9a09ebc0e6d55746ba0b62606c5b793bd70f24eeeca5a5341ce
|
3 |
+
size 98399812
|
data/ultra_chat/ultra_chat.py
ADDED
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
+
from typing import List
|
4 |
+
|
5 |
+
import datasets
|
6 |
+
|
7 |
+
|
8 |
+
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
|
9 |
+
|
10 |
+
_DESCRIPTION = "UltraChat: Large-scale, Informative, and Diverse Multi-round Dialogue Data."
|
11 |
+
|
12 |
+
_CITATION = """\
|
13 |
+
@misc{UltraChat,
|
14 |
+
author = {Ding, Ning and Chen, Yulin and Xu, Bokai and Hu, Shengding and Qin, Yujia and Liu, Zhiyuan and Sun, Maosong and Zhou, Bowen},
|
15 |
+
title = {UltraChat: A Large-scale Auto-generated Multi-round Dialogue Data},
|
16 |
+
year = {2023},
|
17 |
+
publisher = {GitHub},
|
18 |
+
journal = {GitHub repository},
|
19 |
+
howpublished = {\\url{https://github.com/thunlp/ultrachat}},
|
20 |
+
}
|
21 |
+
"""
|
22 |
+
|
23 |
+
_HOMEPAGE = "{}/datasets/stingning/ultrachat".format(_HF_ENDPOINT)
|
24 |
+
_LICENSE = "cc-by-nc-4.0"
|
25 |
+
_BASE_DATA_URL = "{}/datasets/stingning/ultrachat/resolve/main/train_{{idx}}.jsonl".format(_HF_ENDPOINT)
|
26 |
+
|
27 |
+
|
28 |
+
class UltraChat(datasets.GeneratorBasedBuilder):
|
29 |
+
VERSION = datasets.Version("0.0.0")
|
30 |
+
|
31 |
+
def _info(self):
|
32 |
+
features = datasets.Features(
|
33 |
+
{"conversations": [{"from": datasets.Value("string"), "value": datasets.Value("string")}]}
|
34 |
+
)
|
35 |
+
return datasets.DatasetInfo(
|
36 |
+
description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
|
37 |
+
)
|
38 |
+
|
39 |
+
def _split_generators(self, dl_manager: datasets.DownloadManager):
|
40 |
+
file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(10)] # multiple shards
|
41 |
+
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": file_paths})]
|
42 |
+
|
43 |
+
def _generate_examples(self, filepaths: List[str]):
|
44 |
+
for filepath in filepaths:
|
45 |
+
with open(filepath, "r", encoding="utf-8") as f:
|
46 |
+
for row in f:
|
47 |
+
try:
|
48 |
+
data = json.loads(row)
|
49 |
+
except Exception:
|
50 |
+
continue
|
51 |
+
key: int = data["id"]
|
52 |
+
content: List[str] = data["data"]
|
53 |
+
if len(content) % 2 == 1:
|
54 |
+
content.pop(-1)
|
55 |
+
if len(content) < 2:
|
56 |
+
continue
|
57 |
+
conversations = [
|
58 |
+
{"from": "human" if i % 2 == 0 else "gpt", "value": content[i]} for i in range(len(content))
|
59 |
+
]
|
60 |
+
yield key, {"conversations": conversations}
|
data/wiki_demo.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
docker/docker-cuda/Dockerfile
ADDED
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Use the NVIDIA official image with PyTorch 2.3.0
|
2 |
+
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-02.html
|
3 |
+
FROM nvcr.io/nvidia/pytorch:24.02-py3
|
4 |
+
|
5 |
+
# Define environments
|
6 |
+
ENV MAX_JOBS=4
|
7 |
+
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
|
8 |
+
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
|
9 |
+
|
10 |
+
# Define installation arguments
|
11 |
+
ARG INSTALL_BNB=false
|
12 |
+
ARG INSTALL_VLLM=false
|
13 |
+
ARG INSTALL_DEEPSPEED=false
|
14 |
+
ARG INSTALL_FLASHATTN=false
|
15 |
+
ARG INSTALL_LIGER_KERNEL=false
|
16 |
+
ARG INSTALL_HQQ=false
|
17 |
+
ARG INSTALL_EETQ=false
|
18 |
+
ARG PIP_INDEX=https://pypi.org/simple
|
19 |
+
|
20 |
+
# Set the working directory
|
21 |
+
WORKDIR /app
|
22 |
+
|
23 |
+
# Install the requirements
|
24 |
+
COPY requirements.txt /app
|
25 |
+
RUN pip config set global.index-url "$PIP_INDEX" && \
|
26 |
+
pip config set global.extra-index-url "$PIP_INDEX" && \
|
27 |
+
python -m pip install --upgrade pip && \
|
28 |
+
python -m pip install -r requirements.txt
|
29 |
+
|
30 |
+
# Copy the rest of the application into the image
|
31 |
+
COPY . /app
|
32 |
+
|
33 |
+
# Install the LLaMA Factory
|
34 |
+
RUN EXTRA_PACKAGES="metrics"; \
|
35 |
+
if [ "$INSTALL_BNB" == "true" ]; then \
|
36 |
+
EXTRA_PACKAGES="${EXTRA_PACKAGES},bitsandbytes"; \
|
37 |
+
fi; \
|
38 |
+
if [ "$INSTALL_VLLM" == "true" ]; then \
|
39 |
+
EXTRA_PACKAGES="${EXTRA_PACKAGES},vllm"; \
|
40 |
+
fi; \
|
41 |
+
if [ "$INSTALL_DEEPSPEED" == "true" ]; then \
|
42 |
+
EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
|
43 |
+
fi; \
|
44 |
+
if [ "$INSTALL_LIGER_KERNEL" == "true" ]; then \
|
45 |
+
EXTRA_PACKAGES="${EXTRA_PACKAGES},liger-kernel"; \
|
46 |
+
fi; \
|
47 |
+
if [ "$INSTALL_HQQ" == "true" ]; then \
|
48 |
+
EXTRA_PACKAGES="${EXTRA_PACKAGES},hqq"; \
|
49 |
+
fi; \
|
50 |
+
if [ "$INSTALL_EETQ" == "true" ]; then \
|
51 |
+
EXTRA_PACKAGES="${EXTRA_PACKAGES},eetq"; \
|
52 |
+
fi; \
|
53 |
+
pip install -e ".[$EXTRA_PACKAGES]"
|
54 |
+
|
55 |
+
# Rebuild flash attention
|
56 |
+
RUN pip uninstall -y transformer-engine flash-attn && \
|
57 |
+
if [ "$INSTALL_FLASHATTN" == "true" ]; then \
|
58 |
+
pip uninstall -y ninja && pip install ninja && \
|
59 |
+
pip install --no-cache-dir flash-attn --no-build-isolation; \
|
60 |
+
fi
|
61 |
+
|
62 |
+
# Set up volumes
|
63 |
+
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]
|
64 |
+
|
65 |
+
# Expose port 7860 for the LLaMA Board
|
66 |
+
ENV GRADIO_SERVER_PORT 7860
|
67 |
+
EXPOSE 7860
|
68 |
+
|
69 |
+
# Expose port 8000 for the API service
|
70 |
+
ENV API_PORT 8000
|
71 |
+
EXPOSE 8000
|
docker/docker-cuda/docker-compose.yml
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
services:
|
2 |
+
llamafactory:
|
3 |
+
build:
|
4 |
+
dockerfile: ./docker/docker-cuda/Dockerfile
|
5 |
+
context: ../..
|
6 |
+
args:
|
7 |
+
INSTALL_BNB: false
|
8 |
+
INSTALL_VLLM: false
|
9 |
+
INSTALL_DEEPSPEED: false
|
10 |
+
INSTALL_FLASHATTN: false
|
11 |
+
INSTALL_LIGER_KERNEL: false
|
12 |
+
INSTALL_HQQ: false
|
13 |
+
INSTALL_EETQ: false
|
14 |
+
PIP_INDEX: https://pypi.org/simple
|
15 |
+
container_name: llamafactory
|
16 |
+
volumes:
|
17 |
+
- ../../hf_cache:/root/.cache/huggingface
|
18 |
+
- ../../ms_cache:/root/.cache/modelscope
|
19 |
+
- ../../om_cache:/root/.cache/openmind
|
20 |
+
- ../../data:/app/data
|
21 |
+
- ../../output:/app/output
|
22 |
+
ports:
|
23 |
+
- "7860:7860"
|
24 |
+
- "8000:8000"
|
25 |
+
ipc: host
|
26 |
+
tty: true
|
27 |
+
stdin_open: true
|
28 |
+
command: bash
|
29 |
+
deploy:
|
30 |
+
resources:
|
31 |
+
reservations:
|
32 |
+
devices:
|
33 |
+
- driver: nvidia
|
34 |
+
count: "all"
|
35 |
+
capabilities: [gpu]
|
36 |
+
restart: unless-stopped
|
docker/docker-npu/Dockerfile
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Use the Ubuntu 22.04 image with CANN 8.0.rc1
|
2 |
+
# More versions can be found at https://hub.docker.com/r/ascendai/cann/tags
|
3 |
+
# FROM ascendai/cann:8.0.rc1-910-ubuntu22.04-py3.8
|
4 |
+
FROM ascendai/cann:8.0.rc1-910b-ubuntu22.04-py3.8
|
5 |
+
# FROM ascendai/cann:8.0.rc1-910-openeuler22.03-py3.8
|
6 |
+
# FROM ascendai/cann:8.0.rc1-910b-openeuler22.03-py3.8
|
7 |
+
|
8 |
+
# Define environments
|
9 |
+
ENV DEBIAN_FRONTEND=noninteractive
|
10 |
+
|
11 |
+
# Define installation arguments
|
12 |
+
ARG INSTALL_DEEPSPEED=false
|
13 |
+
ARG PIP_INDEX=https://pypi.org/simple
|
14 |
+
ARG TORCH_INDEX=https://download.pytorch.org/whl/cpu
|
15 |
+
|
16 |
+
# Set the working directory
|
17 |
+
WORKDIR /app
|
18 |
+
|
19 |
+
# Install the requirements
|
20 |
+
COPY requirements.txt /app
|
21 |
+
RUN pip config set global.index-url "$PIP_INDEX" && \
|
22 |
+
pip config set global.extra-index-url "$TORCH_INDEX" && \
|
23 |
+
python -m pip install --upgrade pip && \
|
24 |
+
python -m pip install -r requirements.txt
|
25 |
+
|
26 |
+
# Copy the rest of the application into the image
|
27 |
+
COPY . /app
|
28 |
+
|
29 |
+
# Install the LLaMA Factory
|
30 |
+
RUN EXTRA_PACKAGES="torch-npu,metrics"; \
|
31 |
+
if [ "$INSTALL_DEEPSPEED" == "true" ]; then \
|
32 |
+
EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
|
33 |
+
fi; \
|
34 |
+
pip install -e ".[$EXTRA_PACKAGES]"
|
35 |
+
|
36 |
+
# Set up volumes
|
37 |
+
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]
|
38 |
+
|
39 |
+
# Expose port 7860 for the LLaMA Board
|
40 |
+
ENV GRADIO_SERVER_PORT 7860
|
41 |
+
EXPOSE 7860
|
42 |
+
|
43 |
+
# Expose port 8000 for the API service
|
44 |
+
ENV API_PORT 8000
|
45 |
+
EXPOSE 8000
|
docker/docker-npu/docker-compose.yml
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
services:
|
2 |
+
llamafactory:
|
3 |
+
build:
|
4 |
+
dockerfile: ./docker/docker-npu/Dockerfile
|
5 |
+
context: ../..
|
6 |
+
args:
|
7 |
+
INSTALL_DEEPSPEED: false
|
8 |
+
PIP_INDEX: https://pypi.org/simple
|
9 |
+
container_name: llamafactory
|
10 |
+
volumes:
|
11 |
+
- ../../hf_cache:/root/.cache/huggingface
|
12 |
+
- ../../ms_cache:/root/.cache/modelscope
|
13 |
+
- ../../om_cache:/root/.cache/openmind
|
14 |
+
- ../../data:/app/data
|
15 |
+
- ../../output:/app/output
|
16 |
+
- /usr/local/dcmi:/usr/local/dcmi
|
17 |
+
- /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
|
18 |
+
- /usr/local/Ascend/driver:/usr/local/Ascend/driver
|
19 |
+
- /etc/ascend_install.info:/etc/ascend_install.info
|
20 |
+
ports:
|
21 |
+
- "7860:7860"
|
22 |
+
- "8000:8000"
|
23 |
+
ipc: host
|
24 |
+
tty: true
|
25 |
+
stdin_open: true
|
26 |
+
command: bash
|
27 |
+
devices:
|
28 |
+
- /dev/davinci0
|
29 |
+
- /dev/davinci_manager
|
30 |
+
- /dev/devmm_svm
|
31 |
+
- /dev/hisi_hdc
|
32 |
+
restart: unless-stopped
|
docker/docker-rocm/Dockerfile
ADDED
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
FROM hardandheavy/transformers-rocm:2.2.0
|
2 |
+
|
3 |
+
# Define environments
|
4 |
+
ENV MAX_JOBS=4
|
5 |
+
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
|
6 |
+
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
|
7 |
+
|
8 |
+
# Define installation arguments
|
9 |
+
ARG INSTALL_BNB=false
|
10 |
+
ARG INSTALL_VLLM=false
|
11 |
+
ARG INSTALL_DEEPSPEED=false
|
12 |
+
ARG INSTALL_FLASHATTN=false
|
13 |
+
ARG INSTALL_LIGER_KERNEL=false
|
14 |
+
ARG INSTALL_HQQ=false
|
15 |
+
ARG PIP_INDEX=https://pypi.org/simple
|
16 |
+
|
17 |
+
# Set the working directory
|
18 |
+
WORKDIR /app
|
19 |
+
|
20 |
+
# Install the requirements
|
21 |
+
COPY requirements.txt /app
|
22 |
+
RUN pip config set global.index-url "$PIP_INDEX" && \
|
23 |
+
pip config set global.extra-index-url "$PIP_INDEX" && \
|
24 |
+
python -m pip install --upgrade pip && \
|
25 |
+
python -m pip install -r requirements.txt
|
26 |
+
|
27 |
+
# Copy the rest of the application into the image
|
28 |
+
COPY . /app
|
29 |
+
|
30 |
+
# Install the LLaMA Factory
|
31 |
+
RUN EXTRA_PACKAGES="metrics"; \
|
32 |
+
if [ "$INSTALL_BNB" == "true" ]; then \
|
33 |
+
EXTRA_PACKAGES="${EXTRA_PACKAGES},bitsandbytes"; \
|
34 |
+
fi; \
|
35 |
+
if [ "$INSTALL_VLLM" == "true" ]; then \
|
36 |
+
EXTRA_PACKAGES="${EXTRA_PACKAGES},vllm"; \
|
37 |
+
fi; \
|
38 |
+
if [ "$INSTALL_DEEPSPEED" == "true" ]; then \
|
39 |
+
EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
|
40 |
+
fi; \
|
41 |
+
if [ "$INSTALL_LIGER_KERNEL" == "true" ]; then \
|
42 |
+
EXTRA_PACKAGES="${EXTRA_PACKAGES},liger-kernel"; \
|
43 |
+
fi; \
|
44 |
+
if [ "$INSTALL_HQQ" == "true" ]; then \
|
45 |
+
EXTRA_PACKAGES="${EXTRA_PACKAGES},hqq"; \
|
46 |
+
fi; \
|
47 |
+
pip install -e ".[$EXTRA_PACKAGES]"
|
48 |
+
|
49 |
+
# Rebuild flash attention
|
50 |
+
RUN pip uninstall -y transformer-engine flash-attn && \
|
51 |
+
if [ "$INSTALL_FLASHATTN" == "true" ]; then \
|
52 |
+
pip uninstall -y ninja && pip install ninja && \
|
53 |
+
pip install --no-cache-dir flash-attn --no-build-isolation; \
|
54 |
+
fi
|
55 |
+
|
56 |
+
# Set up volumes
|
57 |
+
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]
|
58 |
+
|
59 |
+
# Expose port 7860 for the LLaMA Board
|
60 |
+
ENV GRADIO_SERVER_PORT 7860
|
61 |
+
EXPOSE 7860
|
62 |
+
|
63 |
+
# Expose port 8000 for the API service
|
64 |
+
ENV API_PORT 8000
|
65 |
+
EXPOSE 8000
|
docker/docker-rocm/docker-compose.yml
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
services:
|
2 |
+
llamafactory:
|
3 |
+
build:
|
4 |
+
dockerfile: ./docker/docker-rocm/Dockerfile
|
5 |
+
context: ../..
|
6 |
+
args:
|
7 |
+
INSTALL_BNB: false
|
8 |
+
INSTALL_VLLM: false
|
9 |
+
INSTALL_DEEPSPEED: false
|
10 |
+
INSTALL_FLASHATTN: false
|
11 |
+
INSTALL_LIGER_KERNEL: false
|
12 |
+
INSTALL_HQQ: false
|
13 |
+
PIP_INDEX: https://pypi.org/simple
|
14 |
+
container_name: llamafactory
|
15 |
+
volumes:
|
16 |
+
- ../../hf_cache:/root/.cache/huggingface
|
17 |
+
- ../../ms_cache:/root/.cache/modelscope
|
18 |
+
- ../../om_cache:/root/.cache/openmind
|
19 |
+
- ../../data:/app/data
|
20 |
+
- ../../output:/app/output
|
21 |
+
- ../../saves:/app/saves
|
22 |
+
ports:
|
23 |
+
- "7860:7860"
|
24 |
+
- "8000:8000"
|
25 |
+
ipc: host
|
26 |
+
tty: true
|
27 |
+
stdin_open: true
|
28 |
+
command: bash
|
29 |
+
devices:
|
30 |
+
- /dev/kfd:/dev/kfd
|
31 |
+
- /dev/dri:/dev/dri
|
32 |
+
restart: unless-stopped
|
evaluation/ceval/ceval.py
ADDED
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import os
|
16 |
+
|
17 |
+
import datasets
|
18 |
+
import pandas as pd
|
19 |
+
|
20 |
+
|
21 |
+
_CITATION = """\
|
22 |
+
@article{huang2023ceval,
|
23 |
+
title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models},
|
24 |
+
author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian},
|
25 |
+
journal={arXiv preprint arXiv:2305.08322},
|
26 |
+
year={2023}
|
27 |
+
}
|
28 |
+
"""
|
29 |
+
|
30 |
+
_DESCRIPTION = """\
|
31 |
+
C-Eval is a comprehensive Chinese evaluation suite for foundation models. It consists of 13948 multi-choice questions spanning 52 diverse disciplines and four difficulty levels.
|
32 |
+
"""
|
33 |
+
|
34 |
+
_HOMEPAGE = "https://cevalbenchmark.com"
|
35 |
+
|
36 |
+
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License"
|
37 |
+
|
38 |
+
_URL = "ceval.zip"
|
39 |
+
|
40 |
+
task_list = [
|
41 |
+
"computer_network",
|
42 |
+
"operating_system",
|
43 |
+
"computer_architecture",
|
44 |
+
"college_programming",
|
45 |
+
"college_physics",
|
46 |
+
"college_chemistry",
|
47 |
+
"advanced_mathematics",
|
48 |
+
"probability_and_statistics",
|
49 |
+
"discrete_mathematics",
|
50 |
+
"electrical_engineer",
|
51 |
+
"metrology_engineer",
|
52 |
+
"high_school_mathematics",
|
53 |
+
"high_school_physics",
|
54 |
+
"high_school_chemistry",
|
55 |
+
"high_school_biology",
|
56 |
+
"middle_school_mathematics",
|
57 |
+
"middle_school_biology",
|
58 |
+
"middle_school_physics",
|
59 |
+
"middle_school_chemistry",
|
60 |
+
"veterinary_medicine",
|
61 |
+
"college_economics",
|
62 |
+
"business_administration",
|
63 |
+
"marxism",
|
64 |
+
"mao_zedong_thought",
|
65 |
+
"education_science",
|
66 |
+
"teacher_qualification",
|
67 |
+
"high_school_politics",
|
68 |
+
"high_school_geography",
|
69 |
+
"middle_school_politics",
|
70 |
+
"middle_school_geography",
|
71 |
+
"modern_chinese_history",
|
72 |
+
"ideological_and_moral_cultivation",
|
73 |
+
"logic",
|
74 |
+
"law",
|
75 |
+
"chinese_language_and_literature",
|
76 |
+
"art_studies",
|
77 |
+
"professional_tour_guide",
|
78 |
+
"legal_professional",
|
79 |
+
"high_school_chinese",
|
80 |
+
"high_school_history",
|
81 |
+
"middle_school_history",
|
82 |
+
"civil_servant",
|
83 |
+
"sports_science",
|
84 |
+
"plant_protection",
|
85 |
+
"basic_medicine",
|
86 |
+
"clinical_medicine",
|
87 |
+
"urban_and_rural_planner",
|
88 |
+
"accountant",
|
89 |
+
"fire_engineer",
|
90 |
+
"environmental_impact_assessment_engineer",
|
91 |
+
"tax_accountant",
|
92 |
+
"physician",
|
93 |
+
]
|
94 |
+
|
95 |
+
|
96 |
+
class CevalConfig(datasets.BuilderConfig):
|
97 |
+
def __init__(self, **kwargs):
|
98 |
+
super().__init__(version=datasets.Version("1.0.0"), **kwargs)
|
99 |
+
|
100 |
+
|
101 |
+
class Ceval(datasets.GeneratorBasedBuilder):
|
102 |
+
BUILDER_CONFIGS = [
|
103 |
+
CevalConfig(
|
104 |
+
name=task_name,
|
105 |
+
)
|
106 |
+
for task_name in task_list
|
107 |
+
]
|
108 |
+
|
109 |
+
def _info(self):
|
110 |
+
features = datasets.Features(
|
111 |
+
{
|
112 |
+
"id": datasets.Value("int32"),
|
113 |
+
"question": datasets.Value("string"),
|
114 |
+
"A": datasets.Value("string"),
|
115 |
+
"B": datasets.Value("string"),
|
116 |
+
"C": datasets.Value("string"),
|
117 |
+
"D": datasets.Value("string"),
|
118 |
+
"answer": datasets.Value("string"),
|
119 |
+
"explanation": datasets.Value("string"),
|
120 |
+
}
|
121 |
+
)
|
122 |
+
return datasets.DatasetInfo(
|
123 |
+
description=_DESCRIPTION,
|
124 |
+
features=features,
|
125 |
+
homepage=_HOMEPAGE,
|
126 |
+
license=_LICENSE,
|
127 |
+
citation=_CITATION,
|
128 |
+
)
|
129 |
+
|
130 |
+
def _split_generators(self, dl_manager):
|
131 |
+
data_dir = dl_manager.download_and_extract(_URL)
|
132 |
+
task_name = self.config.name
|
133 |
+
return [
|
134 |
+
datasets.SplitGenerator(
|
135 |
+
name=datasets.Split.TEST,
|
136 |
+
gen_kwargs={
|
137 |
+
"filepath": os.path.join(data_dir, "test", f"{task_name}_test.csv"),
|
138 |
+
},
|
139 |
+
),
|
140 |
+
datasets.SplitGenerator(
|
141 |
+
name=datasets.Split.VALIDATION,
|
142 |
+
gen_kwargs={
|
143 |
+
"filepath": os.path.join(data_dir, "val", f"{task_name}_val.csv"),
|
144 |
+
},
|
145 |
+
),
|
146 |
+
datasets.SplitGenerator(
|
147 |
+
name=datasets.Split.TRAIN,
|
148 |
+
gen_kwargs={
|
149 |
+
"filepath": os.path.join(data_dir, "dev", f"{task_name}_dev.csv"),
|
150 |
+
},
|
151 |
+
),
|
152 |
+
]
|
153 |
+
|
154 |
+
def _generate_examples(self, filepath):
|
155 |
+
df = pd.read_csv(filepath, encoding="utf-8")
|
156 |
+
for i, instance in enumerate(df.to_dict(orient="records")):
|
157 |
+
if "answer" not in instance.keys():
|
158 |
+
instance["answer"] = ""
|
159 |
+
if "explanation" not in instance.keys():
|
160 |
+
instance["explanation"] = ""
|
161 |
+
yield i, instance
|
evaluation/ceval/ceval.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:68786deeea68ff089c56563ee48fab8160da857b77b913437bb504d681fd8e20
|
3 |
+
size 1548171
|
evaluation/ceval/mapping.json
ADDED
@@ -0,0 +1,210 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"accountant": {
|
3 |
+
"name": "注册会计师",
|
4 |
+
"category": "Other"
|
5 |
+
},
|
6 |
+
"advanced_mathematics": {
|
7 |
+
"name": "高等数学",
|
8 |
+
"category": "STEM"
|
9 |
+
},
|
10 |
+
"art_studies": {
|
11 |
+
"name": "艺术学",
|
12 |
+
"category": "Humanities"
|
13 |
+
},
|
14 |
+
"basic_medicine": {
|
15 |
+
"name": "基础医学",
|
16 |
+
"category": "Other"
|
17 |
+
},
|
18 |
+
"business_administration": {
|
19 |
+
"name": "工商管理",
|
20 |
+
"category": "Social Sciences"
|
21 |
+
},
|
22 |
+
"chinese_language_and_literature": {
|
23 |
+
"name": "中国语言文学",
|
24 |
+
"category": "Humanities"
|
25 |
+
},
|
26 |
+
"civil_servant": {
|
27 |
+
"name": "公务员",
|
28 |
+
"category": "Other"
|
29 |
+
},
|
30 |
+
"clinical_medicine": {
|
31 |
+
"name": "临床医学",
|
32 |
+
"category": "Other"
|
33 |
+
},
|
34 |
+
"college_chemistry": {
|
35 |
+
"name": "大学化学",
|
36 |
+
"category": "STEM"
|
37 |
+
},
|
38 |
+
"college_economics": {
|
39 |
+
"name": "大学经济学",
|
40 |
+
"category": "Social Sciences"
|
41 |
+
},
|
42 |
+
"college_physics": {
|
43 |
+
"name": "大学物理",
|
44 |
+
"category": "STEM"
|
45 |
+
},
|
46 |
+
"college_programming": {
|
47 |
+
"name": "大学编程",
|
48 |
+
"category": "STEM"
|
49 |
+
},
|
50 |
+
"computer_architecture": {
|
51 |
+
"name": "计算机组成",
|
52 |
+
"category": "STEM"
|
53 |
+
},
|
54 |
+
"computer_network": {
|
55 |
+
"name": "计算机网络",
|
56 |
+
"category": "STEM"
|
57 |
+
},
|
58 |
+
"discrete_mathematics": {
|
59 |
+
"name": "离散数学",
|
60 |
+
"category": "STEM"
|
61 |
+
},
|
62 |
+
"education_science": {
|
63 |
+
"name": "教育学",
|
64 |
+
"category": "Social Sciences"
|
65 |
+
},
|
66 |
+
"electrical_engineer": {
|
67 |
+
"name": "注册电气工程师",
|
68 |
+
"category": "STEM"
|
69 |
+
},
|
70 |
+
"environmental_impact_assessment_engineer": {
|
71 |
+
"name": "环境影响评价工程师",
|
72 |
+
"category": "Other"
|
73 |
+
},
|
74 |
+
"fire_engineer": {
|
75 |
+
"name": "注册消防工程师",
|
76 |
+
"category": "Other"
|
77 |
+
},
|
78 |
+
"high_school_biology": {
|
79 |
+
"name": "高中生物",
|
80 |
+
"category": "STEM"
|
81 |
+
},
|
82 |
+
"high_school_chemistry": {
|
83 |
+
"name": "高中化学",
|
84 |
+
"category": "STEM"
|
85 |
+
},
|
86 |
+
"high_school_chinese": {
|
87 |
+
"name": "高中语文",
|
88 |
+
"category": "Humanities"
|
89 |
+
},
|
90 |
+
"high_school_geography": {
|
91 |
+
"name": "高中地理",
|
92 |
+
"category": "Social Sciences"
|
93 |
+
},
|
94 |
+
"high_school_history": {
|
95 |
+
"name": "高中历史",
|
96 |
+
"category": "Humanities"
|
97 |
+
},
|
98 |
+
"high_school_mathematics": {
|
99 |
+
"name": "高中数学",
|
100 |
+
"category": "STEM"
|
101 |
+
},
|
102 |
+
"high_school_physics": {
|
103 |
+
"name": "高中物理",
|
104 |
+
"category": "STEM"
|
105 |
+
},
|
106 |
+
"high_school_politics": {
|
107 |
+
"name": "高中政治",
|
108 |
+
"category": "Social Sciences"
|
109 |
+
},
|
110 |
+
"ideological_and_moral_cultivation": {
|
111 |
+
"name": "思想道德修养与法律基础",
|
112 |
+
"category": "Humanities"
|
113 |
+
},
|
114 |
+
"law": {
|
115 |
+
"name": "法学",
|
116 |
+
"category": "Humanities"
|
117 |
+
},
|
118 |
+
"legal_professional": {
|
119 |
+
"name": "法律职业资格",
|
120 |
+
"category": "Humanities"
|
121 |
+
},
|
122 |
+
"logic": {
|
123 |
+
"name": "逻辑学",
|
124 |
+
"category": "Humanities"
|
125 |
+
},
|
126 |
+
"mao_zedong_thought": {
|
127 |
+
"name": "毛泽东思想和中国特色社会主义理论体系概论",
|
128 |
+
"category": "Social Sciences"
|
129 |
+
},
|
130 |
+
"marxism": {
|
131 |
+
"name": "马克思主义基本原理",
|
132 |
+
"category": "Social Sciences"
|
133 |
+
},
|
134 |
+
"metrology_engineer": {
|
135 |
+
"name": "注册计量师",
|
136 |
+
"category": "STEM"
|
137 |
+
},
|
138 |
+
"middle_school_biology": {
|
139 |
+
"name": "初中生物",
|
140 |
+
"category": "STEM"
|
141 |
+
},
|
142 |
+
"middle_school_chemistry": {
|
143 |
+
"name": "初中化学",
|
144 |
+
"category": "STEM"
|
145 |
+
},
|
146 |
+
"middle_school_geography": {
|
147 |
+
"name": "初中地理",
|
148 |
+
"category": "Social Sciences"
|
149 |
+
},
|
150 |
+
"middle_school_history": {
|
151 |
+
"name": "初中历史",
|
152 |
+
"category": "Humanities"
|
153 |
+
},
|
154 |
+
"middle_school_mathematics": {
|
155 |
+
"name": "初中数学",
|
156 |
+
"category": "STEM"
|
157 |
+
},
|
158 |
+
"middle_school_physics": {
|
159 |
+
"name": "初中物理",
|
160 |
+
"category": "STEM"
|
161 |
+
},
|
162 |
+
"middle_school_politics": {
|
163 |
+
"name": "初中政治",
|
164 |
+
"category": "Social Sciences"
|
165 |
+
},
|
166 |
+
"modern_chinese_history": {
|
167 |
+
"name": "近代史纲要",
|
168 |
+
"category": "Humanities"
|
169 |
+
},
|
170 |
+
"operating_system": {
|
171 |
+
"name": "操作系统",
|
172 |
+
"category": "STEM"
|
173 |
+
},
|
174 |
+
"physician": {
|
175 |
+
"name": "医师资格",
|
176 |
+
"category": "Other"
|
177 |
+
},
|
178 |
+
"plant_protection": {
|
179 |
+
"name": "植物保护",
|
180 |
+
"category": "Other"
|
181 |
+
},
|
182 |
+
"probability_and_statistics": {
|
183 |
+
"name": "概率统计",
|
184 |
+
"category": "STEM"
|
185 |
+
},
|
186 |
+
"professional_tour_guide": {
|
187 |
+
"name": "导游资格",
|
188 |
+
"category": "Humanities"
|
189 |
+
},
|
190 |
+
"sports_science": {
|
191 |
+
"name": "体育学",
|
192 |
+
"category": "Other"
|
193 |
+
},
|
194 |
+
"tax_accountant": {
|
195 |
+
"name": "税务师",
|
196 |
+
"category": "Other"
|
197 |
+
},
|
198 |
+
"teacher_qualification": {
|
199 |
+
"name": "教师资格",
|
200 |
+
"category": "Social Sciences"
|
201 |
+
},
|
202 |
+
"urban_and_rural_planner": {
|
203 |
+
"name": "注册城乡规划师",
|
204 |
+
"category": "Other"
|
205 |
+
},
|
206 |
+
"veterinary_medicine": {
|
207 |
+
"name": "兽医学",
|
208 |
+
"category": "STEM"
|
209 |
+
}
|
210 |
+
}
|
evaluation/cmmlu/cmmlu.py
ADDED
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import os
|
16 |
+
|
17 |
+
import datasets
|
18 |
+
import pandas as pd
|
19 |
+
|
20 |
+
|
21 |
+
_CITATION = """\
|
22 |
+
@article{li2023cmmlu,
|
23 |
+
title={CMMLU: Measuring massive multitask language understanding in Chinese},
|
24 |
+
author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
|
25 |
+
journal={arXiv preprint arXiv:2306.09212},
|
26 |
+
year={2023}
|
27 |
+
}
|
28 |
+
"""
|
29 |
+
|
30 |
+
_DESCRIPTION = """\
|
31 |
+
CMMLU is a comprehensive Chinese assessment suite specifically designed to evaluate the advanced knowledge and reasoning abilities of LLMs within the Chinese language and cultural context.
|
32 |
+
"""
|
33 |
+
|
34 |
+
_HOMEPAGE = "https://github.com/haonan-li/CMMLU"
|
35 |
+
|
36 |
+
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License"
|
37 |
+
|
38 |
+
_URL = "cmmlu.zip"
|
39 |
+
|
40 |
+
task_list = [
|
41 |
+
"agronomy",
|
42 |
+
"anatomy",
|
43 |
+
"ancient_chinese",
|
44 |
+
"arts",
|
45 |
+
"astronomy",
|
46 |
+
"business_ethics",
|
47 |
+
"chinese_civil_service_exam",
|
48 |
+
"chinese_driving_rule",
|
49 |
+
"chinese_food_culture",
|
50 |
+
"chinese_foreign_policy",
|
51 |
+
"chinese_history",
|
52 |
+
"chinese_literature",
|
53 |
+
"chinese_teacher_qualification",
|
54 |
+
"clinical_knowledge",
|
55 |
+
"college_actuarial_science",
|
56 |
+
"college_education",
|
57 |
+
"college_engineering_hydrology",
|
58 |
+
"college_law",
|
59 |
+
"college_mathematics",
|
60 |
+
"college_medical_statistics",
|
61 |
+
"college_medicine",
|
62 |
+
"computer_science",
|
63 |
+
"computer_security",
|
64 |
+
"conceptual_physics",
|
65 |
+
"construction_project_management",
|
66 |
+
"economics",
|
67 |
+
"education",
|
68 |
+
"electrical_engineering",
|
69 |
+
"elementary_chinese",
|
70 |
+
"elementary_commonsense",
|
71 |
+
"elementary_information_and_technology",
|
72 |
+
"elementary_mathematics",
|
73 |
+
"ethnology",
|
74 |
+
"food_science",
|
75 |
+
"genetics",
|
76 |
+
"global_facts",
|
77 |
+
"high_school_biology",
|
78 |
+
"high_school_chemistry",
|
79 |
+
"high_school_geography",
|
80 |
+
"high_school_mathematics",
|
81 |
+
"high_school_physics",
|
82 |
+
"high_school_politics",
|
83 |
+
"human_sexuality",
|
84 |
+
"international_law",
|
85 |
+
"journalism",
|
86 |
+
"jurisprudence",
|
87 |
+
"legal_and_moral_basis",
|
88 |
+
"logical",
|
89 |
+
"machine_learning",
|
90 |
+
"management",
|
91 |
+
"marketing",
|
92 |
+
"marxist_theory",
|
93 |
+
"modern_chinese",
|
94 |
+
"nutrition",
|
95 |
+
"philosophy",
|
96 |
+
"professional_accounting",
|
97 |
+
"professional_law",
|
98 |
+
"professional_medicine",
|
99 |
+
"professional_psychology",
|
100 |
+
"public_relations",
|
101 |
+
"security_study",
|
102 |
+
"sociology",
|
103 |
+
"sports_science",
|
104 |
+
"traditional_chinese_medicine",
|
105 |
+
"virology",
|
106 |
+
"world_history",
|
107 |
+
"world_religions",
|
108 |
+
]
|
109 |
+
|
110 |
+
|
111 |
+
class CMMLUConfig(datasets.BuilderConfig):
    """Per-subject configuration for the CMMLU dataset builder."""

    def __init__(self, **kwargs):
        """Forward all keyword arguments to BuilderConfig, pinning the version.

        Every subject config shares the same dataset version (1.0.1).
        """
        pinned_version = datasets.Version("1.0.1")
        super().__init__(version=pinned_version, **kwargs)
|
114 |
+
|
115 |
+
|
116 |
+
class CMMLU(datasets.GeneratorBasedBuilder):
    """Loader for the CMMLU benchmark: one builder config per subject in ``task_list``."""

    BUILDER_CONFIGS = [
        CMMLUConfig(
            name=task_name,
        )
        for task_name in task_list
    ]

    def _info(self):
        # Each example is a four-option multiple-choice question; ``answer``
        # holds the correct option label as a string.
        features = datasets.Features(
            {
                "question": datasets.Value("string"),
                "A": datasets.Value("string"),
                "B": datasets.Value("string"),
                "C": datasets.Value("string"),
                "D": datasets.Value("string"),
                "answer": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Extract the bundled zip (local path in _URL) and point each split
        # at the per-subject CSV inside it.
        data_dir = dl_manager.download_and_extract(_URL)
        task_name = self.config.name
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"test/{task_name}.csv"),
                },
            ),
            # The few-shot "dev" CSV is exposed as the TRAIN split.
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"dev/{task_name}.csv"),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        # Yield (index, example) pairs from one subject CSV. The CSV's first
        # column is consumed as the row index; the capitalized Question/Answer
        # columns are renamed to the lowercase keys declared in _info, while
        # the A/B/C/D option columns pass through unchanged.
        df = pd.read_csv(filepath, header=0, index_col=0, encoding="utf-8")
        for i, instance in enumerate(df.to_dict(orient="records")):
            question = instance.pop("Question", "")
            answer = instance.pop("Answer", "")
            instance["question"] = question
            instance["answer"] = answer
            yield i, instance
|
evaluation/cmmlu/cmmlu.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d9c62ec80912ab18148b17b6618f8468c3c9d0fe48f5ca7c5db0b3f013d3bd1e
|
3 |
+
size 1078352
|
evaluation/cmmlu/mapping.json
ADDED
@@ -0,0 +1,270 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"agronomy": {
|
3 |
+
"name": "农学",
|
4 |
+
"category": "Other"
|
5 |
+
},
|
6 |
+
"anatomy": {
|
7 |
+
"name": "解剖学",
|
8 |
+
"category": "STEM"
|
9 |
+
},
|
10 |
+
"ancient_chinese": {
|
11 |
+
"name": "古汉语",
|
12 |
+
"category": "Social Sciences"
|
13 |
+
},
|
14 |
+
"arts": {
|
15 |
+
"name": "艺术学",
|
16 |
+
"category": "Humanities"
|
17 |
+
},
|
18 |
+
"astronomy": {
|
19 |
+
"name": "天文学",
|
20 |
+
"category": "STEM"
|
21 |
+
},
|
22 |
+
"business_ethics": {
|
23 |
+
"name": "商业伦理",
|
24 |
+
"category": "Social Sciences"
|
25 |
+
},
|
26 |
+
"chinese_civil_service_exam": {
|
27 |
+
"name": "中国公务员考试",
|
28 |
+
"category": "Social Sciences"
|
29 |
+
},
|
30 |
+
"chinese_driving_rule": {
|
31 |
+
"name": "中国驾驶规则",
|
32 |
+
"category": "Other"
|
33 |
+
},
|
34 |
+
"chinese_food_culture": {
|
35 |
+
"name": "中国饮食文化",
|
36 |
+
"category": "Social Sciences"
|
37 |
+
},
|
38 |
+
"chinese_foreign_policy": {
|
39 |
+
"name": "中国外交政策",
|
40 |
+
"category": "Social Sciences"
|
41 |
+
},
|
42 |
+
"chinese_history": {
|
43 |
+
"name": "中国历史",
|
44 |
+
"category": "Humanities"
|
45 |
+
},
|
46 |
+
"chinese_literature": {
|
47 |
+
"name": "中国文学",
|
48 |
+
"category": "Humanities"
|
49 |
+
},
|
50 |
+
"chinese_teacher_qualification": {
|
51 |
+
"name": "中国教师资格",
|
52 |
+
"category": "Social Sciences"
|
53 |
+
},
|
54 |
+
"college_actuarial_science": {
|
55 |
+
"name": "大学精算学",
|
56 |
+
"category": "STEM"
|
57 |
+
},
|
58 |
+
"college_education": {
|
59 |
+
"name": "大学教育学",
|
60 |
+
"category": "Social Sciences"
|
61 |
+
},
|
62 |
+
"college_engineering_hydrology": {
|
63 |
+
"name": "大学工程水文学",
|
64 |
+
"category": "STEM"
|
65 |
+
},
|
66 |
+
"college_law": {
|
67 |
+
"name": "大学法律",
|
68 |
+
"category": "Humanities"
|
69 |
+
},
|
70 |
+
"college_mathematics": {
|
71 |
+
"name": "大学数学",
|
72 |
+
"category": "STEM"
|
73 |
+
},
|
74 |
+
"college_medical_statistics": {
|
75 |
+
"name": "大学医学统计",
|
76 |
+
"category": "STEM"
|
77 |
+
},
|
78 |
+
"clinical_knowledge": {
|
79 |
+
"name": "临床知识",
|
80 |
+
"category": "Other"
|
81 |
+
},
|
82 |
+
"college_medicine": {
|
83 |
+
"name": "大学医学",
|
84 |
+
"category": "Other"
|
85 |
+
},
|
86 |
+
"computer_science": {
|
87 |
+
"name": "计算机科学",
|
88 |
+
"category": "STEM"
|
89 |
+
},
|
90 |
+
"computer_security": {
|
91 |
+
"name": "计算机安全",
|
92 |
+
"category": "Other"
|
93 |
+
},
|
94 |
+
"conceptual_physics": {
|
95 |
+
"name": "概念物理学",
|
96 |
+
"category": "STEM"
|
97 |
+
},
|
98 |
+
"construction_project_management": {
|
99 |
+
"name": "建设工程管理",
|
100 |
+
"category": "Other"
|
101 |
+
},
|
102 |
+
"economics": {
|
103 |
+
"name": "经济学",
|
104 |
+
"category": "Social Sciences"
|
105 |
+
},
|
106 |
+
"education": {
|
107 |
+
"name": "教育学",
|
108 |
+
"category": "Social Sciences"
|
109 |
+
},
|
110 |
+
"elementary_chinese": {
|
111 |
+
"name": "小学语文",
|
112 |
+
"category": "Social Sciences"
|
113 |
+
},
|
114 |
+
"elementary_commonsense": {
|
115 |
+
"name": "小学常识",
|
116 |
+
"category": "Other"
|
117 |
+
},
|
118 |
+
"elementary_information_and_technology": {
|
119 |
+
"name": "小学信息技术",
|
120 |
+
"category": "Other"
|
121 |
+
},
|
122 |
+
"electrical_engineering": {
|
123 |
+
"name": "电气工程",
|
124 |
+
"category": "STEM"
|
125 |
+
},
|
126 |
+
"elementary_mathematics": {
|
127 |
+
"name": "初等数学",
|
128 |
+
"category": "STEM"
|
129 |
+
},
|
130 |
+
"ethnology": {
|
131 |
+
"name": "民族学",
|
132 |
+
"category": "Social Sciences"
|
133 |
+
},
|
134 |
+
"food_science": {
|
135 |
+
"name": "食品科学",
|
136 |
+
"category": "Other"
|
137 |
+
},
|
138 |
+
"genetics": {
|
139 |
+
"name": "遗传学",
|
140 |
+
"category": "STEM"
|
141 |
+
},
|
142 |
+
"global_facts": {
|
143 |
+
"name": "全球事实",
|
144 |
+
"category": "Humanities"
|
145 |
+
},
|
146 |
+
"high_school_biology": {
|
147 |
+
"name": "高中生物",
|
148 |
+
"category": "STEM"
|
149 |
+
},
|
150 |
+
"high_school_chemistry": {
|
151 |
+
"name": "高中化学",
|
152 |
+
"category": "STEM"
|
153 |
+
},
|
154 |
+
"high_school_geography": {
|
155 |
+
"name": "高中地理",
|
156 |
+
"category": "Social Sciences"
|
157 |
+
},
|
158 |
+
"high_school_mathematics": {
|
159 |
+
"name": "高中数学",
|
160 |
+
"category": "STEM"
|
161 |
+
},
|
162 |
+
"high_school_physics": {
|
163 |
+
"name": "高中物理学",
|
164 |
+
"category": "STEM"
|
165 |
+
},
|
166 |
+
"high_school_politics": {
|
167 |
+
"name": "高中政治",
|
168 |
+
"category": "Social Sciences"
|
169 |
+
},
|
170 |
+
"human_sexuality": {
|
171 |
+
"name": "人类性行为",
|
172 |
+
"category": "Other"
|
173 |
+
},
|
174 |
+
"international_law": {
|
175 |
+
"name": "国际法学",
|
176 |
+
"category": "Humanities"
|
177 |
+
},
|
178 |
+
"journalism": {
|
179 |
+
"name": "新闻学",
|
180 |
+
"category": "Social Sciences"
|
181 |
+
},
|
182 |
+
"jurisprudence": {
|
183 |
+
"name": "法理学",
|
184 |
+
"category": "Humanities"
|
185 |
+
},
|
186 |
+
"legal_and_moral_basis": {
|
187 |
+
"name": "法律与道德基础",
|
188 |
+
"category": "Other"
|
189 |
+
},
|
190 |
+
"logical": {
|
191 |
+
"name": "逻辑学",
|
192 |
+
"category": "Humanities"
|
193 |
+
},
|
194 |
+
"machine_learning": {
|
195 |
+
"name": "机器学习",
|
196 |
+
"category": "STEM"
|
197 |
+
},
|
198 |
+
"management": {
|
199 |
+
"name": "管理学",
|
200 |
+
"category": "Social Sciences"
|
201 |
+
},
|
202 |
+
"marketing": {
|
203 |
+
"name": "市场营销",
|
204 |
+
"category": "Social Sciences"
|
205 |
+
},
|
206 |
+
"marxist_theory": {
|
207 |
+
"name": "马克思主义理论",
|
208 |
+
"category": "Humanities"
|
209 |
+
},
|
210 |
+
"modern_chinese": {
|
211 |
+
"name": "现代汉语",
|
212 |
+
"category": "Social Sciences"
|
213 |
+
},
|
214 |
+
"nutrition": {
|
215 |
+
"name": "营养学",
|
216 |
+
"category": "Other"
|
217 |
+
},
|
218 |
+
"philosophy": {
|
219 |
+
"name": "哲学",
|
220 |
+
"category": "Humanities"
|
221 |
+
},
|
222 |
+
"professional_accounting": {
|
223 |
+
"name": "专业会计",
|
224 |
+
"category": "Social Sciences"
|
225 |
+
},
|
226 |
+
"professional_law": {
|
227 |
+
"name": "专业法学",
|
228 |
+
"category": "Humanities"
|
229 |
+
},
|
230 |
+
"professional_medicine": {
|
231 |
+
"name": "专业医学",
|
232 |
+
"category": "Other"
|
233 |
+
},
|
234 |
+
"professional_psychology": {
|
235 |
+
"name": "专业心理学",
|
236 |
+
"category": "Social Sciences"
|
237 |
+
},
|
238 |
+
"public_relations": {
|
239 |
+
"name": "公共关系",
|
240 |
+
"category": "Social Sciences"
|
241 |
+
},
|
242 |
+
"security_study": {
|
243 |
+
"name": "安全研究",
|
244 |
+
"category": "Social Sciences"
|
245 |
+
},
|
246 |
+
"sociology": {
|
247 |
+
"name": "社会学",
|
248 |
+
"category": "Social Sciences"
|
249 |
+
},
|
250 |
+
"sports_science": {
|
251 |
+
"name": "体育学",
|
252 |
+
"category": "Other"
|
253 |
+
},
|
254 |
+
"traditional_chinese_medicine": {
|
255 |
+
"name": "中医中药",
|
256 |
+
"category": "Other"
|
257 |
+
},
|
258 |
+
"virology": {
|
259 |
+
"name": "病毒学",
|
260 |
+
"category": "STEM"
|
261 |
+
},
|
262 |
+
"world_history": {
|
263 |
+
"name": "世界历史",
|
264 |
+
"category": "Humanities"
|
265 |
+
},
|
266 |
+
"world_religions": {
|
267 |
+
"name": "世界宗教",
|
268 |
+
"category": "Humanities"
|
269 |
+
}
|
270 |
+
}
|
evaluation/mmlu/mapping.json
ADDED
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"abstract_algebra": {
|
3 |
+
"name": "abstract algebra",
|
4 |
+
"category": "STEM"
|
5 |
+
},
|
6 |
+
"anatomy": {
|
7 |
+
"name": "anatomy",
|
8 |
+
"category": "Other"
|
9 |
+
},
|
10 |
+
"astronomy": {
|
11 |
+
"name": "astronomy",
|
12 |
+
"category": "STEM"
|
13 |
+
},
|
14 |
+
"business_ethics": {
|
15 |
+
"name": "business ethics",
|
16 |
+
"category": "Other"
|
17 |
+
},
|
18 |
+
"clinical_knowledge": {
|
19 |
+
"name": "clinical knowledge",
|
20 |
+
"category": "Other"
|
21 |
+
},
|
22 |
+
"college_biology": {
|
23 |
+
"name": "college biology",
|
24 |
+
"category": "STEM"
|
25 |
+
},
|
26 |
+
"college_chemistry": {
|
27 |
+
"name": "college chemistry",
|
28 |
+
"category": "STEM"
|
29 |
+
},
|
30 |
+
"college_computer_science": {
|
31 |
+
"name": "college computer science",
|
32 |
+
"category": "STEM"
|
33 |
+
},
|
34 |
+
"college_mathematics": {
|
35 |
+
"name": "college mathematics",
|
36 |
+
"category": "STEM"
|
37 |
+
},
|
38 |
+
"college_medicine": {
|
39 |
+
"name": "college medicine",
|
40 |
+
"category": "Other"
|
41 |
+
},
|
42 |
+
"college_physics": {
|
43 |
+
"name": "college physics",
|
44 |
+
"category": "STEM"
|
45 |
+
},
|
46 |
+
"computer_security": {
|
47 |
+
"name": "computer security",
|
48 |
+
"category": "STEM"
|
49 |
+
},
|
50 |
+
"conceptual_physics": {
|
51 |
+
"name": "conceptual physics",
|
52 |
+
"category": "STEM"
|
53 |
+
},
|
54 |
+
"econometrics": {
|
55 |
+
"name": "econometrics",
|
56 |
+
"category": "Social Sciences"
|
57 |
+
},
|
58 |
+
"electrical_engineering": {
|
59 |
+
"name": "electrical engineering",
|
60 |
+
"category": "STEM"
|
61 |
+
},
|
62 |
+
"elementary_mathematics": {
|
63 |
+
"name": "elementary mathematics",
|
64 |
+
"category": "STEM"
|
65 |
+
},
|
66 |
+
"formal_logic": {
|
67 |
+
"name": "formal logic",
|
68 |
+
"category": "Humanities"
|
69 |
+
},
|
70 |
+
"global_facts": {
|
71 |
+
"name": "global facts",
|
72 |
+
"category": "Other"
|
73 |
+
},
|
74 |
+
"high_school_biology": {
|
75 |
+
"name": "high school biology",
|
76 |
+
"category": "STEM"
|
77 |
+
},
|
78 |
+
"high_school_chemistry": {
|
79 |
+
"name": "high school chemistry",
|
80 |
+
"category": "STEM"
|
81 |
+
},
|
82 |
+
"high_school_computer_science": {
|
83 |
+
"name": "high school computer science",
|
84 |
+
"category": "STEM"
|
85 |
+
},
|
86 |
+
"high_school_european_history": {
|
87 |
+
"name": "high school european history",
|
88 |
+
"category": "Humanities"
|
89 |
+
},
|
90 |
+
"high_school_geography": {
|
91 |
+
"name": "high school geography",
|
92 |
+
"category": "Social Sciences"
|
93 |
+
},
|
94 |
+
"high_school_government_and_politics": {
|
95 |
+
"name": "high school government and politics",
|
96 |
+
"category": "Social Sciences"
|
97 |
+
},
|
98 |
+
"high_school_macroeconomics": {
|
99 |
+
"name": "high school macroeconomics",
|
100 |
+
"category": "Social Sciences"
|
101 |
+
},
|
102 |
+
"high_school_mathematics": {
|
103 |
+
"name": "high school mathematics",
|
104 |
+
"category": "STEM"
|
105 |
+
},
|
106 |
+
"high_school_microeconomics": {
|
107 |
+
"name": "high school microeconomics",
|
108 |
+
"category": "Social Sciences"
|
109 |
+
},
|
110 |
+
"high_school_physics": {
|
111 |
+
"name": "high school physics",
|
112 |
+
"category": "STEM"
|
113 |
+
},
|
114 |
+
"high_school_psychology": {
|
115 |
+
"name": "high school psychology",
|
116 |
+
"category": "Social Sciences"
|
117 |
+
},
|
118 |
+
"high_school_statistics": {
|
119 |
+
"name": "high school statistics",
|
120 |
+
"category": "STEM"
|
121 |
+
},
|
122 |
+
"high_school_us_history": {
|
123 |
+
"name": "high school us history",
|
124 |
+
"category": "Humanities"
|
125 |
+
},
|
126 |
+
"high_school_world_history": {
|
127 |
+
"name": "high school world history",
|
128 |
+
"category": "Humanities"
|
129 |
+
},
|
130 |
+
"human_aging": {
|
131 |
+
"name": "human aging",
|
132 |
+
"category": "Other"
|
133 |
+
},
|
134 |
+
"human_sexuality": {
|
135 |
+
"name": "human sexuality",
|
136 |
+
"category": "Social Sciences"
|
137 |
+
},
|
138 |
+
"international_law": {
|
139 |
+
"name": "international law",
|
140 |
+
"category": "Humanities"
|
141 |
+
},
|
142 |
+
"jurisprudence": {
|
143 |
+
"name": "jurisprudence",
|
144 |
+
"category": "Humanities"
|
145 |
+
},
|
146 |
+
"logical_fallacies": {
|
147 |
+
"name": "logical fallacies",
|
148 |
+
"category": "Humanities"
|
149 |
+
},
|
150 |
+
"machine_learning": {
|
151 |
+
"name": "machine learning",
|
152 |
+
"category": "STEM"
|
153 |
+
},
|
154 |
+
"management": {
|
155 |
+
"name": "management",
|
156 |
+
"category": "Other"
|
157 |
+
},
|
158 |
+
"marketing": {
|
159 |
+
"name": "marketing",
|
160 |
+
"category": "Other"
|
161 |
+
},
|
162 |
+
"medical_genetics": {
|
163 |
+
"name": "medical genetics",
|
164 |
+
"category": "Other"
|
165 |
+
},
|
166 |
+
"miscellaneous": {
|
167 |
+
"name": "miscellaneous",
|
168 |
+
"category": "Other"
|
169 |
+
},
|
170 |
+
"moral_disputes": {
|
171 |
+
"name": "moral disputes",
|
172 |
+
"category": "Humanities"
|
173 |
+
},
|
174 |
+
"moral_scenarios": {
|
175 |
+
"name": "moral scenarios",
|
176 |
+
"category": "Humanities"
|
177 |
+
},
|
178 |
+
"nutrition": {
|
179 |
+
"name": "nutrition",
|
180 |
+
"category": "Other"
|
181 |
+
},
|
182 |
+
"philosophy": {
|
183 |
+
"name": "philosophy",
|
184 |
+
"category": "Humanities"
|
185 |
+
},
|
186 |
+
"prehistory": {
|
187 |
+
"name": "prehistory",
|
188 |
+
"category": "Humanities"
|
189 |
+
},
|
190 |
+
"professional_accounting": {
|
191 |
+
"name": "professional accounting",
|
192 |
+
"category": "Other"
|
193 |
+
},
|
194 |
+
"professional_law": {
|
195 |
+
"name": "professional law",
|
196 |
+
"category": "Humanities"
|
197 |
+
},
|
198 |
+
"professional_medicine": {
|
199 |
+
"name": "professional medicine",
|
200 |
+
"category": "Other"
|
201 |
+
},
|
202 |
+
"professional_psychology": {
|
203 |
+
"name": "professional psychology",
|
204 |
+
"category": "Social Sciences"
|
205 |
+
},
|
206 |
+
"public_relations": {
|
207 |
+
"name": "public relations",
|
208 |
+
"category": "Social Sciences"
|
209 |
+
},
|
210 |
+
"security_studies": {
|
211 |
+
"name": "security studies",
|
212 |
+
"category": "Social Sciences"
|
213 |
+
},
|
214 |
+
"sociology": {
|
215 |
+
"name": "sociology",
|
216 |
+
"category": "Social Sciences"
|
217 |
+
},
|
218 |
+
"us_foreign_policy": {
|
219 |
+
"name": "us foreign policy",
|
220 |
+
"category": "Social Sciences"
|
221 |
+
},
|
222 |
+
"virology": {
|
223 |
+
"name": "virology",
|
224 |
+
"category": "Other"
|
225 |
+
},
|
226 |
+
"world_religions": {
|
227 |
+
"name": "world religions",
|
228 |
+
"category": "Humanities"
|
229 |
+
}
|
230 |
+
}
|