Nocare3 committed
Commit b7f6bed · verified · Parent: 2136ec8

Upload 7 files
default/dataset_dict.json ADDED
@@ -0,0 +1 @@
+ {"splits": ["train"]}
default/train/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4c08dab25239e8891fca9e95e7ee00c3000a6bcd4128687362fff6bb6b8a88d
+ size 112304
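The `.arrow` file is tracked with Git LFS, so the diff above shows only the pointer (spec version, content hash, and size), not the Arrow data itself. As a minimal sketch, assuming the real file has been fetched with `git lfs pull`, its contents can be checked against the pointer's `oid`:

```python
# Verify a Git-LFS-tracked file against the sha256 oid recorded in its pointer.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks so large files don't need to fit in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

digest = sha256_of("default/train/data-00000-of-00001.arrow")
assert digest == "e4c08dab25239e8891fca9e95e7ee00c3000a6bcd4128687362fff6bb6b8a88d"
```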
default/train/dataset_info.json ADDED
@@ -0,0 +1,92 @@
+ {
+   "builder_name": "parquet",
+   "citation": "",
+   "config_name": "expand_columns_0",
+   "dataset_name": "parquet",
+   "dataset_size": 110540,
+   "description": "",
+   "download_checksums": {
+     "C:/Users/Andi/.cache/distilabel/pipelines/embedding-queries/0beaa44a146caf40e3953a9ed6e6263c267f3c58/data/expand_columns_0/00001.parquet": {
+       "num_bytes": 8119,
+       "checksum": null
+     },
+     "C:/Users/Andi/.cache/distilabel/pipelines/embedding-queries/0beaa44a146caf40e3953a9ed6e6263c267f3c58/data/expand_columns_0/00002.parquet": {
+       "num_bytes": 13104,
+       "checksum": null
+     },
+     "C:/Users/Andi/.cache/distilabel/pipelines/embedding-queries/0beaa44a146caf40e3953a9ed6e6263c267f3c58/data/expand_columns_0/00003.parquet": {
+       "num_bytes": 15720,
+       "checksum": null
+     },
+     "C:/Users/Andi/.cache/distilabel/pipelines/embedding-queries/0beaa44a146caf40e3953a9ed6e6263c267f3c58/data/expand_columns_0/00004.parquet": {
+       "num_bytes": 12822,
+       "checksum": null
+     },
+     "C:/Users/Andi/.cache/distilabel/pipelines/embedding-queries/0beaa44a146caf40e3953a9ed6e6263c267f3c58/data/expand_columns_0/00005.parquet": {
+       "num_bytes": 13055,
+       "checksum": null
+     },
+     "C:/Users/Andi/.cache/distilabel/pipelines/embedding-queries/0beaa44a146caf40e3953a9ed6e6263c267f3c58/data/expand_columns_0/00006.parquet": {
+       "num_bytes": 4575,
+       "checksum": null
+     }
+   },
+   "download_size": 67395,
+   "features": {
+     "filename": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "anchor": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "repo_name": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "positive": {
+       "dtype": "null",
+       "_type": "Value"
+     },
+     "negative": {
+       "dtype": "null",
+       "_type": "Value"
+     },
+     "distilabel_metadata": {
+       "raw_output_multiply_queries": {
+         "dtype": "null",
+         "_type": "Value"
+       }
+     },
+     "model_name_query": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "queries": {
+       "dtype": "null",
+       "_type": "Value"
+     },
+     "model_name_query_multiplied": {
+       "dtype": "string",
+       "_type": "Value"
+     }
+   },
+   "homepage": "",
+   "license": "",
+   "size_in_bytes": 177935,
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 110540,
+       "num_examples": 211,
+       "dataset_name": "parquet"
+     }
+   },
+   "version": {
+     "version_str": "0.0.0",
+     "major": 0,
+     "minor": 0,
+     "patch": 0
+   }
+ }
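`dataset_info.json` records the schema written by the `datasets` library: three string columns carry data (`filename`, `anchor`, `repo_name`), while `positive`, `negative`, `queries`, and the raw task output are typed `"null"`, meaning every row holds `None` for them (consistent with the failed steps in `pipeline.log` below). As a minimal sketch, assuming the `datasets` library is installed, the serialized `features` block can be deserialized directly:

```python
# Rebuild the feature schema from the "features" mapping in dataset_info.json.
import json
from datasets import Features

with open("default/train/dataset_info.json") as f:
    info = json.load(f)

features = Features.from_dict(info["features"])
print(features)  # columns typed Value("null") contain no usable data
```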
default/train/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "629e334d460621ce",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": "train"
+ }
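Together, `dataset_dict.json`, the Arrow shard, `dataset_info.json`, and `state.json` match the on-disk layout written by `DatasetDict.save_to_disk`, so the upload can be loaded back locally without going through the Hub. A minimal sketch, assuming the `default/` directory has been downloaded as-is:

```python
# Load the saved DatasetDict straight from disk.
from datasets import load_from_disk

ds = load_from_disk("default")   # directory containing dataset_dict.json
print(ds)                        # one "train" split with 211 examples
print(ds["train"][0]["anchor"])  # first documentation chunk
```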
distiset_configs/README.md ADDED
@@ -0,0 +1,73 @@
+ ---
+ size_categories: n<1K
+ tags:
+ - synthetic
+ - distilabel
+ - rlaif
+ ---
+
+ <p align="left">
+   <a href="https://github.com/argilla-io/distilabel">
+     <img src="https://raw.githubusercontent.com/argilla-io/distilabel/main/docs/assets/distilabel-badge-light.png" alt="Built with Distilabel" width="200" height="32"/>
+   </a>
+ </p>
+
+ # Dataset Card for love2dapi_queries
+
+ This dataset has been created with [distilabel](https://distilabel.argilla.io/).
+
+ ## Dataset Summary
+
+ This dataset contains a `pipeline.yaml` which can be used to reproduce the pipeline that generated it in distilabel, using the `distilabel` CLI:
+
+ ```console
+ distilabel pipeline run --config "https://huggingface.co/datasets/Nocare3/love2dapi_queries/raw/main/pipeline.yaml"
+ ```
+
+ or explore the configuration:
+
+ ```console
+ distilabel pipeline info --config "https://huggingface.co/datasets/Nocare3/love2dapi_queries/raw/main/pipeline.yaml"
+ ```
+
+ ## Dataset Structure
+
+ The examples have the following structure per configuration:
+
+ <details><summary> Configuration: default </summary><hr>
+
+ ```json
+ {
+     "anchor": "description: Argilla is a collaboration platform for AI engineers and domain experts that require high-quality outputs, full data ownership, and overall efficiency.\nhide: navigation\n\nWelcome to Argilla\n\nArgilla is a collaboration platform for AI engineers and domain experts that require high-quality outputs, full data ownership, and overall efficiency.",
+     "distilabel_metadata": {
+         "raw_output_multiply_queries": null
+     },
+     "filename": "argilla-python/docs/index.md",
+     "model_name_query": "meta-llama/Meta-Llama-3-70B-Instruct",
+     "model_name_query_multiplied": "meta-llama/Meta-Llama-3-70B-Instruct",
+     "negative": null,
+     "positive": null,
+     "queries": null,
+     "repo_name": "argilla-io/argilla-python"
+ }
+ ```
+
+ This subset can be loaded as:
+
+ ```python
+ from datasets import load_dataset
+
+ ds = load_dataset("Nocare3/love2dapi_queries", "default")
+ ```
+
+ Or simply as follows, since there is only one configuration, named `default`:
+
+ ```python
+ from datasets import load_dataset
+
+ ds = load_dataset("Nocare3/love2dapi_queries")
+ ```
+
+ </details>
distiset_configs/pipeline.log ADDED
@@ -0,0 +1,38 @@
+ [2024-07-19 16:51:10] WARNING Since the `base_url=https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct` is available and either one of `model_id` or `endpoint_name` is also provided, the `base_url` will either be ignored or overwritten with the one generated from either of those args, for serverless or dedicated inference endpoints, respectively.
+ [2024-07-19 16:51:10] WARNING Since the `base_url=https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct` is available and either one of `model_id` or `endpoint_name` is also provided, the `base_url` will either be ignored or overwritten with the one generated from either of those args, for serverless or dedicated inference endpoints, respectively.
+ [2024-07-19 16:53:15] WARNING Since the `base_url=https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct` is available and either one of `model_id` or `endpoint_name` is also provided, the `base_url` will either be ignored or overwritten with the one generated from either of those args, for serverless or dedicated inference endpoints, respectively.
+ [2024-07-19 16:53:15] WARNING Since the `base_url=https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct` is available and either one of `model_id` or `endpoint_name` is also provided, the `base_url` will either be ignored or overwritten with the one generated from either of those args, for serverless or dedicated inference endpoints, respectively.
+ [2024-07-19 17:03:37] WARNING Since the `base_url=https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct` is available and either one of `model_id` or `endpoint_name` is also provided, the `base_url` will either be ignored or overwritten with the one generated from either of those args, for serverless or dedicated inference endpoints, respectively.
+ [2024-07-19 17:03:37] WARNING Since the `base_url=https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct` is available and either one of `model_id` or `endpoint_name` is also provided, the `base_url` will either be ignored or overwritten with the one generated from either of those args, for serverless or dedicated inference endpoints, respectively.
+ [2024-07-19 17:03:52] WARNING Task 'multiply_queries' failed to format output: 'NoneType' object has no attribute 'split'. Saving raw response.
+ [2024-07-19 17:03:52] WARNING Subprocess traceback:
+
+ Traceback (most recent call last):
+   File "C:\Users\Andi\Python projects\RAGTesting\.venv\Lib\site-packages\distilabel\pipeline\local.py", line 512, in _non_generator_process_loop
+     result = next(self.step.process_applying_mappings(*batch.data))
+              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   File "C:\Users\Andi\Python projects\RAGTesting\.venv\Lib\site-packages\distilabel\steps\base.py", line 512, in process_applying_mappings
+     for output_rows in generator:
+   File "C:\Users\Andi\Python projects\RAGTesting\.venv\Lib\site-packages\distilabel\steps\combine.py", line 119, in process
+     yield combine_dicts(
+           ^^^^^^^^^^^^^^
+   File "C:\Users\Andi\Python projects\RAGTesting\.venv\Lib\site-packages\distilabel\pipeline\utils.py", line 39, in combine_dicts
+     raise ValueError(
+ ValueError: The length of output_merge_keys must be the same as the length of merge_keys
+
+ [2024-07-19 17:03:52] WARNING Subprocess traceback:
+
+ Traceback (most recent call last):
+   File "C:\Users\Andi\Python projects\RAGTesting\.venv\Lib\site-packages\distilabel\pipeline\local.py", line 512, in _non_generator_process_loop
+     result = next(self.step.process_applying_mappings(*batch.data))
+              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   File "C:\Users\Andi\Python projects\RAGTesting\.venv\Lib\site-packages\distilabel\steps\base.py", line 512, in process_applying_mappings
+     for output_rows in generator:
+   File "C:\Users\Andi\Python projects\RAGTesting\.venv\Lib\site-packages\distilabel\steps\expand.py", line 111, in process
+     yield [row for input in inputs for row in self._expand_columns(input)]
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   File "C:\Users\Andi\Python projects\RAGTesting\.venv\Lib\site-packages\distilabel\steps\expand.py", line 126, in _expand_columns
+     for item, expanded in zip_longest(*[data, expanded_rows], fillvalue=input):
+                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ TypeError: 'NoneType' object is not iterable
+
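The log records one failure chain: the `multiply_queries` task could not parse the LLM output (`'NoneType' object has no attribute 'split'`), which left the `queries` column as `None`; `CombineColumns` then failed its merge-key length check, and `ExpandColumns` crashed trying to iterate the `None` value. A minimal illustrative sketch (not distilabel code; the row values are hypothetical) of the final `TypeError` and a defensive fix:

```python
# When a parse failure leaves a list-typed column as None, iterating it the
# way an expand step does raises TypeError immediately.
from itertools import zip_longest

row = {"positive": "How do I draw a sprite in Love2d?", "queries": None}

try:
    list(zip_longest(row["queries"], [], fillvalue=row))  # iter(None) fails
except TypeError as exc:
    print(exc)

# Substituting an empty list before expanding avoids the crash; the row is
# then simply absent from the expanded output.
safe = row["queries"] or []
print(list(zip_longest(safe, [], fillvalue=row)))  # []
```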
distiset_configs/pipeline.yaml ADDED
@@ -0,0 +1,334 @@
+ distilabel:
+   version: 1.2.2
+ pipeline:
+   connections:
+   - from: load_data
+     to:
+     - generate_sentence_pair
+   - from: generate_sentence_pair
+     to:
+     - multiply_queries
+   - from: multiply_queries
+     to:
+     - merge_columns
+   - from: merge_columns
+     to:
+     - expand_columns_0
+   - from: expand_columns_0
+     to: []
+   description: Generate queries to train a sentence embedding model.
+   name: embedding-queries
+   routing_batch_functions: []
+   steps:
+   - name: load_data
+     step:
+       batch_size: 10
+       config: null
+       input_mappings: {}
+       name: load_data
+       num_examples: null
+       output_mappings:
+         chunks: anchor
+       repo_id: Nocare3/love2dapi_chunks
+       runtime_parameters_info:
+       - description: The number of rows that will contain the batches generated by
+           the step.
+         name: batch_size
+         optional: true
+       - description: The Hugging Face Hub repository ID of the dataset to load.
+         name: repo_id
+         optional: false
+       - description: The split of the dataset to load. Defaults to 'train'.
+         name: split
+         optional: true
+       - description: The configuration of the dataset to load. This is optional and
+           only needed if the dataset has multiple configurations.
+         name: config
+         optional: true
+       - description: Whether to load the dataset in streaming mode or not. Defaults
+           to False.
+         name: streaming
+         optional: true
+       - description: The number of examples to load from the dataset. By default will
+           load all examples.
+         name: num_examples
+         optional: true
+       split: train
+       storage_options: null
+       streaming: false
+       type_info:
+         module: distilabel.steps.generators.huggingface
+         name: LoadDataFromHub
+   - name: generate_sentence_pair
+     step:
+       action: query
+       add_raw_output: true
+       context: The generated sentence has to be related with Love2d, a lua-code game
+         engine used mostly by indie developers.
+       group_generations: false
+       input_batch_size: 10
+       input_mappings: {}
+       llm:
+         base_url: null
+         endpoint_name: null
+         endpoint_namespace: null
+         generation_kwargs:
+           max_new_tokens: 512
+           temperature: 0.7
+         model_display_name: null
+         model_id: meta-llama/Meta-Llama-3-70B-Instruct
+         structured_output: null
+         tokenizer_id: meta-llama/Meta-Llama-3-70B-Instruct
+         type_info:
+           module: distilabel.llms.huggingface.inference_endpoints
+           name: InferenceEndpointsLLM
+         use_openai_client: false
+       name: generate_sentence_pair
+       num_generations: 1
+       output_mappings:
+         model_name: model_name_query
+       runtime_parameters_info:
+       - description: The number of rows that will contain the batches processed by
+           the step.
+         name: input_batch_size
+         optional: true
+       - name: llm
+         runtime_parameters_info:
+         - description: The kwargs to be propagated to either `generate` or `agenerate`
+             methods within each `LLM`.
+           keys:
+           - description: the maximum number of new tokens that the model will generate. Defaults
+               to `128`.
+             name: max_new_tokens
+             optional: true
+           - description: the repetition penalty to use for the generation. Defaults to
+               `0.0`. Only applies if `use_openai_client=True`.
+             name: frequency_penalty
+             optional: true
+           - description: the presence penalty to use for the generation. Defaults
+               to `0.0`. Only applies if `use_openai_client=True`.
+             name: presence_penalty
+             optional: true
+           - description: the repetition penalty to use for the generation. Defaults to
+               `None`. Only applies if `use_openai_client=False`.
+             name: repetition_penalty
+             optional: true
+           - description: the temperature to use for the generation. Defaults to `1.0`.
+             name: temperature
+             optional: true
+           - description: whether to use sampling for the generation. Defaults to `False`. Only
+               applies if `use_openai_client=False`.
+             name: do_sample
+             optional: true
+           - description: the top-k value to use for the generation. Defaults to `0.8`,
+               since neither `0.0` nor `1.0` are valid values in TGI.
+             name: top_k
+             optional: true
+           - description: the top-p value to use for the generation. Defaults to `1.0`.
+             name: top_p
+             optional: true
+           - description: the typical-p value to use for the generation. Defaults to
+               `0.5`.
+             name: typical_p
+             optional: true
+           - description: either a single string or a list of strings containing the
+               sequences to stop the generation at. Defaults to `None`, but will be
+               set to the `tokenizer.eos_token` if available.
+             name: stop_sequences
+             optional: true
+           - description: whether to return the full text of the completion or just
+               the generated text. Defaults to `False`, meaning that only the generated
+               text will be returned.
+             name: return_full_text
+             optional: true
+           - description: the seed to use for the generation. Defaults to `None`.
+             name: seed
+             optional: true
+           - description: whether to add the watermark to the generated text. Defaults
+               to `None`.
+             name: watermark
+             optional: true
+           name: generation_kwargs
+         - description: The name of the Inference Endpoint to use for the LLM.
+           name: endpoint_name
+           optional: true
+         - description: The namespace of the Inference Endpoint to use for the LLM.
+           name: endpoint_namespace
+           optional: true
+         - description: The base URL to use for the Inference Endpoints API requests.
+           name: base_url
+           optional: true
+         - description: The API key to authenticate the requests to the Inference Endpoints
+             API.
+           name: api_key
+           optional: true
+       - description: The structured output format to use across all the generations.
+         name: structured_output
+         optional: true
+       - description: Whether to include the raw output of the LLM in the key `raw_output_<TASK_NAME>`
+           of the `distilabel_metadata` dictionary output column
+         name: add_raw_output
+         optional: true
+       - description: The number of generations to be produced per input.
+         name: num_generations
+         optional: true
+       triplet: true
+       type_info:
+         module: distilabel.steps.tasks.sentence_transformers
+         name: GenerateSentencePair
+   - name: multiply_queries
+     step:
+       add_raw_output: true
+       group_generations: false
+       input_batch_size: 10
+       input_mappings:
+         query: positive
+       llm:
+         base_url: null
+         endpoint_name: null
+         endpoint_namespace: null
+         generation_kwargs:
+           max_new_tokens: 512
+           temperature: 0.7
+         model_display_name: null
+         model_id: meta-llama/Meta-Llama-3-70B-Instruct
+         structured_output: null
+         tokenizer_id: meta-llama/Meta-Llama-3-70B-Instruct
+         type_info:
+           module: distilabel.llms.huggingface.inference_endpoints
+           name: InferenceEndpointsLLM
+         use_openai_client: false
+       name: multiply_queries
+       num_generations: 1
+       num_queries: 3
+       output_mappings:
+         model_name: model_name_query_multiplied
+       runtime_parameters_info:
+       - description: The number of rows that will contain the batches processed by
+           the step.
+         name: input_batch_size
+         optional: true
+       - name: llm
+         runtime_parameters_info:
+         - description: The kwargs to be propagated to either `generate` or `agenerate`
+             methods within each `LLM`.
+           keys:
+           - description: the maximum number of new tokens that the model will generate. Defaults
+               to `128`.
+             name: max_new_tokens
+             optional: true
+           - description: the repetition penalty to use for the generation. Defaults to
+               `0.0`. Only applies if `use_openai_client=True`.
+             name: frequency_penalty
+             optional: true
+           - description: the presence penalty to use for the generation. Defaults
+               to `0.0`. Only applies if `use_openai_client=True`.
+             name: presence_penalty
+             optional: true
+           - description: the repetition penalty to use for the generation. Defaults to
+               `None`. Only applies if `use_openai_client=False`.
+             name: repetition_penalty
+             optional: true
+           - description: the temperature to use for the generation. Defaults to `1.0`.
+             name: temperature
+             optional: true
+           - description: whether to use sampling for the generation. Defaults to `False`. Only
+               applies if `use_openai_client=False`.
+             name: do_sample
+             optional: true
+           - description: the top-k value to use for the generation. Defaults to `0.8`,
+               since neither `0.0` nor `1.0` are valid values in TGI.
+             name: top_k
+             optional: true
+           - description: the top-p value to use for the generation. Defaults to `1.0`.
+             name: top_p
+             optional: true
+           - description: the typical-p value to use for the generation. Defaults to
+               `0.5`.
+             name: typical_p
+             optional: true
+           - description: either a single string or a list of strings containing the
+               sequences to stop the generation at. Defaults to `None`, but will be
+               set to the `tokenizer.eos_token` if available.
+             name: stop_sequences
+             optional: true
+           - description: whether to return the full text of the completion or just
+               the generated text. Defaults to `False`, meaning that only the generated
+               text will be returned.
+             name: return_full_text
+             optional: true
+           - description: the seed to use for the generation. Defaults to `None`.
+             name: seed
+             optional: true
+           - description: whether to add the watermark to the generated text. Defaults
+               to `None`.
+             name: watermark
+             optional: true
+           name: generation_kwargs
+         - description: The name of the Inference Endpoint to use for the LLM.
+           name: endpoint_name
+           optional: true
+         - description: The namespace of the Inference Endpoint to use for the LLM.
+           name: endpoint_namespace
+           optional: true
+         - description: The base URL to use for the Inference Endpoints API requests.
+           name: base_url
+           optional: true
+         - description: The API key to authenticate the requests to the Inference Endpoints
+             API.
+           name: api_key
+           optional: true
+       - description: The structured output format to use across all the generations.
+         name: structured_output
+         optional: true
+       - description: Whether to include the raw output of the LLM in the key `raw_output_<TASK_NAME>`
+           of the `distilabel_metadata` dictionary output column
+         name: add_raw_output
+         optional: true
+       - description: The number of generations to be produced per input.
+         name: num_generations
+         optional: true
+       system_prompt: You are an AI assistant helping to generate diverse examples.
+         Ensure the generated queries are all in separated lines and preceded by a
+         dash. Do not generate anything else or introduce the task.
+       type_info:
+         module: __main__
+         name: MultipleQueries
+   - name: merge_columns
+     step:
+       columns:
+         '0': positive
+         '1': queries
+       input_batch_size: 50
+       input_mappings: {}
+       name: merge_columns
+       output_columns:
+         '0': positive
+       output_mappings: {}
+       runtime_parameters_info:
+       - description: The number of rows that will contain the batches processed by
+           the step.
+         name: input_batch_size
+         optional: true
+       type_info:
+         module: distilabel.steps.combine
+         name: CombineColumns
+   - name: expand_columns_0
+     step:
+       columns:
+         positive: positive
+       input_batch_size: 50
+       input_mappings: {}
+       name: expand_columns_0
+       output_mappings: {}
+       runtime_parameters_info:
+       - description: The number of rows that will contain the batches processed by
+           the step.
+         name: input_batch_size
+         optional: true
+       type_info:
+         module: distilabel.steps.expand
+         name: ExpandColumns
+   type_info:
+     module: distilabel.pipeline.local
+     name: Pipeline
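The YAML above is distilabel's serialized form of the pipeline; most of it is auto-generated `runtime_parameters_info` metadata, and the substantive configuration is the five steps and their connections. Below is a hedged sketch of the same pipeline in Python, using the class and module names recorded in the `type_info` entries. Exact constructor arguments may differ across distilabel versions, and the custom `MultipleQueries` task lives in the user's own script (module `__main__` in the YAML), so it is omitted and the steps around it are wired directly; this makes the sketch structural rather than runnable end to end.

```python
from distilabel.llms import InferenceEndpointsLLM
from distilabel.pipeline import Pipeline
from distilabel.steps import CombineColumns, ExpandColumns, LoadDataFromHub
from distilabel.steps.tasks import GenerateSentencePair

# Serverless Inference Endpoints client for Meta-Llama-3-70B-Instruct.
llm = InferenceEndpointsLLM(
    model_id="meta-llama/Meta-Llama-3-70B-Instruct",
    tokenizer_id="meta-llama/Meta-Llama-3-70B-Instruct",
)

with Pipeline(
    name="embedding-queries",
    description="Generate queries to train a sentence embedding model.",
) as pipeline:
    # Load documentation chunks and rename the column to "anchor".
    load_data = LoadDataFromHub(
        name="load_data",
        repo_id="Nocare3/love2dapi_chunks",
        batch_size=10,
        output_mappings={"chunks": "anchor"},
    )
    # Generate (query, positive, negative) triplets for each anchor chunk.
    generate = GenerateSentencePair(
        name="generate_sentence_pair",
        action="query",
        triplet=True,
        context=(
            "The generated sentence has to be related with Love2d, a lua-code "
            "game engine used mostly by indie developers."
        ),
        llm=llm,
        output_mappings={"model_name": "model_name_query"},
    )
    # The custom MultipleQueries task (num_queries=3) would sit here,
    # between generate and merge, producing the "queries" column.
    merge = CombineColumns(
        name="merge_columns",
        columns=["positive", "queries"],
        output_columns=["positive"],
    )
    expand = ExpandColumns(name="expand_columns_0", columns=["positive"])

    load_data >> generate >> merge >> expand

if __name__ == "__main__":
    distiset = pipeline.run(
        parameters={
            "generate_sentence_pair": {
                "llm": {
                    "generation_kwargs": {
                        "max_new_tokens": 512,
                        "temperature": 0.7,
                    }
                }
            }
        }
    )
```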