datasetId
large_stringlengths 6
116
| author
large_stringlengths 2
42
| last_modified
large_stringdate 2021-04-29 15:34:29
2025-06-07 04:14:30
| downloads
int64 0
3.97M
| likes
int64 0
7.74k
| tags
large listlengths 1
7.92k
| task_categories
large listlengths 0
48
| createdAt
large_stringdate 2022-03-02 23:29:22
2025-06-07 04:11:40
| trending_score
float64 0
40
⌀ | card
large_stringlengths 31
1.01M
|
---|---|---|---|---|---|---|---|---|---|
supergoose/flan_combined_task409_mickey_nl_sentence_perturbation_generation | supergoose | 2025-03-10T14:29:12Z | 15 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:29:11Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 15153632
num_examples: 19360
download_size: 3172165
dataset_size: 15153632
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task230_iirc_passage_classification | supergoose | 2025-03-10T14:29:11Z | 16 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:29:07Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 152903945
num_examples: 19456
download_size: 82389647
dataset_size: 152903945
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1392_superglue_multirc_answer_verification | supergoose | 2025-03-10T14:29:06Z | 15 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:29:03Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 68074536
num_examples: 19425
download_size: 34690044
dataset_size: 68074536
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task761_app_review_classification | supergoose | 2025-03-10T14:29:05Z | 13 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:29:03Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 496411
num_examples: 891
download_size: 115519
dataset_size: 496411
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task829_giga_fren_translation | supergoose | 2025-03-10T14:29:04Z | 15 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:29:02Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 1610890
num_examples: 2974
download_size: 569279
dataset_size: 1610890
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task990_pib_translation_urdu_marathi | supergoose | 2025-03-10T14:29:02Z | 14 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:29:01Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 6134343
num_examples: 4476
download_size: 2175164
dataset_size: 6134343
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task989_pib_translation_marathi_urdu | supergoose | 2025-03-10T14:29:02Z | 14 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:29:01Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 6023560
num_examples: 4473
download_size: 2147663
dataset_size: 6023560
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task282_scruples_event_time | supergoose | 2025-03-10T14:29:00Z | 13 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:58Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 11809176
num_examples: 3321
download_size: 5936773
dataset_size: 11809176
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1691_qed_amara_translation | supergoose | 2025-03-10T14:28:58Z | 15 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:56Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 10669757
num_examples: 19052
download_size: 4175367
dataset_size: 10669757
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1205_atomic_classification_isafter | supergoose | 2025-03-10T14:28:58Z | 19 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:57Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 15938094
num_examples: 19417
download_size: 2179868
dataset_size: 15938094
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task982_pib_translation_tamil_bengali | supergoose | 2025-03-10T14:28:58Z | 14 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:57Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 7551879
num_examples: 4478
download_size: 2283593
dataset_size: 7551879
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task335_hateeval_classification_aggresive_en | supergoose | 2025-03-10T14:28:57Z | 15 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:56Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 7110302
num_examples: 8969
download_size: 1958431
dataset_size: 7110302
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task576_curiosity_dialogs_answer_generation | supergoose | 2025-03-10T14:28:56Z | 16 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:53Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 25534672
num_examples: 19345
download_size: 9623139
dataset_size: 25534672
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1239_ted_translation_gl_ja | supergoose | 2025-03-10T14:28:53Z | 13 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:51Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 11898185
num_examples: 19131
download_size: 4899315
dataset_size: 11898185
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task475_yelp_polarity_classification | supergoose | 2025-03-10T14:28:52Z | 13 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:49Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 32991842
num_examples: 19454
download_size: 16303101
dataset_size: 32991842
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task449_opus_paracrawl_ig_en_translation | supergoose | 2025-03-10T14:28:51Z | 16 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:49Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 12484316
num_examples: 17779
download_size: 4913480
dataset_size: 12484316
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1490_bengali_personal_hate_speech_binary_classification | supergoose | 2025-03-10T14:28:49Z | 14 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:47Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 17325314
num_examples: 17031
download_size: 4009474
dataset_size: 17325314
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1032_pib_translation_telugu_bengali | supergoose | 2025-03-10T14:28:48Z | 14 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:47Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 4479236
num_examples: 2730
download_size: 1426201
dataset_size: 4479236
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1068_pib_translation_gujarati_bengali | supergoose | 2025-03-10T14:28:46Z | 15 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:45Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 8429488
num_examples: 4478
download_size: 2391814
dataset_size: 8429488
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task998_pib_translation_oriya_bengali | supergoose | 2025-03-10T14:28:45Z | 14 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:43Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 2740103
num_examples: 1452
download_size: 859008
dataset_size: 2740103
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1625_disfl_qa_asnwer_generation | supergoose | 2025-03-10T14:28:45Z | 15 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:42Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 11380438
num_examples: 4494
download_size: 4598630
dataset_size: 11380438
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1413_dart_object_identification | supergoose | 2025-03-10T14:28:44Z | 16 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:43Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 5301366
num_examples: 8151
download_size: 1016633
dataset_size: 5301366
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task437_alt_en_ja_answer_generation | supergoose | 2025-03-10T14:28:43Z | 54 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:41Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 21563232
num_examples: 19384
download_size: 8445651
dataset_size: 21563232
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task444_com_qa_question_paraphrases_answer_generation | supergoose | 2025-03-10T14:28:38Z | 17 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:37Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 2589154
num_examples: 4681
download_size: 504650
dataset_size: 2589154
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task479_cls_german_books_classification | supergoose | 2025-03-10T14:28:38Z | 17 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:37Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 13106427
num_examples: 5976
download_size: 6275927
dataset_size: 13106427
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1361_movierationales_classification | supergoose | 2025-03-10T14:28:38Z | 16 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:36Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 48669065
num_examples: 5973
download_size: 26607913
dataset_size: 48669065
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1431_head_qa_answer_generation | supergoose | 2025-03-10T14:28:37Z | 17 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:36Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 7985939
num_examples: 7563
download_size: 2973201
dataset_size: 7985939
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1120_alt_ja_fil_answer_generation | supergoose | 2025-03-10T14:28:36Z | 17 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:34Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 22256292
num_examples: 19396
download_size: 8824038
dataset_size: 22256292
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task621_ohsumed_yes_no_numerical_answer_generation | supergoose | 2025-03-10T14:28:33Z | 17 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:32Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 8827959
num_examples: 3400
download_size: 3667485
dataset_size: 8827959
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1229_ted_translation_es_he | supergoose | 2025-03-10T14:28:33Z | 16 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:32Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 13212180
num_examples: 19190
download_size: 5310312
dataset_size: 13212180
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task683_online_privacy_policy_text_purpose_answer_generation | supergoose | 2025-03-10T14:28:31Z | 15 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:30Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 6902215
num_examples: 9214
download_size: 913343
dataset_size: 6902215
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task265_paper_reviews_language_identification | supergoose | 2025-03-10T14:28:30Z | 15 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:28Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 6553898
num_examples: 2263
download_size: 3280279
dataset_size: 6553898
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1237_ted_translation_he_ar | supergoose | 2025-03-10T14:28:29Z | 15 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:28Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 15719947
num_examples: 19216
download_size: 5922629
dataset_size: 15719947
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1540_parsed_pdfs_summarization | supergoose | 2025-03-10T14:28:28Z | 14 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:26Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 23172232
num_examples: 8976
download_size: 10560966
dataset_size: 23172232
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1489_sarcasmdetection_tweet_classification | supergoose | 2025-03-10T14:28:26Z | 14 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:25Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 249923
num_examples: 354
download_size: 48488
dataset_size: 249923
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task950_wiki_cloze_mr_multiple_choice_question_answering | supergoose | 2025-03-10T14:28:25Z | 17 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:23Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 53358106
num_examples: 19435
download_size: 17299047
dataset_size: 53358106
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task867_mawps_multiop_question_answering | supergoose | 2025-03-10T14:28:24Z | 15 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:23Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 1752871
num_examples: 2331
download_size: 415110
dataset_size: 1752871
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task985_pib_translation_hindi_oriya | supergoose | 2025-03-10T14:28:22Z | 13 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:21Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 9128466
num_examples: 4479
download_size: 2940802
dataset_size: 9128466
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1623_disfl_qa_disfluent_question_classification | supergoose | 2025-03-10T14:28:21Z | 14 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:19Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 2635011
num_examples: 4418
download_size: 517313
dataset_size: 2635011
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task1399_obqa_answer_generation | supergoose | 2025-03-10T14:28:20Z | 15 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:18Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 3704156
num_examples: 5287
download_size: 948643
dataset_size: 3704156
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task312_europarl_sv_en_translation | supergoose | 2025-03-10T14:28:19Z | 42 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:17Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 17948911
num_examples: 19434
download_size: 6995581
dataset_size: 17948911
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task637_extract_and_sort_unique_digits_in_a_list | supergoose | 2025-03-10T14:28:17Z | 15 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:15Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 9760708
num_examples: 19414
download_size: 1625709
dataset_size: 9760708
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task453_swag_answer_generation | supergoose | 2025-03-10T14:28:15Z | 14 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:13Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 10590511
num_examples: 19437
download_size: 3153045
dataset_size: 10590511
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task348_squad2_0_unanswerable_question_generation | supergoose | 2025-03-10T14:28:14Z | 16 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:11Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 51062836
num_examples: 19456
download_size: 19689076
dataset_size: 51062836
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
supergoose/flan_combined_task774_pawsx_german_text_modification | supergoose | 2025-03-10T14:28:11Z | 13 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:28:10Z | null | ---
dataset_info:
features:
- name: inputs
dtype: string
- name: targets
dtype: string
- name: _template_idx
dtype: int64
- name: _task_source
dtype: string
- name: _task_name
dtype: string
- name: _template_type
dtype: string
splits:
- name: train
num_bytes: 592930
num_examples: 750
download_size: 165702
dataset_size: 592930
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Shaibalini/deepfake_mfcc_iitm | Shaibalini | 2025-03-10T14:23:50Z | 8 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:12:42Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: id
dtype: string
- name: language
dtype: string
- name: is_tts
dtype: int64
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: mfcc_0
dtype: float64
- name: mfcc_1
dtype: float64
- name: mfcc_2
dtype: float64
- name: mfcc_3
dtype: float64
- name: mfcc_4
dtype: float64
- name: mfcc_5
dtype: float64
- name: mfcc_6
dtype: float64
- name: mfcc_7
dtype: float64
- name: mfcc_8
dtype: float64
- name: mfcc_9
dtype: float64
- name: mfcc_10
dtype: float64
- name: mfcc_11
dtype: float64
- name: mfcc_12
dtype: float64
- name: mfcc_13
dtype: float64
- name: mfcc_14
dtype: float64
- name: mfcc_15
dtype: float64
- name: mfcc_16
dtype: float64
- name: mfcc_17
dtype: float64
- name: mfcc_18
dtype: float64
- name: mfcc_19
dtype: float64
splits:
- name: train
num_bytes: 6676793061.74
num_examples: 31102
- name: test
num_bytes: 548667607.45
num_examples: 2635
download_size: 6758750520
dataset_size: 7225460669.19
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_7998f053-2f45-40d9-b53b-67e2b9703349 | argilla-internal-testing | 2025-03-10T14:09:07Z | 15 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:09:05Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_87c09330-89c5-449a-9460-5ea17d8d490b | argilla-internal-testing | 2025-03-10T14:08:59Z | 14 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:08:57Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_376c82ee-6e6c-4830-8106-a9fe0f1c405b | argilla-internal-testing | 2025-03-10T14:08:49Z | 17 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:08:48Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Asap7772/metamath-hint-v5-qwen-32B-base-gen__2250_4500 | Asap7772 | 2025-03-10T14:07:32Z | 10 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-05T11:54:04Z | null | ---
dataset_info:
features:
- name: hint_chosen
dtype: string
- name: hint_completion
sequence: string
- name: hint_completion_answer
sequence: string
- name: hint_completion_correct
sequence: bool
- name: hint_completion_succ_rate
dtype: float64
- name: problem
dtype: string
- name: answer
dtype: string
- name: completion
sequence: string
- name: completion_answer
sequence: string
- name: completion_correct
sequence: bool
- name: completion_succ_rate
dtype: float64
- name: domain
dtype: string
- name: context
dtype: string
- name: hint1
dtype: string
- name: hint2
dtype: string
- name: hint3
dtype: string
- name: hint4
dtype: string
- name: hint5
dtype: string
- name: hint6
dtype: string
- name: hint7
dtype: string
- name: hint8
dtype: string
- name: hint9
dtype: string
- name: hint10
dtype: string
splits:
- name: train
num_bytes: 2590143215
num_examples: 19250
download_size: 641321956
dataset_size: 2590143215
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Lansechen/bs17k_collection_filtered_easy_maxlength600 | Lansechen | 2025-03-10T14:04:30Z | 15 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T14:04:12Z | null | ---
dataset_info:
features:
- name: system
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: question
dtype: string
- name: solution
dtype: string
- name: r1_thinking
dtype: string
- name: cot_type
dtype: string
- name: source_type
dtype: string
- name: metadata
dtype: string
- name: cot
dtype: 'null'
- name: qwen7b_answer
dtype: string
- name: grade_reason
dtype: string
- name: qwen7b_answer_token_length
dtype: int64
- name: isqwen7bcorrect
dtype: bool
splits:
- name: train
num_bytes: 200975473.13140726
num_examples: 2424
download_size: 155758716
dataset_size: 200975473.13140726
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
cpratikaki/RSVQA-HR_qwen_finetuning | cpratikaki | 2025-03-10T13:59:33Z | 163 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T13:28:33Z | null | ---
dataset_info:
features:
- name: image
dtype: image
- name: question
dtype: string
- name: answer
dtype: string
splits:
- name: train
num_bytes: 281493313076.0
num_examples: 357844
download_size: 5715394407
dataset_size: 281493313076.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
dvilasuero/jailbreak-classification-r1 | dvilasuero | 2025-03-10T13:56:57Z | 21 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T13:56:54Z | null | ---
dataset_info:
features:
- name: type
dtype: string
- name: prompt
dtype: string
- name: r1
dtype: string
splits:
- name: train
num_bytes: 339163
num_examples: 100
download_size: 180351
dataset_size: 339163
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
dvilasuero/jailbreak-classification-reasoning | dvilasuero | 2025-03-10T13:53:55Z | 21 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T13:37:43Z | null | ---
dataset_info:
features:
- name: type
dtype: string
- name: prompt
dtype: string
- name: qwq32
dtype: string
splits:
- name: train
num_bytes: 353750
num_examples: 100
download_size: 187319
dataset_size: 353750
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Kyleyee/train_data_tldr_gpm_8dim | Kyleyee | 2025-03-10T13:44:07Z | 15 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T13:44:02Z | null | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: chosen
dtype: string
- name: rejected
dtype: string
- name: a_1
dtype: string
- name: a_2
dtype: string
- name: chosen_preference
dtype: float64
- name: rejected_preference
dtype: float64
- name: a_1_preference
dtype: float64
- name: a_2_preference
dtype: float64
splits:
- name: train
num_bytes: 189046807
num_examples: 92858
- name: test
num_bytes: 1996309
num_examples: 1000
download_size: 40583581
dataset_size: 191043116
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
---
|
anthonyyazdaniml/Bespoke-Stratos-17k-with-val | anthonyyazdaniml | 2025-03-10T13:38:37Z | 18 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T13:38:23Z | null | ---
dataset_info:
features:
- name: system
dtype: string
- name: conversations
list:
- name: from
dtype: string
- name: value
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
splits:
- name: train
num_bytes: 536267856.6
num_examples: 14949
- name: validation
num_bytes: 59585317.4
num_examples: 1661
- name: test
num_bytes: 3611794
num_examples: 100
download_size: 253201173
dataset_size: 599464968.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
Midsummra/bilibilicomment | Midsummra | 2025-03-10T13:29:52Z | 82 | 5 | [
"task_categories:text-generation",
"task_categories:question-answering",
"language:zh",
"license:agpl-3.0",
"size_categories:1M<n<10M",
"format:csv",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"Bilibili"
] | [
"text-generation",
"question-answering"
] | 2025-03-10T13:04:40Z | null | ---
license: agpl-3.0
task_categories:
- text-generation
- question-answering
language:
- zh
tags:
- Bilibili
pretty_name: w
size_categories:
- 1M<n<10M
---
Bilibili评论区语料(2023年)
在2023年爬的b站评论区语料
不间断地从b站各个分区爬取的热门视频评论,共计500W条左右
没有经过任何清洗,所以数据比较脏
由于游戏区全是原神,所以不可避免会有很多原神有关的评论,如果不经清洗直接用来训练生成式模型或者对话模型可能会有很严重的bias(模型一直在输出原神相关内容,,,)
有些datapoint中存在 “回复 @XXX :” ,表示该评论是对上一个datapoint的回复 |
nduque/eval_act_cam_setup3_last | nduque | 2025-03-10T13:28:55Z | 27 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:1K<n<10K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"tutorial"
] | [
"robotics"
] | 2025-03-10T13:28:47Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "koch",
"total_episodes": 10,
"total_frames": 4250,
"total_tasks": 1,
"total_videos": 20,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:10"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.front": {
"dtype": "video",
"shape": [
720,
1280,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 720,
"video.width": 1280,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.above": {
"dtype": "video",
"shape": [
720,
1280,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 720,
"video.width": 1280,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_48b3494c-ba6d-49cf-86ca-a794ac9bb643 | argilla-internal-testing | 2025-03-10T13:21:03Z | 14 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T13:21:02Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_96e00bcd-c7e0-4147-b905-ba9b81ce174d | argilla-internal-testing | 2025-03-10T13:20:37Z | 15 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T13:20:36Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_1fa3167d-1b08-446a-8384-517a216c235c | argilla-internal-testing | 2025-03-10T13:15:40Z | 17 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T13:15:39Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_aa5cdb38-4e0b-4801-91fa-c0ea827fae7f | argilla-internal-testing | 2025-03-10T13:15:34Z | 14 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T13:15:32Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
OALL/details_TIGER-Lab__Qwen2.5-32B-Instruct-CFT_v2_alrage | OALL | 2025-03-10T13:05:43Z | 12 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T13:05:38Z | null | ---
pretty_name: Evaluation run of TIGER-Lab/Qwen2.5-32B-Instruct-CFT
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [TIGER-Lab/Qwen2.5-32B-Instruct-CFT](https://huggingface.co/TIGER-Lab/Qwen2.5-32B-Instruct-CFT).\n\
\nThe dataset is composed of 1 configuration, each one coresponding to one of the\
\ evaluated task.\n\nThe dataset has been created from 1 run(s). Each run can be\
\ found as a specific split in each configuration, the split being named using the\
\ timestamp of the run.The \"train\" split is always pointing to the latest results.\n\
\nAn additional configuration \"results\" store all the aggregated results of the\
\ run.\n\nTo load the details from a run, you can for instance do the following:\n\
```python\nfrom datasets import load_dataset\ndata = load_dataset(\"OALL/details_TIGER-Lab__Qwen2.5-32B-Instruct-CFT_v2_alrage\"\
,\n\t\"results\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the\
\ [latest results from run 2025-03-10T13:05:30.433287](https://huggingface.co/datasets/OALL/details_TIGER-Lab__Qwen2.5-32B-Instruct-CFT_v2_alrage/blob/main/results_2025-03-10T13-05-30.433287.json)(note\
\ that their might be results for other tasks in the repos if successive evals didn't\
\ cover the same tasks. You find each in the results and the \"latest\" split for\
\ each eval):\n\n```python\n{\n \"all\": {\n \"llm_as_judge\": 0.7927825261158515,\n\
\ \"llm_as_judge_stderr\": 7.740640896667096e-05\n },\n \"community|alrage_qa|0\"\
: {\n \"llm_as_judge\": 0.7927825261158515,\n \"llm_as_judge_stderr\"\
: 7.740640896667096e-05\n }\n}\n```"
repo_url: https://huggingface.co/TIGER-Lab/Qwen2.5-32B-Instruct-CFT
configs:
- config_name: community_alrage_qa_0
data_files:
- split: 2025_03_10T13_05_30.433287
path:
- '**/details_community|alrage_qa|0_2025-03-10T13-05-30.433287.parquet'
- split: latest
path:
- '**/details_community|alrage_qa|0_2025-03-10T13-05-30.433287.parquet'
- config_name: results
data_files:
- split: 2025_03_10T13_05_30.433287
path:
- results_2025-03-10T13-05-30.433287.parquet
- split: latest
path:
- results_2025-03-10T13-05-30.433287.parquet
---
# Dataset Card for Evaluation run of TIGER-Lab/Qwen2.5-32B-Instruct-CFT
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [TIGER-Lab/Qwen2.5-32B-Instruct-CFT](https://huggingface.co/TIGER-Lab/Qwen2.5-32B-Instruct-CFT).
The dataset is composed of 1 configuration, each one corresponding to one of the evaluated tasks.
The dataset has been created from 1 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run.The "train" split is always pointing to the latest results.
An additional configuration "results" stores all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("OALL/details_TIGER-Lab__Qwen2.5-32B-Instruct-CFT_v2_alrage",
"results",
split="train")
```
## Latest results
These are the [latest results from run 2025-03-10T13:05:30.433287](https://huggingface.co/datasets/OALL/details_TIGER-Lab__Qwen2.5-32B-Instruct-CFT_v2_alrage/blob/main/results_2025-03-10T13-05-30.433287.json)(note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval):
```python
{
"all": {
"llm_as_judge": 0.7927825261158515,
"llm_as_judge_stderr": 7.740640896667096e-05
},
"community|alrage_qa|0": {
"llm_as_judge": 0.7927825261158515,
"llm_as_judge_stderr": 7.740640896667096e-05
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
ErikaaWang/truthfulqa_mc_custom_split | ErikaaWang | 2025-03-10T12:51:06Z | 15 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T12:51:01Z | null | ---
dataset_info:
features:
- name: question
dtype: string
- name: mc1_targets
struct:
- name: choices
sequence: string
- name: labels
sequence: int64
- name: mc2_targets
struct:
- name: choices
sequence: string
- name: labels
sequence: int64
splits:
- name: train
num_bytes: 392537
num_examples: 490
- name: validation
num_bytes: 127038
num_examples: 163
- name: test
num_bytes: 129491
num_examples: 164
download_size: 284207
dataset_size: 649066
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
Metaskepsis/Olympiads_hard | Metaskepsis | 2025-03-10T12:46:39Z | 31 | 0 | [
"task_categories:text-generation",
"annotations_creators:expert-generated",
"language_creators:expert-generated",
"multilinguality:monolingual",
"source_datasets:AI-MO/NuminaMath-CoT",
"language:en",
"license:mit",
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"mathematics",
"olympiads",
"problem-solving",
"latex",
"mathematical-reasoning",
"math-word-problems",
"olympiad-math"
] | [
"text-generation",
"mathematical-reasoning"
] | 2025-01-12T17:37:53Z | null | ---
annotations_creators:
- expert-generated
language:
- en
language_creators:
- expert-generated
license: mit
multilinguality:
- monolingual
pretty_name: Numina-Olympiads
size_categories:
- 1K<n<10K
source_datasets:
- AI-MO/NuminaMath-CoT
task_categories:
- text-generation
- mathematical-reasoning
task_ids:
- math-word-problems
- olympiad-math
paperswithcode_id: numina-olympiads
tags:
- mathematics
- olympiads
- problem-solving
- latex
- mathematical-reasoning
- math-word-problems
- olympiad-math
metrics:
- name: filtered_ratio
type: ratio
value: 0.995
description: Ratio of filtered dataset size to original dataset size
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
dataset_info:
features:
- name: id
dtype: int64
- name: problem
dtype: string
- name: solution
dtype: string
- name: source
dtype: string
- name: answer
dtype: string
- name: numeric_value
dtype: float64
splits:
- name: train
num_bytes: 51845980
num_examples: 20672
download_size: 25097226
dataset_size: 51845980
---
# Numina-Olympiads
Filtered NuminaMath-CoT dataset containing only olympiads problems with valid answers.
## Dataset Information
- Split: train
- Original size: 21525
- Filtered size: 21408
- Source: olympiads
- All examples contain valid boxed answers
## Dataset Description
This dataset is a filtered version of the NuminaMath-CoT dataset, containing only problems from olympiad sources that have valid boxed answers. Each example includes:
- A mathematical word problem
- A detailed solution with step-by-step reasoning
- A boxed final answer in LaTeX format
## Usage
The dataset is particularly useful for:
- Training and evaluating math problem-solving models
- Studying olympiad-style mathematical reasoning
- Testing model capabilities on complex word problems
|
IoanaLivia/horoscope_standard_A_400_19_20_5_03 | IoanaLivia | 2025-03-10T12:46:32Z | 20 | 0 | [
"language:ro",
"region:us"
] | [] | 2025-03-08T17:35:31Z | null | ---
language:
- ro
pretty_name: Horoscope_ProTV_400_ro-RO_Standard_A
---
Horoscope audio synthetic dataset [small]
* Source: (ProTV)
* Labeled: yes
* Language: Romanian
* Voice: ro-RO_Standard_A (Google)
* Train: 400 samples (Minimum duration: 3.47 seconds; Maximum duration: 29.57 seconds) |
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_bedb049a-6b61-46ed-95ab-a97e593326e7 | argilla-internal-testing | 2025-03-10T12:43:31Z | 15 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T12:43:30Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_4127218b-c28b-48f5-b9f7-092fab129e68 | argilla-internal-testing | 2025-03-10T12:43:21Z | 16 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T12:43:20Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Metaskepsis/Numina_very_hard | Metaskepsis | 2025-03-10T12:42:41Z | 23 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-01-22T14:00:53Z | null | ---
dataset_info:
features:
- name: id
dtype: int64
- name: problem
dtype: string
- name: solution
dtype: string
- name: source
dtype: string
- name: answer
dtype: string
- name: numeric_value
dtype: float64
splits:
- name: train
num_bytes: 114349899
num_examples: 65397
download_size: 58255791
dataset_size: 114349899
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
sleeping-ai/yt-data | sleeping-ai | 2025-03-10T12:38:14Z | 25 | 1 | [
"license:cc-by-nd-4.0",
"region:us"
] | [] | 2025-03-10T12:35:41Z | null | ---
license: cc-by-nd-4.0
---
|
SnehaPriyaaMP/GGUF_TestData | SnehaPriyaaMP | 2025-03-10T12:35:16Z | 19 | 0 | [
"license:llama3",
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T12:33:32Z | null | ---
license: llama3
dataset_info:
features:
- name: prompt
dtype: string
splits:
- name: train
num_bytes: 117470
num_examples: 96
download_size: 28865
dataset_size: 117470
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
harmomy/DocumentaryScript | harmomy | 2025-03-10T12:34:11Z | 14 | 1 | [
"language:zh",
"license:mit",
"size_categories:1K<n<10K",
"format:json",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us",
"music"
] | [] | 2025-03-10T12:30:42Z | null | ---
license: mit
language:
- zh
tags:
- music
size_categories:
- 10K<n<100K
---
# Dataset Card for Dataset Name
<!-- Provide a quick summary of the dataset. -->
This dataset card aims to be a base template for new datasets. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md?plain=1).
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
minxu555/test | minxu555 | 2025-03-10T12:34:05Z | 14 | 0 | [
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T12:24:48Z | null | ---
license: apache-2.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
dataset_info:
features:
- name: file_name
dtype: string
- name: image
dtype: image
- name: caption_sd15
dtype: string
- name: caption_flux
dtype: string
- name: caption_sdxl
dtype: string
- name: caption_sd3
dtype: string
splits:
- name: train
num_bytes: 783852.0
num_examples: 2
download_size: 795356
dataset_size: 783852.0
---
|
gopika13/answer_scripts | gopika13 | 2025-03-10T12:19:23Z | 84 | 0 | [
"language:en",
"region:us",
"images",
"OCR",
"handwritten-code"
] | [] | 2025-03-10T06:21:10Z | null | ---
tags:
- images
- OCR
- handwritten-code
language: en
dataset_info:
features:
image: image
code: string
---
# Answer Scripts Dataset
This dataset contains handwritten answer scripts along with extracted code text.
## Structure
- `images/`: Contains scanned answer sheets.
- `annotations.parquet`: Contains corresponding text for each image.
## Usage
```python
from datasets import load_dataset
dataset = load_dataset("gopika13/answer_scripts")
print(dataset) |
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_0fa76259-e911-4ea4-840c-11c74004fc02 | argilla-internal-testing | 2025-03-10T12:18:21Z | 17 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T12:18:19Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_024197c0-9e00-4e82-916f-c4a0a22ba3df | argilla-internal-testing | 2025-03-10T12:18:16Z | 15 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T12:18:16Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
SaylorTwift/details_HuggingFaceTB__SmolLM-1.7B_private | SaylorTwift | 2025-03-10T12:17:26Z | 12 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-18T13:52:35Z | null | ---
pretty_name: Evaluation run of HuggingFaceTB/SmolLM-1.7B
dataset_summary: "Dataset automatically created during the evaluation run of model\
\ [HuggingFaceTB/SmolLM-1.7B](https://huggingface.co/HuggingFaceTB/SmolLM-1.7B).\n\
\nThe dataset is composed of 3 configuration, each one coresponding to one of the\
\ evaluated task.\n\nThe dataset has been created from 5 run(s). Each run can be\
\ found as a specific split in each configuration, the split being named using the\
\ timestamp of the run.The \"train\" split is always pointing to the latest results.\n\
\nAn additional configuration \"results\" store all the aggregated results of the\
\ run.\n\nTo load the details from a run, you can for instance do the following:\n\
```python\nfrom datasets import load_dataset\ndata = load_dataset(\"SaylorTwift/details_HuggingFaceTB__SmolLM-1.7B_private\"\
,\n\t\"results\",\n\tsplit=\"train\")\n```\n\n## Latest results\n\nThese are the\
\ [latest results from run 2025-03-10T12:17:22.177520](https://huggingface.co/datasets/SaylorTwift/details_HuggingFaceTB__SmolLM-1.7B_private/blob/main/results_2025-03-10T12-17-22.177520.json)(note\
\ that their might be results for other tasks in the repos if successive evals didn't\
\ cover the same tasks. You find each in the results and the \"latest\" split for\
\ each eval):\n\n```python\n{\n \"all\": {\n \"prompt_level_strict_acc\"\
: 0.0,\n \"inst_level_strict_acc\": 0.0,\n \"prompt_level_loose_acc\"\
: 0.0,\n \"inst_level_loose_acc\": 0.0\n },\n \"extended|ifeval|0\"\
: {\n \"prompt_level_strict_acc\": 0.0,\n \"inst_level_strict_acc\"\
: 0.0,\n \"prompt_level_loose_acc\": 0.0,\n \"inst_level_loose_acc\"\
: 0.0\n }\n}\n```"
repo_url: https://huggingface.co/HuggingFaceTB/SmolLM-1.7B
configs:
- config_name: extended_ifeval_0
data_files:
- split: 2025_03_10T12_17_22.177520
path:
- '**/details_extended|ifeval|0_2025-03-10T12-17-22.177520.parquet'
- split: latest
path:
- '**/details_extended|ifeval|0_2025-03-10T12-17-22.177520.parquet'
- config_name: leaderboard_truthfulqa_mc_0
data_files:
- split: 2025_02_18T14_09_20.214887
path:
- '**/details_leaderboard|truthfulqa:mc|0_2025-02-18T14-09-20.214887.parquet'
- split: latest
path:
- '**/details_leaderboard|truthfulqa:mc|0_2025-02-18T14-09-20.214887.parquet'
- config_name: lighteval_gsm8k_0
data_files:
- split: 2025_02_18T13_52_35.588572
path:
- '**/details_lighteval|gsm8k|0_2025-02-18T13-52-35.588572.parquet'
- split: 2025_02_18T13_58_38.100145
path:
- '**/details_lighteval|gsm8k|0_2025-02-18T13-58-38.100145.parquet'
- split: 2025_02_18T14_02_44.455918
path:
- '**/details_lighteval|gsm8k|0_2025-02-18T14-02-44.455918.parquet'
- split: latest
path:
- '**/details_lighteval|gsm8k|0_2025-02-18T14-02-44.455918.parquet'
- config_name: results
data_files:
- split: 2025_02_18T13_52_35.588572
path:
- results_2025-02-18T13-52-35.588572.parquet
- split: 2025_02_18T13_58_38.100145
path:
- results_2025-02-18T13-58-38.100145.parquet
- split: 2025_02_18T14_02_44.455918
path:
- results_2025-02-18T14-02-44.455918.parquet
- split: 2025_02_18T14_09_20.214887
path:
- results_2025-02-18T14-09-20.214887.parquet
- split: 2025_03_10T12_17_22.177520
path:
- results_2025-03-10T12-17-22.177520.parquet
- split: latest
path:
- results_2025-03-10T12-17-22.177520.parquet
---
# Dataset Card for Evaluation run of HuggingFaceTB/SmolLM-1.7B
<!-- Provide a quick summary of the dataset. -->
Dataset automatically created during the evaluation run of model [HuggingFaceTB/SmolLM-1.7B](https://huggingface.co/HuggingFaceTB/SmolLM-1.7B).
The dataset is composed of 3 configurations, each one corresponding to one of the evaluated tasks.
The dataset has been created from 5 run(s). Each run can be found as a specific split in each configuration, the split being named using the timestamp of the run. The "train" split is always pointing to the latest results.
An additional configuration "results" store all the aggregated results of the run.
To load the details from a run, you can for instance do the following:
```python
from datasets import load_dataset
data = load_dataset("SaylorTwift/details_HuggingFaceTB__SmolLM-1.7B_private",
"results",
split="train")
```
## Latest results
These are the [latest results from run 2025-03-10T12:17:22.177520](https://huggingface.co/datasets/SaylorTwift/details_HuggingFaceTB__SmolLM-1.7B_private/blob/main/results_2025-03-10T12-17-22.177520.json) (note that there might be results for other tasks in the repos if successive evals didn't cover the same tasks. You can find each in the results and the "latest" split for each eval):
```python
{
"all": {
"prompt_level_strict_acc": 0.0,
"inst_level_strict_acc": 0.0,
"prompt_level_loose_acc": 0.0,
"inst_level_loose_acc": 0.0
},
"extended|ifeval|0": {
"prompt_level_strict_acc": 0.0,
"inst_level_strict_acc": 0.0,
"prompt_level_loose_acc": 0.0,
"inst_level_loose_acc": 0.0
}
}
```
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation [optional]
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Dataset Card Authors [optional]
[More Information Needed]
## Dataset Card Contact
[More Information Needed] |
Emilichcka/sticker_funny_cat | Emilichcka | 2025-03-10T12:17:22Z | 13 | 0 | [
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T11:32:16Z | null | ---
license: apache-2.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
dataset_info:
features:
- name: image
dtype: image
- name: prompt
dtype: string
splits:
- name: train
num_bytes: 16158235.0
num_examples: 91
download_size: 16150049
dataset_size: 16158235.0
---
|
fazliimam/temporal-vqa | fazliimam | 2025-03-10T12:16:06Z | 41 | 5 | [
"task_categories:image-text-to-text",
"license:apache-2.0",
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2501.10674",
"region:us"
] | [
"image-text-to-text"
] | 2025-01-16T11:23:05Z | null | ---
license: apache-2.0
task_categories:
- image-text-to-text
dataset_info:
- config_name: temporal_order
features:
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: label
dtype: string
splits:
- name: test
num_bytes: 211564460.0
num_examples: 720
download_size: 202986206
dataset_size: 211564460.0
- config_name: timelapse_estimation
features:
- name: image_1
dtype: image
- name: image_2
dtype: image
- name: label
dtype: string
splits:
- name: test
num_bytes: 48450099.0
num_examples: 125
download_size: 48184050
dataset_size: 48450099.0
configs:
- config_name: temporal_order
data_files:
- split: test
path: temporal_order/test-*
- config_name: timelapse_estimation
data_files:
- split: test
path: timelapse_estimation/test-*
---
### **Dataset Description**
The Temporal-VQA dataset is a challenging benchmark designed to evaluate the temporal reasoning capabilities of Multimodal Large Language Models (MLLMs) in tasks requiring visual temporal understanding. It emphasizes real-world temporal dynamics through two core evaluation tasks:
- **Temporal Order Understanding:** This task presents MLLMs with temporally consecutive frames from video sequences. The models must analyze and determine the correct sequence of events, assessing their ability to comprehend event progression over time.
- **Time-Lapse Estimation:** In this task, MLLMs are shown pairs of images taken at varying time intervals. The models are required to estimate the time-lapse between the images by selecting from multiple-choice options that span from seconds to years.
### **GPT4o Usage**
- The __Temporal Order Understanding__ task contains 720 image pairs, of which 360 are unique image pairs (the other 360 are the same pairs in reversed order), created by sampling frames from copyright-free videos.
- The __Timelapse Estimation__ task contains 125 image pairs compiled from copyright-free sources. The _image_A_ refers to the image that was taken first and the _image_B_ refers to the latest image.
```python
from datasets import load_dataset
import base64
import requests
import os
from io import BytesIO
# Replace with your OpenAI API key
API_KEY = "YOUR_API_KEY"
def encode_image(image):
buffer = BytesIO()
image.save(buffer, format="JPEG")
return base64.b64encode(buffer.getvalue()).decode('utf-8')
def get_gpt_response(image1, image2, query):
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {API_KEY}"
}
payload = {
"model": "gpt-4o",
"messages": [
{
"role": "user",
"content": [
{"type": "text", "text": query},
{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image1}"}},
{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image2}"}}
]
}
],
"max_tokens": 512
}
response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
return response.json()
### TASK 1
dataset = load_dataset('fazliimam/temporal-vqa', 'temporal_order', split='test')
image1 = encode_image(dataset[0]['image_1'])
image2 = encode_image(dataset[0]['image_2'])
prompt_1 = "Did the event in the first image happen before the event in the second image? Provide your answer in dictionary format: {'Answer':'True or False', 'Reasoning':'Brief explanation of your choice'}"
prompt_2 = "Between these two images, which one depicts the event that happened first? Provide your answer in dictionary format: {'Answer':'First image or Second image', 'Reasoning':'Brief explanation of your choice'}"
response = get_gpt_response(image1, image2, prompt_1)
print(response)
### TASK 2
dataset = load_dataset('fazliimam/temporal-vqa', 'timelapse_estimation', split='test')
image1 = encode_image(dataset[0]['image_1'])
image2 = encode_image(dataset[0]['image_2'])
prompt = "In the given image, estimate the time that has passed between the first image (left) and the second image (right). Choose one of the following options: A. Less than 15 seconds B. Between 2 minutes to 15 minutes C. Between 1 hour to 12 hours D. Between 2 days to 30 days E. Between 4 months to 12 months F. More than 3 years. Provide your answer in dictionary format: {'Answer':'Selected option', 'Reasoning':'Brief explanation of your choice'}"
response = get_gpt_response(image1, image2, prompt)
print(response)
```
### **Cite Us**
```
@misc{imam2025multimodalllmsvisualtemporal,
title={Can Multimodal LLMs do Visual Temporal Understanding and Reasoning? The answer is No!},
author={Mohamed Fazli Imam and Chenyang Lyu and Alham Fikri Aji},
year={2025},
eprint={2501.10674},
archivePrefix={arXiv},
primaryClass={cs.CV},
url={https://arxiv.org/abs/2501.10674},
}
``` |
Bperju/dsm_colpali_test | Bperju | 2025-03-10T11:46:29Z | 17 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-02-21T13:28:46Z | null | ---
dataset_info:
features:
- name: image
dtype: image
- name: raw_queries
sequence: string
- name: ww_source
sequence: string
- name: ww_collection
sequence: string
- name: ww_treatment
sequence: string
- name: ww_discharge
sequence: string
- name: ww_sink
sequence: string
- name: parsed_into_json
dtype: bool
splits:
- name: train
num_bytes: 16595279.0
num_examples: 25
download_size: 16410693
dataset_size: 16595279.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
shaquille69/my-distiset-a2fc17a0 | shaquille69 | 2025-03-10T11:43:09Z | 9 | 0 | [
"task_categories:text-generation",
"task_categories:text2text-generation",
"task_categories:question-answering",
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"library:distilabel",
"region:us",
"synthetic",
"distilabel",
"rlaif",
"datacraft"
] | [
"text-generation",
"text2text-generation",
"question-answering"
] | 2025-03-10T11:43:08Z | null | ---
size_categories: n<1K
task_categories:
- text-generation
- text2text-generation
- question-answering
dataset_info:
features:
- name: prompt
dtype: string
- name: completion
dtype: string
- name: system_prompt
dtype: string
splits:
- name: train
num_bytes: 15285
num_examples: 10
download_size: 24560
dataset_size: 15285
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
tags:
- synthetic
- distilabel
- rlaif
- datacraft
---
<p align="left">
<a href="https://github.com/argilla-io/distilabel">
<img src="https://raw.githubusercontent.com/argilla-io/distilabel/main/docs/assets/distilabel-badge-light.png" alt="Built with Distilabel" width="200" height="32"/>
</a>
</p>
# Dataset Card for my-distiset-a2fc17a0
This dataset has been created with [distilabel](https://distilabel.argilla.io/).
## Dataset Summary
This dataset contains a `pipeline.yaml` which can be used to reproduce the pipeline that generated it in distilabel using the `distilabel` CLI:
```console
distilabel pipeline run --config "https://huggingface.co/datasets/shaquille69/my-distiset-a2fc17a0/raw/main/pipeline.yaml"
```
or explore the configuration:
```console
distilabel pipeline info --config "https://huggingface.co/datasets/shaquille69/my-distiset-a2fc17a0/raw/main/pipeline.yaml"
```
## Dataset structure
The examples have the following structure per configuration:
<details><summary> Configuration: default </summary><hr>
```json
{
"completion": "The emphasis of the theory of Transactional Analysis (TA) developed by Eric Berne is on the study of communication patterns and interactions between people, based on the idea that these interactions can be understood at three ego states: Parent, Adult, and Child.",
"prompt": "What is the emphasis of theory of Transactional Analysis (TA) developed by Eric Berne? \nThe theory of Transactional Analysis (TA) is a psychological and theory used in social psychology that was developed by Canadian psychiatrist Eric Berne in the 1950s. The theory focuses on the study of communication patterns and interactions between people and the idea that these interactions can be understood at three ego states. These states areparent, adult, and child. Each state exists in different levels of psychological maturity which seems in various states of development within the human mind.",
"system_prompt": ""
}
```
This subset can be loaded as:
```python
from datasets import load_dataset
ds = load_dataset("shaquille69/my-distiset-a2fc17a0", "default")
```
Or simply as it follows, since there's only one configuration and is named `default`:
```python
from datasets import load_dataset
ds = load_dataset("shaquille69/my-distiset-a2fc17a0")
```
</details>
|
Danielrahmai1991/row_data | Danielrahmai1991 | 2025-03-10T11:28:19Z | 24 | 0 | [
"language:fa",
"language:en",
"license:mit",
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-04T12:58:13Z | null | ---
license: mit
dataset_info:
features:
- name: text
dtype: string
- name: topic
dtype: string
- name: date
dtype: string
splits:
- name: train
num_bytes: 398326
num_examples: 42
download_size: 85389
dataset_size: 398326
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
language:
- fa
- en
---
In this dataset card, you can see the uploaded dataset with a 1024-token length.
If you fine-tune your model on this dataset with a max token length greater than 1024, you need to enable packing.
MatrixIA/TEXT2SQLDATA | MatrixIA | 2025-03-10T11:25:46Z | 30 | 0 | [
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T11:23:39Z | null | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: input
dtype: string
- name: response
dtype: string
splits:
- name: train
num_bytes: 276792400
num_examples: 368059
download_size: 91308742
dataset_size: 276792400
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
limphanith/kh-opensrl-fleurs | limphanith | 2025-03-10T11:25:01Z | 18 | 1 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T11:22:57Z | null | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: text
dtype: string
splits:
- name: train
num_bytes: 1637695959.462
num_examples: 4581
download_size: 2668510229
dataset_size: 1637695959.462
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Rajarshi-Roy-research/COCO_OVSEG_Som | Rajarshi-Roy-research | 2025-03-10T11:20:07Z | 49 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"arxiv:2310.11441",
"region:us"
] | [] | 2025-01-31T10:57:14Z | null | ---
dataset_info:
features:
- name: id
dtype: string
- name: image
dtype: image
- name: depth_caption
dtype: string
splits:
- name: train
num_bytes: 15346054.0
num_examples: 148
download_size: 15326866
dataset_size: 15346054.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
# COCO_OVSEG_Som Dataset
This dataset contains images from the COCO dataset, specifically processed for open-vocabulary segmentation (OVSEG) and used in the [Set-of-Mark (SoM)](https://github.com/microsoft/SoM/tree/main/benchmark) benchmark.
**Original Dataset:**
This dataset is based on the [COCO (Common Objects in Context)](https://cocodataset.org/#home) dataset. Please refer to the original COCO dataset for its terms of use and licensing.
**Benchmark Reference:**
This dataset is formatted to be compatible with the benchmark setup described in the following repository:
* [Set-of-Mark (SoM) Benchmark](https://github.com/microsoft/SoM/tree/main/benchmark)
**Citation (SoM):**
If you use this *benchmark setup* in your research, please cite the following paper:
```bibtex
@article{yang2023setofmark,
title={Set-of-Mark Prompting Unleashes Extraordinary Visual Grounding in GPT-4V},
author={Jianwei Yang and Hao Zhang and Feng Li and Xueyan Zou and Chunyuan Li and Jianfeng Gao},
journal={arXiv preprint arXiv:2310.11441},
year={2023},
} |
HelloMizz/flux-poster-outpainting-train | HelloMizz | 2025-03-10T11:17:55Z | 108 | 0 | [
"task_categories:image-to-image",
"size_categories:n<1K",
"format:text",
"modality:image",
"modality:text",
"library:datasets",
"library:mlcroissant",
"region:us",
"art"
] | [
"image-to-image"
] | 2025-03-07T13:49:38Z | null | ---
task_categories:
- image-to-image
tags:
- art
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_cbd49266-1f10-4d2a-a45d-2d1ecec17647 | argilla-internal-testing | 2025-03-10T11:16:57Z | 17 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T11:16:56Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_f0c2bbc1-d268-4df2-8cc2-a5c79a07aa0a | argilla-internal-testing | 2025-03-10T11:15:05Z | 53 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T11:14:47Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Boko99/ultrafeedback_binarized_warmup | Boko99 | 2025-03-10T11:12:00Z | 66 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T11:03:59Z | null | ---
dataset_info:
features:
- name: prompt
dtype: string
- name: prompt_id
dtype: string
- name: chosen
list:
- name: content
dtype: string
- name: role
dtype: string
- name: rejected
list:
- name: content
dtype: string
- name: role
dtype: string
- name: messages
list:
- name: content
dtype: string
- name: role
dtype: string
- name: score_chosen
dtype: float64
- name: score_rejected
dtype: float64
splits:
- name: train_prefs
num_bytes: 20279456.139232844
num_examples: 3056
- name: test_prefs
num_bytes: 385409205.8607671
num_examples: 58079
download_size: 225522555
dataset_size: 405688662.0
configs:
- config_name: default
data_files:
- split: train_prefs
path: data/train_prefs-*
- split: test_prefs
path: data/test_prefs-*
---
|
HariModelMaven/pumed-finetuning | HariModelMaven | 2025-03-10T11:00:13Z | 17 | 0 | [
"size_categories:10K<n<100K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T11:00:09Z | null | ---
dataset_info:
features:
- name: instruction
dtype: string
- name: context
dtype: string
- name: context_neg
dtype: string
splits:
- name: train
num_bytes: 22980369
num_examples: 8000
- name: validation
num_bytes: 2830159
num_examples: 1000
- name: test
num_bytes: 2882826
num_examples: 1000
download_size: 16233011
dataset_size: 28693354
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
---
|
mahdin70/merged_bigvul_primevul | mahdin70 | 2025-03-10T10:59:28Z | 9 | 0 | [
"task_categories:text-classification",
"task_categories:feature-extraction",
"license:mit",
"size_categories:100K<n<1M",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"doi:10.57967/hf/4774",
"region:us",
"Code",
"Vulnerability"
] | [
"text-classification",
"feature-extraction"
] | 2025-03-09T11:59:30Z | null | ---
dataset_info:
features:
- name: project
dtype: string
- name: commit_id
dtype: string
- name: CVE ID
dtype: string
- name: CWE ID
dtype: string
- name: func
dtype: string
- name: vul
dtype: int8
splits:
- name: train
num_bytes: 299777443
num_examples: 239822
- name: test
num_bytes: 63350463
num_examples: 51390
- name: validation
num_bytes: 63678823
num_examples: 51392
download_size: 190857204
dataset_size: 426806729
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
- split: validation
path: data/validation-*
license: mit
task_categories:
- text-classification
- feature-extraction
tags:
- Code
- Vulnerability
---
# Merged BigVul and PrimeVul Dataset
**Dataset ID**: `mahdin70/merged_bigvul_primevul`
This dataset is a merged and preprocessed combination of the **BigVul** (`bstee615/bigvul`) and **PrimeVul** (`colin/PrimeVul`, "default" configuration) datasets, designed for vulnerability analysis and machine learning tasks. The preprocessing ensures consistency in column names, data types, and formats, making it suitable for fine-tuning models.
## Dataset Overview
The dataset integrates vulnerability data from two sources:
- **BigVul**: A dataset of real-world vulnerabilities from open-source C/C++ projects.
- **Paper**: (https://doi.org/10.1145/3379597.3387501)
- **Repository**: (https://github.com/ZeoVan/MSR_20_Code_vulnerability_CSV_Dataset)
- **PrimeVul**: A vulnerability dataset with additional project-specific details.
- **Paper**: (https://doi.org/10.48550/arXiv.2403.18624)
- **Repository**:(https://github.com/DLVulDet/PrimeVul)
The merged dataset retains key information about projects, commits, functions, and vulnerabilities, standardized for consistency.
### Columns
The dataset contains the following columns:
- **`project`**: String - The name of the project (e.g., "qemu", "linux-2.6").
- **`commit_id`**: String - Unique identifier of the commit associated with the function.
- **`func`**: String - The source code of the function before fixing (from `func_before` in BigVul).
- **`vul`**: Int8 - Vulnerability label (1 = vulnerable, 0 = not vulnerable).
- **`CVE ID`**: String - Common Vulnerabilities and Exposures identifier (e.g., `CVE-2007-1320`), or `NOT_APPLICABLE` if `vul = 0`.
- **`CWE ID`**: String - Common Weakness Enumeration identifier (e.g., `CWE-20`), or `NOT_APPLICABLE` if `vul = 0`.
### Splits
- **Train**: Combined training data from BigVul and PrimeVul.
- **Test**: Combined testing data from BigVul and PrimeVul.
- **Validation**: Combined validation data from BigVul and PrimeVul.
## Preprocessing Steps
The dataset was preprocessed to ensure consistency and quality:
### BigVul Preprocessing
- **Source Columns**:
- `project`, `commit_id`, `CVE ID`, `CWE ID`, `func_before`, `vul`.
- **Transformations**:
- Renamed `func_before` to `func`.
- Kept `CWE ID` in its original format (`CWE-XXX`).
- Converted `vul` to `int8`.
### PrimeVul Preprocessing
- **Source Columns**:
- `project`, `commit_id`, `cve`, `cwe`, `func`, `target`.
- **Transformations**:
- Renamed `cve` to `CVE ID`, `cwe` to `CWE ID`, `target` to `vul`.
- Standardized `CWE ID` by removing brackets from list format (e.g., `["CWE-XXX"]` → `CWE-XXX`), taking the first element if multiple CWEs exist.
- Converted `vul` from `int64` to `int8`.
### Merging and Final Preprocessing
- **Merging**: Concatenated preprocessed BigVul and PrimeVul data for each split (`train`, `test`, `validation`).
- **Final Steps**:
- Removed rows with any null values.
- Removed duplicates based on the `func` column.
- For rows where `vul = 0`, replaced `CVE ID` and `CWE ID` with `"NOT_APPLICABLE`.
## Dataset Statistics
Below are the analysis results for the final merged dataset:
### Train Split
- **Number of rows**: 239,822
- **Unique CWE IDs (excluding `NOT_APPLICABLE`)**: 127
- **Unique commit IDs**: 7,559
- **Vulnerable functions (`vul = 1`)**: 9,037
### Test Split
- **Number of rows**: 51,390
- **Unique CWE IDs (excluding `NOT_APPLICABLE`)**: 87
- **Unique commit IDs**: 6,032
- **Vulnerable functions (`vul = 1`)**: 1,911
### Validation Split
- **Number of rows**: 51,392
- **Unique CWE IDs (excluding `NOT_APPLICABLE`)**: 91
- **Unique commit IDs**: 6,059
- **Vulnerable functions (`vul = 1`)**: 1,933
## Usage
### Loading the Dataset
```python
from datasets import load_dataset
dataset = load_dataset("mahdin70/merged_bigvul_primevul") |
shunshao/reach-vb-pokemon-blip-captions | shunshao | 2025-03-10T10:45:42Z | 58 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:image",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T10:45:33Z | null | ---
dataset_info:
features:
- name: image
dtype: image
- name: text
dtype: string
splits:
- name: train
num_bytes: 119417305.0
num_examples: 833
download_size: 99573052
dataset_size: 119417305.0
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
hhim8826/japanese-anime-speech-v2-split | hhim8826 | 2025-03-10T10:43:15Z | 113 | 2 | [
"task_categories:automatic-speech-recognition",
"language:ja",
"license:gpl",
"size_categories:10K<n<100K",
"format:parquet",
"modality:audio",
"modality:text",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"japanese",
"anime",
"speech",
"日本語",
"audio-text",
"asr",
"whisper",
"voice"
] | [
"automatic-speech-recognition"
] | 2025-03-08T20:21:21Z | null | ---
dataset_info:
features:
- name: audio
dtype:
audio:
sampling_rate: 16000
- name: sentence
dtype: string
splits:
- name: train
num_bytes: 3377117847.8302207
num_examples: 40000
- name: test
num_bytes: 844278011.957555
num_examples: 10000
download_size: 3958862524
dataset_size: 4221395859.787776
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: test
path: data/test-*
license: gpl
task_categories:
- automatic-speech-recognition
language:
- ja
tags:
- japanese
- anime
- speech
- 日本語
- audio-text
- asr
- whisper
- voice
size_categories:
- 1K<n<10K
---
dataset split from [joujiboi/japanese-anime-speech-v2](https://huggingface.co/datasets/joujiboi/japanese-anime-speech-v2) |
hoffnung1208/koch_test_lego | hoffnung1208 | 2025-03-10T10:42:53Z | 134 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:100K<n<1M",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot"
] | [
"robotics"
] | 2025-03-06T14:18:14Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "koch",
"total_episodes": 2,
"total_frames": 505,
"total_tasks": 1,
"total_videos": 2,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:2"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.realsense": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
sihyun77/so100_test77 | sihyun77 | 2025-03-10T10:41:30Z | 27 | 0 | [
"task_categories:robotics",
"license:apache-2.0",
"size_categories:10K<n<100K",
"format:parquet",
"modality:tabular",
"modality:timeseries",
"modality:video",
"library:datasets",
"library:dask",
"library:mlcroissant",
"library:polars",
"region:us",
"LeRobot",
"so100,tutorial"
] | [
"robotics"
] | 2025-03-10T10:40:53Z | null | ---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- so100,tutorial
configs:
- config_name: default
data_files: data/*/*.parquet
---
This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).
## Dataset Description
- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0
## Dataset Structure
[meta/info.json](meta/info.json):
```json
{
"codebase_version": "v2.1",
"robot_type": "so100",
"total_episodes": 50,
"total_frames": 14693,
"total_tasks": 1,
"total_videos": 100,
"total_chunks": 1,
"chunks_size": 1000,
"fps": 30,
"splits": {
"train": "0:50"
},
"data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
"video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
"features": {
"action": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.state": {
"dtype": "float32",
"shape": [
6
],
"names": [
"main_shoulder_pan",
"main_shoulder_lift",
"main_elbow_flex",
"main_wrist_flex",
"main_wrist_roll",
"main_gripper"
]
},
"observation.images.laptop": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"observation.images.phone": {
"dtype": "video",
"shape": [
480,
640,
3
],
"names": [
"height",
"width",
"channels"
],
"info": {
"video.fps": 30.0,
"video.height": 480,
"video.width": 640,
"video.channels": 3,
"video.codec": "av1",
"video.pix_fmt": "yuv420p",
"video.is_depth_map": false,
"has_audio": false
}
},
"timestamp": {
"dtype": "float32",
"shape": [
1
],
"names": null
},
"frame_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"episode_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"index": {
"dtype": "int64",
"shape": [
1
],
"names": null
},
"task_index": {
"dtype": "int64",
"shape": [
1
],
"names": null
}
}
}
```
## Citation
**BibTeX:**
```bibtex
[More Information Needed]
``` |
nielsr/gemini-results-new-cs-papers | nielsr | 2025-03-10T10:18:56Z | 16 | 0 | [
"size_categories:1K<n<10K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T10:16:39Z | null | ---
dataset_info:
features:
- name: title
dtype: string
- name: authors
sequence: string
- name: subjects
sequence: string
- name: abstract
dtype: string
- name: arxiv_id
dtype: string
- name: pdf_url
dtype: string
- name: gemini_results
struct:
- name: github_issue_url
dtype: string
- name: github_url
dtype: string
- name: model_name
dtype: string
- name: new_datasets
dtype: string
- name: new_model_checkpoints
dtype: string
- name: note
dtype: string
- name: project_page_url
dtype: string
- name: reaching_out
dtype: string
- name: reasoning
dtype: string
- name: gemini_github_issue_url
dtype: string
- name: gemini_github_url
dtype: string
- name: gemini_model_name
dtype: string
- name: gemini_new_datasets
dtype: string
- name: gemini_new_model_checkpoints
dtype: string
- name: gemini_note
dtype: string
- name: gemini_project_page_url
dtype: string
- name: gemini_reaching_out
dtype: string
- name: gemini_reasoning
dtype: string
- name: gemini_huggingface_pull_request_urls
dtype: string
- name: date
dtype: string
splits:
- name: train
num_bytes: 11550096
num_examples: 2410
download_size: 4105733
dataset_size: 11550096
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_3105304a-c5b7-4569-845b-a71cebe3fc20 | argilla-internal-testing | 2025-03-10T10:07:06Z | 16 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T10:07:01Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_4ade333b-7c93-4510-b2b4-77a3f387c63b | argilla-internal-testing | 2025-03-10T10:07:05Z | 15 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T10:06:50Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
thanhnam46/test | thanhnam46 | 2025-03-10T09:50:48Z | 14 | 0 | [
"license:apache-2.0",
"region:us"
] | [] | 2025-03-10T09:50:48Z | null | ---
license: apache-2.0
---
|
argilla-internal-testing/test_import_dataset_from_hub_with_classlabel_c5cdb4e6-f8a5-46d0-ac0c-2e2fbdf421ce | argilla-internal-testing | 2025-03-10T09:40:22Z | 14 | 0 | [
"size_categories:n<1K",
"format:parquet",
"modality:text",
"library:datasets",
"library:pandas",
"library:mlcroissant",
"library:polars",
"region:us"
] | [] | 2025-03-10T09:40:21Z | null | ---
dataset_info:
features:
- name: text
dtype: string
- name: label
dtype:
class_label:
names:
'0': positive
'1': negative
splits:
- name: train
num_bytes: 111
num_examples: 3
download_size: 1256
dataset_size: 111
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
---
|
Subsets and Splits