--- language: - ara - dan - deu - eng - fas - fra - hin - ind - ita - jpn - kor - nld - pol - por - rus - spa - swe - tur - vie - zho multilinguality: - multilingual task_categories: - text-retrieval task_ids: - document-retrieval config_names: - corpus tags: - text-retrieval dataset_info: - config_name: ara-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 6293965 num_examples: 132664 - name: test num_bytes: 474351 num_examples: 10000 - config_name: ara-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 162827578 num_examples: 142664 - config_name: ara-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 35458944 num_examples: 142664 - config_name: dan-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 6050436 num_examples: 127686 - name: test num_bytes: 473958 num_examples: 10000 - config_name: dan-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 49171909 num_examples: 137686 - config_name: dan-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 10733277 num_examples: 137686 - config_name: deu-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 42959189 num_examples: 881201 - name: test num_bytes: 487440 num_examples: 10000 - config_name: deu-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 377457585 num_examples: 891201 - config_name: deu-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 72730983 num_examples: 891201 - config_name: eng-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 266487037 num_examples: 5268725 - name: test num_bytes: 505718 num_examples: 10000 - config_name: eng-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 1772481467 num_examples: 5278725 - config_name: eng-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 394021606 num_examples: 5278725 - config_name: fas-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 10417693 num_examples: 216940 - name: test num_bytes: 480147 num_examples: 10000 - config_name: fas-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 240471393 num_examples: 226940 - config_name: fas-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 57867968 num_examples: 226940 - config_name: fra-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 27197426 num_examples: 559505 - name: test num_bytes: 486099 num_examples: 10000 - config_name: fra-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - 
name: corpus num_bytes: 256564231 num_examples: 569505 - config_name: fra-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 51751140 num_examples: 569505 - config_name: hin-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 4211543 num_examples: 90031 - name: test num_bytes: 467756 num_examples: 10000 - config_name: hin-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 87202578 num_examples: 100031 - config_name: hin-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 24557386 num_examples: 100031 - config_name: ind-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 4762307 num_examples: 101315 - name: test num_bytes: 469908 num_examples: 10000 - config_name: ind-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 32240964 num_examples: 111315 - config_name: ind-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 8791501 num_examples: 111315 - config_name: ita-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 11928808 num_examples: 247803 - name: test num_bytes: 481319 num_examples: 10000 - config_name: ita-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 96693889 num_examples: 257803 - config_name: ita-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 21887337 num_examples: 257803 - config_name: jpn-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 14443770 num_examples: 299157 - name: test num_bytes: 482703 num_examples: 10000 - config_name: jpn-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 254914767 num_examples: 309157 - config_name: jpn-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 52646303 num_examples: 309157 - config_name: kor-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 4307606 num_examples: 92000 - name: test num_bytes: 468174 num_examples: 10000 - config_name: kor-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 65463396 num_examples: 102000 - config_name: kor-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 14462715 num_examples: 102000 - config_name: nld-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 17456195 num_examples: 360662 - name: test num_bytes: 484023 num_examples: 10000 - config_name: nld-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 134247494 num_examples: 370662 - config_name: nld-queries features: - 
name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 27592780 num_examples: 370662 - config_name: pol-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 8732582 num_examples: 182515 - name: test num_bytes: 478433 num_examples: 10000 - config_name: pol-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 83829979 num_examples: 192515 - config_name: pol-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 17279177 num_examples: 192515 - config_name: por-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 9556791 num_examples: 199353 - name: test num_bytes: 479286 num_examples: 10000 - config_name: por-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 80179713 num_examples: 209353 - config_name: por-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 17117819 num_examples: 209353 - config_name: rus-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 18281224 num_examples: 377504 - name: test num_bytes: 484252 num_examples: 10000 - config_name: rus-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 612916055 num_examples: 387504 - config_name: rus-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 117356334 num_examples: 387504 - config_name: spa-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 28919818 num_examples: 594661 - name: test num_bytes: 486351 num_examples: 10000 - config_name: spa-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 240959272 num_examples: 604661 - config_name: spa-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 54894661 num_examples: 604661 - config_name: swe-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 7079817 num_examples: 148738 - name: test num_bytes: 476125 num_examples: 10000 - config_name: swe-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 59133680 num_examples: 158738 - config_name: swe-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 12773304 num_examples: 158738 - config_name: tur-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 6400585 num_examples: 134846 - name: test num_bytes: 474649 num_examples: 10000 - config_name: tur-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 57145253 num_examples: 144846 - config_name: tur-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 
11816043 num_examples: 144846 - config_name: vie-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 5380433 num_examples: 113972 - name: test num_bytes: 471975 num_examples: 10000 - config_name: vie-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 76390471 num_examples: 123972 - config_name: vie-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 16076620 num_examples: 123972 - config_name: zho-qrels features: - name: query-id dtype: string - name: corpus-id dtype: string - name: score dtype: float64 splits: - name: train num_bytes: 5796592 num_examples: 122491 - name: test num_bytes: 473247 num_examples: 10000 - config_name: zho-corpus features: - name: _id dtype: string - name: title dtype: string - name: text dtype: string splits: - name: corpus num_bytes: 79790293 num_examples: 132491 - config_name: zho-queries features: - name: _id dtype: string - name: text dtype: string splits: - name: queries num_bytes: 15738014 num_examples: 132491 configs: - config_name: ara-qrels data_files: - split: train path: ara/train.jsonl - split: test path: ara/test.jsonl - config_name: ara-corpus data_files: - split: corpus path: ara/corpus.jsonl - config_name: ara-queries data_files: - split: queries path: ara/queries.jsonl - config_name: dan-qrels data_files: - split: train path: dan/train.jsonl - split: test path: dan/test.jsonl - config_name: dan-corpus data_files: - split: corpus path: dan/corpus.jsonl - config_name: dan-queries data_files: - split: queries path: dan/queries.jsonl - config_name: deu-qrels data_files: - split: train path: deu/train.jsonl - split: test path: deu/test.jsonl - config_name: deu-corpus data_files: - split: corpus path: deu/corpus.jsonl - config_name: deu-queries data_files: - split: queries path: deu/queries.jsonl - config_name: eng-qrels data_files: - split: train path: eng/train.jsonl - split: test path: eng/test.jsonl - config_name: eng-corpus data_files: - split: corpus path: eng/corpus.jsonl - config_name: eng-queries data_files: - split: queries path: eng/queries.jsonl - config_name: fas-qrels data_files: - split: train path: fas/train.jsonl - split: test path: fas/test.jsonl - config_name: fas-corpus data_files: - split: corpus path: fas/corpus.jsonl - config_name: fas-queries data_files: - split: queries path: fas/queries.jsonl - config_name: fra-qrels data_files: - split: train path: fra/train.jsonl - split: test path: fra/test.jsonl - config_name: fra-corpus data_files: - split: corpus path: fra/corpus.jsonl - config_name: fra-queries data_files: - split: queries path: fra/queries.jsonl - config_name: hin-qrels data_files: - split: train path: hin/train.jsonl - split: test path: hin/test.jsonl - config_name: hin-corpus data_files: - split: corpus path: hin/corpus.jsonl - config_name: hin-queries data_files: - split: queries path: hin/queries.jsonl - config_name: ind-qrels data_files: - split: train path: ind/train.jsonl - split: test path: ind/test.jsonl - config_name: ind-corpus data_files: - split: corpus path: ind/corpus.jsonl - config_name: ind-queries data_files: - split: queries path: ind/queries.jsonl - config_name: ita-qrels data_files: - split: train path: ita/train.jsonl - split: test path: ita/test.jsonl - config_name: ita-corpus data_files: - split: corpus path: ita/corpus.jsonl - config_name: ita-queries data_files: - split: 
queries path: ita/queries.jsonl - config_name: jpn-qrels data_files: - split: train path: jpn/train.jsonl - split: test path: jpn/test.jsonl - config_name: jpn-corpus data_files: - split: corpus path: jpn/corpus.jsonl - config_name: jpn-queries data_files: - split: queries path: jpn/queries.jsonl - config_name: kor-qrels data_files: - split: train path: kor/train.jsonl - split: test path: kor/test.jsonl - config_name: kor-corpus data_files: - split: corpus path: kor/corpus.jsonl - config_name: kor-queries data_files: - split: queries path: kor/queries.jsonl - config_name: nld-qrels data_files: - split: train path: nld/train.jsonl - split: test path: nld/test.jsonl - config_name: nld-corpus data_files: - split: corpus path: nld/corpus.jsonl - config_name: nld-queries data_files: - split: queries path: nld/queries.jsonl - config_name: pol-qrels data_files: - split: train path: pol/train.jsonl - split: test path: pol/test.jsonl - config_name: pol-corpus data_files: - split: corpus path: pol/corpus.jsonl - config_name: pol-queries data_files: - split: queries path: pol/queries.jsonl - config_name: por-qrels data_files: - split: train path: por/train.jsonl - split: test path: por/test.jsonl - config_name: por-corpus data_files: - split: corpus path: por/corpus.jsonl - config_name: por-queries data_files: - split: queries path: por/queries.jsonl - config_name: rus-qrels data_files: - split: train path: rus/train.jsonl - split: test path: rus/test.jsonl - config_name: rus-corpus data_files: - split: corpus path: rus/corpus.jsonl - config_name: rus-queries data_files: - split: queries path: rus/queries.jsonl - config_name: spa-qrels data_files: - split: train path: spa/train.jsonl - split: test path: spa/test.jsonl - config_name: spa-corpus data_files: - split: corpus path: spa/corpus.jsonl - config_name: spa-queries data_files: - split: queries path: spa/queries.jsonl - config_name: swe-qrels data_files: - split: train path: swe/train.jsonl - split: test path: swe/test.jsonl - config_name: swe-corpus data_files: - split: corpus path: swe/corpus.jsonl - config_name: swe-queries data_files: - split: queries path: swe/queries.jsonl - config_name: tur-qrels data_files: - split: train path: tur/train.jsonl - split: test path: tur/test.jsonl - config_name: tur-corpus data_files: - split: corpus path: tur/corpus.jsonl - config_name: tur-queries data_files: - split: queries path: tur/queries.jsonl - config_name: vie-qrels data_files: - split: train path: vie/train.jsonl - split: test path: vie/test.jsonl - config_name: vie-corpus data_files: - split: corpus path: vie/corpus.jsonl - config_name: vie-queries data_files: - split: queries path: vie/queries.jsonl - config_name: zho-qrels data_files: - split: train path: zho/train.jsonl - split: test path: zho/test.jsonl - config_name: zho-corpus data_files: - split: corpus path: zho/corpus.jsonl - config_name: zho-queries data_files: - split: queries path: zho/queries.jsonl ---

# WebFAQ Retrieval Dataset

[Overview](#overview) | [Details](#details) | [Structure](#structure) | [Examples](#examples) | [Considerations](#considerations) | [License](#license) | [Citation](#citation) | [Contact](#contact) | [Acknowledgement](#acknowledgement)

## Overview

The **WebFAQ Retrieval Dataset** is a carefully **filtered and curated subset** of the broader [WebFAQ Q&A Dataset](https://huggingface.co/datasets/anonymous202501/webfaq). It is **purpose-built for Information Retrieval (IR)** tasks, such as **training and evaluating** dense or sparse retrieval models in **multiple languages**.

Each of the **20 largest** languages from the WebFAQ corpus has been **thoroughly cleaned** and **refined** to ensure a clear notion of relevance between each query (question) and its corresponding document (answer). In particular, we applied:

- **Deduplication** of near-identical questions,
- **Semantic consistency checks** for question-answer alignment,
- **Train/test splits** for retrieval experiments.

## Details

### Languages

The **WebFAQ Retrieval Dataset** covers **20 high-resource languages** from the original WebFAQ corpus, ranging from tens of thousands to several million QA pairs per language after our rigorous filtering steps:

| Language | # QA pairs |
|----------|-----------:|
| ara | 143k |
| dan | 138k |
| deu | 891k |
| eng | 5.28M |
| fas | 227k |
| fra | 570k |
| hin | 96.6k |
| ind | 96.6k |
| ita | 209k |
| jpn | 280k |
| kor | 79.1k |
| nld | 349k |
| pol | 179k |
| por | 186k |
| rus | 346k |
| spa | 558k |
| swe | 144k |
| tur | 110k |
| vie | 105k |
| zho | 125k |

## Structure

Unlike the raw Q&A dataset, **WebFAQ Retrieval** provides explicit **train/test splits** for each of the 20 languages. The general structure for each language is:

- **Corpus**: A set of unique documents (answers) with IDs and text fields.
- **Queries**: A set of question strings, each tied to a document ID for relevance.
- **Qrels**: Relevance labels, mapping each question to its relevant document (the corresponding answer).

### Folder Layout (e.g., for eng)

```
eng/
├── corpus.jsonl   # all unique documents (answers)
├── queries.jsonl  # all queries for train/test
├── train.jsonl    # relevance annotations for train
└── test.jsonl     # relevance annotations for test
```

## Examples

Below is a small snippet showing how to load the English train/test sets with [🤗 Datasets](https://github.com/huggingface/datasets):

```python
import json

from datasets import load_dataset
from tqdm import tqdm

# Load train qrels
train_qrels = load_dataset(
    "anonymous202501/webfaq-retrieval",
    "eng-qrels",
    split="train"
)

# Inspect first qrel
print(json.dumps(train_qrels[0], indent=4))

# Load the corpus (answers)
data_corpus = load_dataset(
    "anonymous202501/webfaq-retrieval",
    "eng-corpus",
    split="corpus"
)
corpus = {
    d["_id"]: {"title": d["title"], "text": d["text"]}
    for d in tqdm(data_corpus)
}

# Inspect first document
print("Document:")
print(json.dumps(corpus[train_qrels[0]["corpus-id"]], indent=4))

# Load all queries
data_queries = load_dataset(
    "anonymous202501/webfaq-retrieval",
    "eng-queries",
    split="queries"
)
queries = {
    q["_id"]: q["text"]
    for q in tqdm(data_queries)
}

# Inspect first query
print("Query:")
print(json.dumps(queries[train_qrels[0]["query-id"]], indent=4))

# Keep only those queries with relevance annotations
query_ids = {q["query-id"] for q in train_qrels}
queries = {qid: query for qid, query in queries.items() if qid in query_ids}
print(f"Number of queries: {len(queries)}")
```
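
For a quick end-to-end sanity check without a retrieval framework, the sketch below runs a simple lexical BM25 baseline over the loaded corpus and reports Recall@10 on a sample of test queries. It assumes the third-party `rank_bm25` package (`pip install rank-bm25`) and reuses the `corpus` and `data_queries` objects from the snippet above; the whitespace tokenization, query sample size, and cutoff of 10 are illustrative choices, not part of the dataset.

```python
import random

import numpy as np
from datasets import load_dataset
from rank_bm25 import BM25Okapi  # assumed third-party dependency

# Load the test qrels (relevance annotations for the held-out queries)
test_qrels = load_dataset(
    "anonymous202501/webfaq-retrieval", "eng-qrels", split="test"
)

# Index the corpus with BM25 (whitespace tokenization for brevity;
# for a corpus as large as eng, a dedicated retrieval toolkit is advisable)
doc_ids = list(corpus.keys())
tokenized_docs = [
    ((corpus[did]["title"] or "") + " " + corpus[did]["text"]).lower().split()
    for did in doc_ids
]
bm25 = BM25Okapi(tokenized_docs)

# Rebuild the full query lookup (the `queries` dict above was filtered to train)
all_queries = {q["_id"]: q["text"] for q in data_queries}

# Score a small random sample of test queries and compute Recall@10
random.seed(42)
sample = random.sample(list(test_qrels), k=100)

hits = 0
for qrel in sample:
    query_tokens = all_queries[qrel["query-id"]].lower().split()
    scores = bm25.get_scores(query_tokens)
    top10 = np.argsort(scores)[::-1][:10]
    if qrel["corpus-id"] in {doc_ids[i] for i in top10}:
        hits += 1

print(f"Recall@10 (BM25, {len(sample)} sampled test queries): {hits / len(sample):.3f}")
```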

Below is a code snippet showing how to evaluate retrieval performance using the `mteb` library:

> **Note**: WebFAQ is not yet available as a multilingual task in the `mteb` library. The code snippet below is a placeholder for when it becomes available.

```python
from mteb import MTEB
from mteb.tasks.Retrieval.multilingual.WebFAQRetrieval import WebFAQRetrieval

# ... Load model ...

# Load the WebFAQ task
task = WebFAQRetrieval()
eval_split = "test"

evaluation = MTEB(tasks=[task])
evaluation.run(
    model,
    eval_splits=[eval_split],
    output_folder="output",
    overwrite_results=True
)
```

## Considerations

Please note the following considerations when using the collected QA pairs:

- *[Q&A Dataset]* **Risk of Duplicate or Near-Duplicate Content**: The raw Q&A dataset is large and includes minor paraphrases.
- *[Retrieval Dataset]* **Sparse Relevance**: As raw FAQ data, each question typically has one “best” (on-page) answer. Additional valid answers may exist on other websites but are not labeled as relevant.
- **Language Detection Limitations**: Some QA pairs mix languages or contain brand names, which can confuse automatic language classification.
- **No Guarantee of Factual Accuracy**: Answers reflect the content of the source websites. They may include outdated, biased, or incorrect information.
- **Copyright and Privacy**: Please ensure compliance with all applicable laws and the source websites’ terms.

## License

The **Collection of WebFAQ Datasets** is shared under the [**Creative Commons Attribution 4.0 (CC BY 4.0)**](https://creativecommons.org/licenses/by/4.0/) license.

> **Note**: The dataset is derived from public webpages in Common Crawl snapshots (2022–2024) and is intended for **research purposes**. Each FAQ’s text is published by the original website under its own terms. Downstream users should verify any usage constraints with the **original websites** as well as [Common Crawl’s Terms of Use](https://commoncrawl.org/terms-of-use/).

## Citation

If you use this dataset in your research, please consider citing the associated paper:

```bibtex
@misc{webfaq2025,
  title        = {WebFAQ: A Multilingual Collection of Natural Q&A Datasets for Dense Retrieval},
  author       = {Anonymous Author(s)},
  year         = {2025},
  howpublished = {...},
  note         = {Under review}
}
```

## Contact

TBD

## Acknowledgement

We thank the Common Crawl and Web Data Commons teams for providing the underlying data, and all contributors who helped shape the WebFAQ project.

### Thank you

We hope the **Collection of WebFAQ Datasets** serves as a valuable resource for your research. Please consider citing it in any publications or projects that use it. If you encounter issues or want to contribute improvements, feel free to get in touch with us on HuggingFace or GitHub.

Happy researching!