|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:55:03.456062Z" |
|
}, |
|
"title": "Investigating Pretrained Language Models for Graph-to-Text Generation", |
|
"authors": [ |
|
{ |
|
"first": "Leonardo", |
|
"middle": [ |
|
"F R" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Research Training Group AIPHES and UKP Lab", |
|
"institution": "LMU Munich", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Schmitt", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Research Training Group AIPHES and UKP Lab", |
|
"institution": "LMU Munich", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Research Training Group AIPHES and UKP Lab", |
|
"institution": "LMU Munich", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Research Training Group AIPHES and UKP Lab", |
|
"institution": "LMU Munich", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Graph-to-text generation aims to generate fluent texts from graph-based data. In this paper, we investigate two recent pretrained language models (PLMs) and analyze the impact of different task-adaptive pretraining strategies for PLMs in graph-to-text generation. We present a study across three graph domains: meaning representations, Wikipedia knowledge graphs (KGs) and scientific KGs. We show that approaches based on PLMs BART and T5 achieve new state-of-the-art results and that task-adaptive pretraining strategies improve their performance even further. We report new state-of-the-art BLEU scores of 49.72 on AMR-LDC2017T10, 59.70 on WebNLG, and 25.66 on AGENDA datasets-a relative improvement of 31.8%, 4.5%, and 42.4%, respectively, with our models generating significantly more fluent texts than human references. In an extensive analysis, we identify possible reasons for the PLMs' success on graph-totext tasks. Our findings suggest that the PLMs benefit from similar facts seen during pretraining or fine-tuning, such that they perform well even when the input graph is reduced to a simple bag of node and edge labels. 1 Linearized representation: <H> Apollo 12 <R> backup pilot <T> Alfred Worden <H> Alan Bean <R> was a crew member of <T> Apollo 12 <H> Apollo 12 <R> operator <T> NASA <H> Alan Bean <R> occupation <T> Test pilot <H> Apollo 12 <R> commander <T> David Scott <H> Alan Bean <R> was selected by NASA <T> 1963 <H> Alan Bean <R> alma Mater <T> UT Austin B.S. 1955", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Graph-to-text generation aims to generate fluent texts from graph-based data. In this paper, we investigate two recent pretrained language models (PLMs) and analyze the impact of different task-adaptive pretraining strategies for PLMs in graph-to-text generation. We present a study across three graph domains: meaning representations, Wikipedia knowledge graphs (KGs) and scientific KGs. We show that approaches based on PLMs BART and T5 achieve new state-of-the-art results and that task-adaptive pretraining strategies improve their performance even further. We report new state-of-the-art BLEU scores of 49.72 on AMR-LDC2017T10, 59.70 on WebNLG, and 25.66 on AGENDA datasets-a relative improvement of 31.8%, 4.5%, and 42.4%, respectively, with our models generating significantly more fluent texts than human references. In an extensive analysis, we identify possible reasons for the PLMs' success on graph-totext tasks. Our findings suggest that the PLMs benefit from similar facts seen during pretraining or fine-tuning, such that they perform well even when the input graph is reduced to a simple bag of node and edge labels. 1 Linearized representation: <H> Apollo 12 <R> backup pilot <T> Alfred Worden <H> Alan Bean <R> was a crew member of <T> Apollo 12 <H> Apollo 12 <R> operator <T> NASA <H> Alan Bean <R> occupation <T> Test pilot <H> Apollo 12 <R> commander <T> David Scott <H> Alan Bean <R> was selected by NASA <T> 1963 <H> Alan Bean <R> alma Mater <T> UT Austin B.S. 1955", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Graphs are important data structures in NLP as they represent complex relations within a set of objects. For example, semantic and syntactic structures of sentences can be represented using different graph representations (e.g., AMRs, Banarescu et al., 2013; semantic-role labeling, Surdeanu et al., 2008 ; syntactic and semantic graphs, Belz et al., 2011) and knowledge graphs (KGs) are used to describe factual knowledge in the form of relations between entities (Gardent et al., 2017; Vougiouklis et al., 2018; Koncel-Kedziorski et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 229, |
|
"end": 258, |
|
"text": "AMRs, Banarescu et al., 2013;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 259, |
|
"end": 304, |
|
"text": "semantic-role labeling, Surdeanu et al., 2008", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 356, |
|
"text": "Belz et al., 2011)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 465, |
|
"end": 487, |
|
"text": "(Gardent et al., 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 488, |
|
"end": 513, |
|
"text": "Vougiouklis et al., 2018;", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 514, |
|
"end": 545, |
|
"text": "Koncel-Kedziorski et al., 2019)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Graph-to-text generation, a subtask of data-totext generation (Gatt and Krahmer, 2018) , aims to create fluent natural language text to describe an input graph (see Figure 1 ). This task is important for NLP applications such as dialogue generation (Moon et al., 2019) and question answering (Duan et al., 2017) . Recently, it has been shown that structured meaning representation, such as AMR or KG, can store the internal state of a dialog system, providing core semantic knowledge (Bonial et al., 2020; Bai et al., 2021) or can be the result of a database query for conversational QA (Yu et al., 2019) . Moreover, dialog states can be represented as KGs to encode compositionality and can be shared across different domains, slot types and dialog participators (Cheng et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 62, |
|
"end": 86, |
|
"text": "(Gatt and Krahmer, 2018)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 268, |
|
"text": "(Moon et al., 2019)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 292, |
|
"end": 311, |
|
"text": "(Duan et al., 2017)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 484, |
|
"end": 505, |
|
"text": "(Bonial et al., 2020;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 506, |
|
"end": 523, |
|
"text": "Bai et al., 2021)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 587, |
|
"end": 604, |
|
"text": "(Yu et al., 2019)", |
|
"ref_id": "BIBREF61" |
|
}, |
|
{ |
|
"start": 764, |
|
"end": 784, |
|
"text": "(Cheng et al., 2020)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 165, |
|
"end": 173, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Transfer learning has become ubiquitous in NLP and pretrained Transformer-based architectures (Vaswani et al., 2017) have considerably outperformed prior state of the art in various downstream tasks (Devlin et al., 2019; Yang et al., 2019a; Radford et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 116, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 199, |
|
"end": 220, |
|
"text": "(Devlin et al., 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 221, |
|
"end": 240, |
|
"text": "Yang et al., 2019a;", |
|
"ref_id": "BIBREF58" |
|
}, |
|
{ |
|
"start": 241, |
|
"end": 262, |
|
"text": "Radford et al., 2019)", |
|
"ref_id": "BIBREF40" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we analyze the applicability of two recent text-to-text pretrained language models (PLMs), BART and T5 (Raffel et al., 2019) , for graph-to-text generation. We choose these models because of their encoderdecoder architecture, which makes them particularly suitable for conditional text generation. Our study comprises three graph domains (meaning representations, Wikipedia KGs, and scientific KGs). We further introduce task-adaptive graph-to-text pretraining approaches for PLMs and demonstrate that such strategies improve the state of the art by a substantial margin.", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 139, |
|
"text": "(Raffel et al., 2019)", |
|
"ref_id": "BIBREF41" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "While recent works have shown the benefit of explicitly encoding the graph structure in graph-totext generation (Song et al., 2018; Ribeiro et al., , 2020 Schmitt et al., 2020; Zhao et al., 2020a, to name a few), our approaches based on PLMs consistently outperform these models, even though PLMs -as sequence models -do not exhibit any Text: As his children, we feel very terrible now.", |
|
"cite_spans": [ |
|
{ |
|
"start": 112, |
|
"end": 131, |
|
"text": "(Song et al., 2018;", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 154, |
|
"text": "Ribeiro et al., , 2020", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 176, |
|
"text": "Schmitt et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 177, |
|
"end": 196, |
|
"text": "Zhao et al., 2020a,", |
|
"ref_id": "BIBREF63" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Linearized representation: ( feel :ARG0 ( we ) :ARG1 ( terrible :degree ( very ) ) :time ( now ) :ARG1-of ( cause :ARG0 ( have-rel-role :ARG0 we :ARG1 ( he ) :ARG2 ( child ) ) ) ) Figure 1: Examples of (a) AMR and (b) WebNLG graphs, the input for the models and the reference texts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "graph-specific structural bias. 2 Simply representing the graph as a linear traversal (see Figure 1 ) leads to remarkable generation performance in the presence of a strong language model. In our analysis we investigate to what extent fine-tuned PLMs make use of the graph structure represented in the graph linearization. We notably observe that PLMs achieve high performance on two popular KG-totext benchmarks even when the KG is reduced to a mere bag of node and edge labels.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 99, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our contributions are the following:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We investigate and compare two PLMs, BART and T5, for graph-to-text generation, exploring language model adaptation (LMA) and supervised task adaptation (STA) pretraining, employing additional task-specific data. \u2022 Our approaches consistently outperform the state of the art by significant margins, ranging from 2.6 to 12.0 BLEU points, on three established graph-to-text benchmarks from different domains, exceeding specialized graph architectures (e.g., Graph Neural Networks, GNNs, Kipf and Welling, 2017). \u2022 In a crowdsourcing experiment, we demonstrate that our methods generate texts with significantly better fluency than existing works and the human references. \u2022 We discover that PLMs perform well even when trained on a shuffled linearized graph representation without any information about connectivity (bag of node and edge labels), which is surprising since prior studies showed that explicitly encoding the graph structure improves models trained from scratch (e.g., Zhao et al., 2020a) ; and investigate the possible reasons for such a good performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 983, |
|
"end": 1002, |
|
"text": "Zhao et al., 2020a)", |
|
"ref_id": "BIBREF63" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Graph-to-text Learning. Various neural models have been proposed to generate sentences from graphs from different domains. Konstas et al. (2017) propose the first neural approach for AMRto-text generation that uses a linearized input graph. Prior approaches for KG-to-text generation train text-to-text neural models using sequences of KG triples as input (Trisedya et al., 2018; Moryossef et al., 2019; Castro Ferreira et al., 2019; Ribeiro et al., 2021a) . Recent approaches (Marcheggiani and Perez Beltrachini, 2018; Song et al., 2018; Beck et al., 2018; Damonte and Cohen, 2019; Zhao et al., 2020a; Schmitt et al., 2021; Ribeiro et al., 2021b) propose architectures based on GNNs to directly encode the graph structure, whereas other efforts (Ribeiro et al., 2020; Schmitt et al., 2020; Yao et al., 2020; inject the graph structure information into Transformer-based architectures. The success of those approaches suggests that imposing a strong relational inductive bias into the graph-to-text model can assist the generation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 123, |
|
"end": 144, |
|
"text": "Konstas et al. (2017)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 356, |
|
"end": 379, |
|
"text": "(Trisedya et al., 2018;", |
|
"ref_id": "BIBREF51" |
|
}, |
|
{ |
|
"start": 380, |
|
"end": 403, |
|
"text": "Moryossef et al., 2019;", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 404, |
|
"end": 433, |
|
"text": "Castro Ferreira et al., 2019;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 434, |
|
"end": 456, |
|
"text": "Ribeiro et al., 2021a)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 477, |
|
"end": 519, |
|
"text": "(Marcheggiani and Perez Beltrachini, 2018;", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 520, |
|
"end": 538, |
|
"text": "Song et al., 2018;", |
|
"ref_id": "BIBREF49" |
|
}, |
|
{ |
|
"start": 539, |
|
"end": 557, |
|
"text": "Beck et al., 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 558, |
|
"end": 582, |
|
"text": "Damonte and Cohen, 2019;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 583, |
|
"end": 602, |
|
"text": "Zhao et al., 2020a;", |
|
"ref_id": "BIBREF63" |
|
}, |
|
{ |
|
"start": 603, |
|
"end": 624, |
|
"text": "Schmitt et al., 2021;", |
|
"ref_id": "BIBREF46" |
|
}, |
|
{ |
|
"start": 625, |
|
"end": 647, |
|
"text": "Ribeiro et al., 2021b)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 746, |
|
"end": 768, |
|
"text": "(Ribeiro et al., 2020;", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 769, |
|
"end": 790, |
|
"text": "Schmitt et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 791, |
|
"end": 808, |
|
"text": "Yao et al., 2020;", |
|
"ref_id": "BIBREF60" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Transformer-based models, such as BERT (Devlin et al., 2019) , XLNet (Yang et al., 2019b) , or RoBERTa , have established a qualitatively new level of baseline performance for many widely used natural language understanding (NLU) benchmarks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 60, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 69, |
|
"end": 89, |
|
"text": "(Yang et al., 2019b)", |
|
"ref_id": "BIBREF59" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pretrained Language Models. Pretrained", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Generative pretrained Transformer-based methods, such as GPT-2 (Radford et al., 2019) , BART , and T5 (Raffel et al., 2019) , are employed in many natural language generation (NLG) tasks. Mager et al. (2020) were the first to employ GPT-2, a decoder-only PLM, for AMR-to-text generation and use cycle consistency to improve the adequacy. In contrast, we are the first to investigate BART and T5 models, which have both a Transformer-based encoder and decoder, in AMRto-text generation. Recently, Harkous et al. (2020) and Kale (2020) demonstrate state-of-the-art results in different data-to-text datasets, employing GPT-2 and T5 models respectively. Radev et al. (2020) propose DART, a new data-to-text dataset, and train a BART model gradually augmenting the WebNLG training data with DART data. Hoyle et al. (2021) explore scaffolding objectives in PLMs and show gains in low-resource graph-to-text settings. Different from the above works, we focus on a general transfer learning strategies for graph-to-text generation, investigating task-adaptive pretraining approaches, employing additional collected task-specific data for different PLMs (BART and T5) and benchmarks. In addition, we provide a detailed analysis aimed at explaining the good performance of PLMs on KGto-text tasks.", |
|
"cite_spans": [ |
|
{ |
|
"start": 63, |
|
"end": 85, |
|
"text": "(Radford et al., 2019)", |
|
"ref_id": "BIBREF40" |
|
}, |
|
{ |
|
"start": 102, |
|
"end": 123, |
|
"text": "(Raffel et al., 2019)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 188, |
|
"end": 207, |
|
"text": "Mager et al. (2020)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 496, |
|
"end": 517, |
|
"text": "Harkous et al. (2020)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 522, |
|
"end": 533, |
|
"text": "Kale (2020)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 651, |
|
"end": 670, |
|
"text": "Radev et al. (2020)", |
|
"ref_id": "BIBREF39" |
|
}, |
|
{ |
|
"start": 798, |
|
"end": 817, |
|
"text": "Hoyle et al. (2021)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pretrained Language Models. Pretrained", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Recently, Gururangan et al. (2020) explored taskadaptive pretraining strategies for text classification. While our LMA (see \u00a73) is related to their DAPT as both use a self-supervised objective on a domainspecific corpus, they notably differ in that DAPT operates on the model input while LMA models the output. We are the first to show the benefits of additional task-specific pretraining in PLMs for graph-to-text tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pretrained Language Models. Pretrained", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "3 PLMs for Graph-to-Text Generation", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Pretrained Language Models. Pretrained", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We investigate BART and T5 (Raffel et al., 2019) , two PLMs based on the Transformer encoder-decoder architecture (Vaswani et al., 2017) , for graph-to-text generation. They mainly differ in how they are pretrained and the input corpora used for pretraining. We experiment with different T5 (small -60M parameters, base -220M, and large -770M) and BART (base -140M and large -400M) capacity models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 27, |
|
"end": 48, |
|
"text": "(Raffel et al., 2019)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 114, |
|
"end": 136, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF52" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models in this Study", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We fine-tune both PLMs for a few epochs on the supervised downstream graph-to-text datasets. For T5, in the supervised setup, we add a prefix \"translate from Graph to Text:\" before the graph input. We add this prefix to imitate the T5 setup, when translating between different languages.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Models in this Study", |
|
"sec_num": "3.1" |
|
}, |
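As a concrete illustration of the fine-tuning setup described above, the following is a minimal sketch using the Hugging Face transformers library; the prefix string is the one quoted in the text, while the model size, the example triple, and the target sentence are illustrative assumptions rather than the paper's exact configuration.

from transformers import T5TokenizerFast, T5ForConditionalGeneration

tokenizer = T5TokenizerFast.from_pretrained("t5-base")
model = T5ForConditionalGeneration.from_pretrained("t5-base")

# Linearized input graph with the task prefix prepended, as described above.
graph = "<H> Alan Bean <R> occupation <T> Test pilot"
source = "translate from Graph to Text: " + graph
target = "Alan Bean worked as a test pilot."

enc = tokenizer(source, return_tensors="pt", truncation=True)
labels = tokenizer(target, return_tensors="pt", truncation=True).input_ids

# One supervised fine-tuning step: the decoder is trained to emit the target text.
loss = model(input_ids=enc.input_ids, attention_mask=enc.attention_mask, labels=labels).loss
loss.backward()  # an optimizer step (e.g., AdamW) would follow in a full training loop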
|
{ |
|
"text": "Inspired by previous work (Konstas et al., 2017; Gururangan et al., 2020) , we investigate whether leveraging additional task-specific data can improve the PLMs' performance on graph-to-text generation. Task-specific data refers to a pretraining corpus that is more task-relevant and usually smaller than the text corpora used for taskindependent pretraining. In order to leverage the task-specific data, we add an intermediate adaptive pretraining step between the original pretraining and fine-tuning phases for graph-to-text generation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 26, |
|
"end": 48, |
|
"text": "(Konstas et al., 2017;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 49, |
|
"end": 73, |
|
"text": "Gururangan et al., 2020)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-specific Adaptation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "More precisely, we first continue pretraining BART and T5 using language model adaptation (LMA) or supervised task adaptation (STA) training. In the supervised approach, we use pairs of graphs and corresponding texts collected from the same or similar domain as the target task. In the LMA approach, we follow BART and T5 pretraining strategies for language modeling, using the reference texts that describe the graphs. Note that we do not use the graphs in the LMA pretraining, but only the target text of our task-specific data collections. The goal is to adapt the decoder to the domain of the final task (Gururangan et al., 2020). In particular, we randomly mask text spans, replacing 15% of the tokens. 3 Before evaluation, we finally fine-tune the models using the original training set as usual.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task-specific Adaptation", |
|
"sec_num": "3.2" |
|
}, |
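To make the LMA objective more concrete, below is a minimal, word-level sketch of T5-style span corruption on a target text, assuming the 15% masking ratio mentioned above; the actual models operate on subword tokens and use the tokenizers' own sentinel ids, so this illustrates the idea rather than the exact pretraining code.

import random

def span_corrupt(tokens, mask_ratio=0.15, span_len=3, seed=0):
    """Replace roughly mask_ratio of the tokens with sentinel markers; the
    target reproduces each dropped span after its sentinel (T5-style denoising)."""
    rng = random.Random(seed)
    n_to_mask = max(1, round(len(tokens) * mask_ratio))
    masked = set()
    while len(masked) < n_to_mask:
        start = rng.randrange(len(tokens))
        masked.update(range(start, min(len(tokens), start + span_len)))
    source, target, sentinel, i = [], [], 0, 0
    while i < len(tokens):
        if i in masked:
            source.append(f"<extra_id_{sentinel}>")
            target.append(f"<extra_id_{sentinel}>")
            while i < len(tokens) and i in masked:
                target.append(tokens[i])
                i += 1
            sentinel += 1
        else:
            source.append(tokens[i])
            i += 1
    return " ".join(source), " ".join(target)

# Only the target texts of the task-specific collections are used here; no graphs.
src, tgt = span_corrupt("the model generates the paper abstract from the title and the graph".split())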
|
{ |
|
"text": "We evaluate the text-to-text PLMs on three graph-to-text benchmarks: AMR (LDC2017T10), WebNLG (Gardent et al., 2017) , and AGENDA (Koncel-Kedziorski et al., 2019) . We chose those datasets because they comprise different domains and are widely used in prior work. Table 10 in Appendix shows statistics for each dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 116, |
|
"text": "(Gardent et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 130, |
|
"end": 162, |
|
"text": "(Koncel-Kedziorski et al., 2019)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 264, |
|
"end": 272, |
|
"text": "Table 10", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Datasets", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "is a semantic formalism that represents the meaning of a sentence as a rooted directed graph expressing \"who is doing what to whom\" (Banarescu et al., 2013) . In an AMR graph, nodes represent concepts and edges represent semantic relations. An instance in LDC2017T10 consists of a sentence annotated with its corresponding AMR graph. Following Mager et al. 2020, we linearize the AMR graphs using the PENMAN notation (see Figure 1a ). 4", |
|
"cite_spans": [ |
|
{ |
|
"start": 132, |
|
"end": 156, |
|
"text": "(Banarescu et al., 2013)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 422, |
|
"end": 431, |
|
"text": "Figure 1a", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "AMR. Abstract meaning representation (AMR)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "WebNLG. Each instance of WebNLG contains a KG from DBPedia (Auer et al., 2007) and a target text with one or multiple sentences that describe the graph. The test set is divided into two partitions: seen, which contains only DBPedia categories present in the training set, and unseen, which covers categories never seen during training. Their union is called all. Following previous work (Harkous et al., 2020), we prepend H , R , and T tokens before the head entity, the relation and tail entity of a triple (see Figure 1b ).", |
|
"cite_spans": [ |
|
{ |
|
"start": 59, |
|
"end": 78, |
|
"text": "(Auer et al., 2007)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 513, |
|
"end": 522, |
|
"text": "Figure 1b", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "AMR. Abstract meaning representation (AMR)", |
|
"sec_num": null |
|
}, |
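For illustration, a minimal sketch of the linearization with <H>, <R>, and <T> markers described above; the triples are taken from the running Apollo example and the helper name is ours.

def linearize_kg(triples):
    """Flatten (head, relation, tail) triples into the <H>/<R>/<T> input format."""
    parts = []
    for head, relation, tail in triples:
        parts += ["<H>", head, "<R>", relation, "<T>", tail]
    return " ".join(parts)

triples = [
    ("Apollo 12", "backup pilot", "Alfred Worden"),
    ("Alan Bean", "was a crew member of", "Apollo 12"),
]
print(linearize_kg(triples))
# <H> Apollo 12 <R> backup pilot <T> Alfred Worden <H> Alan Bean <R> was a crew member of <T> Apollo 12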
|
{ |
|
"text": "AGENDA. In this dataset, KGs are paired with scientific abstracts extracted from proceedings of AI conferences. Each sample contains the paper title, a KG, and the corresponding abstract. The KG contains entities corresponding to scientific terms and the edges represent relations between these entities. This dataset has loose alignments between the graph and the corresponding text as the graphs were automatically generated. The input for the models is a text containing the title, a sequence of all KG entities, and the triples. The target text is the paper abstract. We add special tokens into the triples in the same way as for WebNLG.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "AMR. Abstract meaning representation (AMR)", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In order to evaluate the proposed task-adaptive pretraining strategies for graph-to-text generation, we collect task-specific data for two graph domains: meaning representations (like AMR) and scientific data (like AGENDA). We did not attempt collecting additional data like WebNLG because the texts in this benchmark do not stem from a corpus but were specifically written by annotators.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional Task-specific Data", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "AMR Silver Data. In order to generate additional data for AMR, we sample two sentence collections of size 200K and 2M from the Gigaword 5 corpus and use a state-of-the-art AMR parser (Cai and Lam, 2020a) to parse them into AMR graphs. 6 For supervised pretraining, we condition a model on the AMR silver graphs to generate the corresponding sentences before fine-tuning it on gold AMR graphs. For self-supervised pretraining, we only use the sentences. 7 Semantic Scholar AI Data. We collect titles and abstracts of around 190K scientific papers from the Semantic Scholar (Ammar et al., 2018) taken from the proceedings of 36 top Computer Science/AI conferences. We construct KGs from the paper abstracts employing DyGIE++ (Wadden et al., 2019) , an information extraction system for scientific texts. Note that the AGENDA dataset was constructed using the older SciIE system (Luan et al., 2018) , which also extracts KGs from AI scientific papers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 183, |
|
"end": 203, |
|
"text": "(Cai and Lam, 2020a)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 235, |
|
"end": 236, |
|
"text": "6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 572, |
|
"end": 592, |
|
"text": "(Ammar et al., 2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 723, |
|
"end": 744, |
|
"text": "(Wadden et al., 2019)", |
|
"ref_id": "BIBREF54" |
|
}, |
|
{ |
|
"start": 876, |
|
"end": 895, |
|
"text": "(Luan et al., 2018)", |
|
"ref_id": "BIBREF31" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional Task-specific Data", |
|
"sec_num": "4.1" |
|
}, |
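A rough sketch of the silver-data construction described above; parse_to_amr is a hypothetical wrapper around an off-the-shelf AMR parser (the paper uses the parser of Cai and Lam, 2020a), and the filtering mirrors the footnote about dropping sentences that do not yield well-formed graphs.

def build_silver_amr_pairs(sentences, parse_to_amr):
    """Pair each sentence with its automatically parsed (silver) AMR graph.
    For supervised pretraining (STA), the graph is the source and the sentence
    the target; for LMA, only the sentences themselves are used."""
    pairs = []
    for sentence in sentences:
        graph = parse_to_amr(sentence)   # assumed to return a PENMAN string, or None on failure
        if graph is None:
            continue                     # skip sentences without well-formed AMR graphs
        pairs.append({"source": graph, "target": sentence})
    return pairs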
|
{ |
|
"text": "A second difference is that in our new dataset, the domain is broader as we collected data from 36 conferences compared to 12 from AGENDA. Furthermore, to prevent data leakage, all AGENDA samples used for performance evaluation are removed from our dataset. We will call the new dataset KGAIA (KGs from AI Abstracts). 8 Table 11 in Appendix shows relevant dataset statistics.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 320, |
|
"end": 328, |
|
"text": "Table 11", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We modify the BART and T5 implementations released by Hugging Face (Wolf et al., 2019) in order to adapt them to graph-to-text generation. For the KG datasets, we add the H , R , and T tokens to the models' vocabulary. We add all edge labels seen in the training set to the vocabulary of the with an initial learning rate of 3 \u2022 10 \u22125 . We employ a linearly decreasing learning rate schedule without warm-up. The batch and beam search sizes are chosen from {2,4,8} and {1,3,5}, respectively, based on the respective development set. Dev BLEU is used for model selection. Following previous works, we evaluate the results with BLEU (Papineni et al., 2002) , ME-TEOR (Denkowski and Lavie, 2014) , and chrF++ (Popovi\u0107, 2015) metrics. We also use Mover-Score (Zhao et al., 2019) , BERTScore , and BLEURT (Sellam et al., 2020) metrics, as they employ contextual and semantic knowledge and thus depend less on the surface symbols. Additionally, we perform a human evaluation (cf. \u00a75.4) quantifying the fluency, semantic adequacy and meaning similarity of the generated texts. Table 1 shows our results for the setting without additional pretraining, with additional self-supervised task-adaptive pretraining solely using the collected Gigaword sentences (LMA), and with additional supervised task adaptation (STA), before fine-tuning. We also report several recent results on the AMR test set. Mager et al. (2020) and Harkous et al. (2020) employ GPT-2 in their approaches. Note that GPT-2 only consists of a Transformer-based decoder.", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 86, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF57" |
|
}, |
|
{ |
|
"start": 631, |
|
"end": 654, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF36" |
|
}, |
|
{ |
|
"start": 665, |
|
"end": 692, |
|
"text": "(Denkowski and Lavie, 2014)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 706, |
|
"end": 721, |
|
"text": "(Popovi\u0107, 2015)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 755, |
|
"end": 774, |
|
"text": "(Zhao et al., 2019)", |
|
"ref_id": "BIBREF64" |
|
}, |
|
{ |
|
"start": 800, |
|
"end": 821, |
|
"text": "(Sellam et al., 2020)", |
|
"ref_id": "BIBREF48" |
|
}, |
|
{ |
|
"start": 1388, |
|
"end": 1407, |
|
"text": "Mager et al. (2020)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 1412, |
|
"end": 1433, |
|
"text": "Harkous et al. (2020)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1070, |
|
"end": 1077, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "5" |
|
}, |
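The vocabulary extension mentioned above can be sketched as follows with the Hugging Face API; the BART checkpoint name and the edge labels listed here are illustrative.

from transformers import BartTokenizerFast, BartForConditionalGeneration

tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
model = BartForConditionalGeneration.from_pretrained("facebook/bart-base")

# Graph markers plus edge labels observed in the training graphs (illustrative subset).
edge_labels = ["capital", "leaderName", "cityServed"]
tokenizer.add_tokens(["<H>", "<R>", "<T>"] + edge_labels)

# Grow the embedding matrix so the new tokens get trainable vectors.
model.resize_token_embeddings(len(tokenizer))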
|
{ |
|
"text": "Only considering approaches without task adaptation, BART large already achieves a considerable improvement of 5.77 BLEU and 3.98 METEOR scores over the previous state of the art. With a BLEU score of 45.80, T5 large performs best. The other metrics follow similar trends. See Table 13 in Appendix for evaluation with more metrics. The strong performance of both BART and T5 in the AMR dataset suggests that PLMs can infer the AMR structure by a simple linear sequence of the graph, in contrast to GNN-based models that explicitly consider the graph structure using messagepassing between adjacent nodes (Beck et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 604, |
|
"end": 623, |
|
"text": "(Beck et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 277, |
|
"end": 285, |
|
"text": "Table 13", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results on AMR-to-Text", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Task-specific Adaptation. LMA already brings some gains with T5 benefitting more than BART in most metrics. It still helps less than STA even though we only have automatically generated annotations. This suggests that the performance increases with STA do not only come from additional exposure to task-specific target texts and that the models learn how to handle graphs and the graphtext correspondence even with automatically generated AMRs. After STA, T5 achieves 49.72 BLEU points, the new state of the art for AMR-to-text generation. Interestingly, gains from STA with 2M over 200K are larger in BART than in T5, suggesting that large amounts of silver data may not be required for a good performance with T5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results on AMR-to-Text", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "In general, models pretrained on the STA setup converge faster than without task-specific adaptation. For example, T5 large without additional pretraining converges after 5 epochs of fine-tuning whereas T5 large with STA already converges after 2 epochs. hand, fully end-to-end models (Ribeiro et al., 2020; Schmitt et al., 2020) have strong performance on the seen dataset and usually perform poorly in unseen data. Models that explicitly encode the graph structure (Ribeiro et al., 2020; Zhao et al., 2020a) achieve the best performance among approaches that do not employ PLMs. Note that T5 is also used in Kale (2020) . Differences in our T5 setup include a modified model vocabulary, the use of beam search, the learning rate schedule and the prefix before the input graph. Our T5 approach achieves 59.70, 65.05 and 54.69 BLEU points on all, seen and unseen sets, the new state of the art. We conjecture that the performance gap between seen and unseen sets stems from the advantage obtained by a model seeing examples of relation-text pairs during fine-tuning. For example, the relation party (political party) was never seen during training and the model is required to generate a text that verbalizes the tuple: Abdul Taib Mahmud, party, Parti Bumiputera Sarawak . Interestingly, BART performs much worse than T5 on this benchmark, especially in the unseen partition with 9.7 BLEU points lower compared to T5.", |
|
"cite_spans": [ |
|
{ |
|
"start": 285, |
|
"end": 307, |
|
"text": "(Ribeiro et al., 2020;", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 308, |
|
"end": 329, |
|
"text": "Schmitt et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 610, |
|
"end": 621, |
|
"text": "Kale (2020)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results on AMR-to-Text", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "For lack of a suitable data source (cf. \u00a74), we did not explore our LMA or STA approaches for WebNLG. However, we additionally discuss crossdomain STA in Appendix B. dataset. We believe that their capacity to generate fluent text helps when generating paper abstracts, even though they were not pretrained in the scientific domain. BART large shows an impressive performance with a BLEU score of 23.65, which is 5.6 points higher than the previous state of the art.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results on WebNLG", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Task-specific Adaptation. On AGENDA, BART benefits more from our task-adaptive pretraining, achieving the new state of the art of 25.66 BLEU points, a further gain of 2 BLEU points compared to its performance without task adaptation. The improvements from task-adaptive pretraining are not as large as for AMR. We hypothesize that this is due to the fact that the graphs do not completely cover the target text (Koncel-Kedziorski et al., 2019) , making this dataset more challenging. See Table 12 in Appendix for more automatic metrics.", |
|
"cite_spans": [ |
|
{ |
|
"start": 411, |
|
"end": 443, |
|
"text": "(Koncel-Kedziorski et al., 2019)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 488, |
|
"end": 496, |
|
"text": "Table 12", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results on AGENDA", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "To further assess the quality of the generated text, we conduct a human evaluation on AMR and WebNLG via crowd sourcing on Amazon Mechanical Turk. 9 Following previous works (Gardent et al., 2017; Castro Ferreira et al., 2019) , we assess three quality criteria: (i) Fluency (i.e., does the text flow in a natural, easy-to-read manner?), for AMR and WebNLG; (ii) Meaning Similarity (i.e., how Arrabbiata sauce can be found in Italy where Sergio Mattarella is the leader and the capital city is Rome. Italians are the people who live there and the language spoken is Italian.", |
|
"cite_spans": [ |
|
{ |
|
"start": 174, |
|
"end": 196, |
|
"text": "(Gardent et al., 2017;", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 226, |
|
"text": "Castro Ferreira et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Italians live in Italy where the capital is Rome and the language is Italian. Sergio Mattarella is the leader of the country and arrabbiata sauce can be found there.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "Reference: Arrabbiata sauce is from Italy where the capital is Rome, Italian is the language spoken and Sergio Mattarella is a leader.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "T5", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Figure 2: Example graph with 5 triples, from WebNLG dev linearized with the neutral separator tag, denoted \u2022, (top left), its shuffled version (top right), texts generated with two fine-tuned versions of T5 small and a gold reference (bottom). Note that T5 can produce a reasonable text even when the input triples are shuffled randomly. close in meaning is the generated text to the reference sentence?) for AMR; (ii) Semantic Adequacy (i.e., does the text clearly express the data?) for WebNLG. We randomly select 100 generated texts of each model, which the annotators then rate on a 1-7 Likert scale. For each text, we collect scores from 3 annotators and average them. 10 Table 4 shows the results. Our approaches improve the fluency, meaning similarity, and semantic adequacy on both datasets compared to other stateof-the-art approaches with statistically significant margins (p<0.05). Interestingly, the highest fluency improvement (+0.97) is on AMR, where our approach also has the largest BLEU improvement (+8.10) over Harkous et al. (2020) . Finally, our models score higher than the references in fluency with statistically significant margins, highlighting their strong language generation abilities. 11", |
|
"cite_spans": [ |
|
{ |
|
"start": 1029, |
|
"end": 1050, |
|
"text": "Harkous et al. (2020)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 677, |
|
"end": 684, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "order shuf", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In Figure 3 , we investigate the PLMs' performance, measured with BLEU score, while varying (from 1% to 100%) the amount of training data used for 10 Inter-annotator agreement for the three criteria ranged from 0.40 to 0.79, with an average Krippendorff's \u03b1 of 0.56.", |
|
"cite_spans": [ |
|
{ |
|
"start": 147, |
|
"end": 149, |
|
"text": "10", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 11, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Limiting the Training Data", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "11 Examples of fluent generations can be found in the Ta fine-tuning. We find that, when fine-tuned with only 40% of the data, both BART and T5 already greatly improve the performance compared to using the entire training data in all three benchmarks. For example, BART fine-tuned on 40% of AMR training data achieves 91% of the BLEU score when fine-tuned on full data. Note that in a low-resource scenario in AMR and WebNLG, T5 considerably outperforms BART. In particular, with only 1% of training examples, the difference between T5 and BART is 7.51 and 5.64 BLEU points for AMR and WebNLG, respectively. This suggests that T5 is more data efficient when adapting to the new task, likewise our findings in AMR-STA (cf. \u00a75.1).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 54, |
|
"end": 56, |
|
"text": "Ta", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Limiting the Training Data", |
|
"sec_num": "5.5" |
|
}, |
|
{ |
|
"text": "We conduct further experiments to examine how much the PLMs consider the graph structure. To this end, we remove parentheses in AMRs and replace H , R , and T tokens with neutral separator tokens, denoted \u2022, for KGs, such that the graph structure is only defined by the order of node and edge labels. If we shuffle such a sequence, the graph structure is thus completely obscured and the input effectively becomes a bag of node and edge labels. See Figure 2 for an example of both a correctly ordered and a shuffled triple sequence. Table 5 shows the effect on T5's performance when its input contains correctly ordered triples (T5 order )", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 449, |
|
"end": 457, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 533, |
|
"end": 540, |
|
"text": "Table 5", |
|
"ref_id": "TABREF10" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Influence of the Graph Structure", |
|
"sec_num": "6" |
|
}, |
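A minimal sketch of how the neutral-separator and shuffled (bag-of-labels) inputs used in this analysis can be built; the helper name and example triples are illustrative.

import random

def neutral_linearization(triples, shuffle=False, sep="\u2022", seed=0):
    """Replace the <H>/<R>/<T> markers by a neutral separator; shuffling the labels
    removes any ordering information and yields a bag of node and edge labels."""
    labels = [label for triple in triples for label in triple]
    if shuffle:
        random.Random(seed).shuffle(labels)
    return " ".join(f"{sep} {label}" for label in labels)

triples = [("Antwerp International Airport", "city Served", "Antwerp"),
           ("Belgium", "leader Name", "Charles Michel")]
print(neutral_linearization(triples))               # structure encoded only by label order
print(neutral_linearization(triples, shuffle=True)) # structure completely obscured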
|
{ |
|
"text": "T/F Input Fact T5 order T5 shuf (1) S \u2022 German language \u2022 Antwerp \u2022 Antwerp \u2022 Antwerp International Air- port \u2022 Belgium \u2022 Belgium \u2022 Charles Michel \u2022 city Served \u2022 leader Name \u2022 Belgium \u2022 language \u2022 country", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative Analysis", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Antwerp International Airport serves the city of Antwerp. German is the language spoken in Belgium where Charles Michel is the leader.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative Analysis", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Antwerp International Airport serves the city of Antwerp in Belgium where the German language is spoken and Charles Michel is the leader.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative Analysis", |
|
"sec_num": "6.1" |
|
}, |
|
|
{ |
|
"text": "2) T \u2022 California \u2022 is Part Of \u2022 US \u2022 California \u2022 capital \u2022 Sacramento", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative Analysis", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "California is part of the United States and its capital is Sacramento.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative Analysis", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "California is part of the United States and its capital is Sacramento. vs. shuffled ones (T5 shuf ) for both training and evaluation. We first observe that T5 order only has marginally lower performance (around 2-4%) with the neutral separators than with the H / R / T tags or parentheses. 12 We see that as evidence that the graph structure is similarly well captured by T5 order . Without the graph structure (T5 shuf ), AMR-to-text performance drops significantly. Possible explanations of this drop are: (i) the relative ordering of the AMR graph is known to correlate with the target sentence order (Konstas et al., 2017) ; (ii) in contrast to WebNLG that contains common knowledge, the AMR dataset contains very specific sentences with higher surprisal; 13 (iii) AMRs are much more complex graph structures than the KGs from WebNLG and AGENDA. 14 On the other hand, KG-to-text performance is not much lower, indicating that most of the PLMs' success in this task stems from their language modeling rather than their graph encoding capabilities. We hypothesize that a PLM can match the entities in a shuffled input with sentences mentioning these entities from the pretraining or fine-tuning phase. It has recently been argued that large PLMs can recall certain common knowledge facts from pretraining (Petroni et al., 2019; Bosselut et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 290, |
|
"end": 292, |
|
"text": "12", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 604, |
|
"end": 626, |
|
"text": "(Konstas et al., 2017)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 1307, |
|
"end": 1329, |
|
"text": "(Petroni et al., 2019;", |
|
"ref_id": "BIBREF37" |
|
}, |
|
{ |
|
"start": 1330, |
|
"end": 1352, |
|
"text": "Bosselut et al., 2019)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative Analysis", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "(3) F \u2022 US \u2022 is Part Of \u2022 California \u2022 California \u2022 capital \u2022 Sacramento California'", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Quantitative Analysis", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "The example in Figure 2 confirms our impression. T5 shuf produces a text with the same content as", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 15, |
|
"end": 23, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Qualitative Analysis", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "12 See a more fine-grained comparison in Appendix C. 13 Perplexities estimated on the dev sets of AMR and WebNLG datasets, with GPT-2 fine-tuned on the corresponding training set, are 20.9 and 7.8, respectively.", |
|
"cite_spans": [ |
|
{ |
|
"start": 53, |
|
"end": 55, |
|
"text": "13", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative Analysis", |
|
"sec_num": "6.2" |
|
}, |
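The perplexity estimate in footnote 13 can be approximated as follows; this is a rough sketch assuming a GPT-2 checkpoint already fine-tuned on the corresponding training sentences, with dev_sentences standing in for the AMR or WebNLG dev references.

import math
import torch
from transformers import GPT2LMHeadModel, GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2").eval()  # fine-tuning on the training set is assumed

def corpus_perplexity(dev_sentences):
    total_nll, total_tokens = 0.0, 0
    with torch.no_grad():
        for sentence in dev_sentences:
            ids = tokenizer(sentence, return_tensors="pt").input_ids
            loss = model(ids, labels=ids).loss        # mean NLL over the predicted tokens
            total_nll += loss.item() * (ids.size(1) - 1)
            total_tokens += ids.size(1) - 1
    return math.exp(total_nll / total_tokens)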
|
{ |
|
"text": "14 In Appendix D, we present the graph properties of the datasets and discuss the differences. T5 order but does not need the correct triple structure to do so. Example (1) in Table 6 shows the output of both models with shuffled input. Interestingly, even T5 order produces a reasonable and truthful text. This suggests that previously seen facts serve as a strong guide during text generation, even for models that were fine-tuned with a clearly marked graph structure, suggesting that T5 order also relies more on language modeling than the graph structure. It does have more difficulties covering the whole input graph though. The fact that Antwerp is located in Belgium is missing from its output.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 176, |
|
"end": 183, |
|
"text": "Table 6", |
|
"ref_id": "TABREF12" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Qualitative Analysis", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "To further test our hypothesis that PLMs make use of previously seen facts during KG-to-text generation, we generate example true facts, corrupt them in a controlled setting, and feed them to both T5 order and T5 shuf to observe their output (examples (2)-(5) in Table 6 ). The model trained on correctly ordered input has learned a bit more to rely on the input graph structure. The false fact in example (3) with two triples is reliably transferred to the text by T5 order but not by T5 shuf , which silently corrects it. Also note that, in example (5), both models refuse to generate an incorrect fact. More examples can be found in Table 14 in the Appendix.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 263, |
|
"end": 270, |
|
"text": "Table 6", |
|
"ref_id": "TABREF12" |
|
}, |
|
{ |
|
"start": 636, |
|
"end": 644, |
|
"text": "Table 14", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Qualitative Analysis", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Our qualitative analysis illustrates that state-ofthe-art PLMs, despite their fluency capacities (cf. \u00a75.4), bear the risk of parroting back training sentences while ignoring the input structure. This issue can limit the practical usage of those models as, in many cases, it is important for a generation model to stay true to its input (Wiseman et al., 2017; Falke et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 337, |
|
"end": 359, |
|
"text": "(Wiseman et al., 2017;", |
|
"ref_id": "BIBREF56" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 379, |
|
"text": "Falke et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative Analysis", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We investigated two pretrained language models (PLMs) for graph-to-text generation and show that the pretraining strategies, language model adaptation (LMA) and supervised task adaptation (STA), can lead to notable improvements. Our approaches outperform the state of the art by a substantial margin on three graph-to-text benchmarks. Moreover, in a human evaluation our generated texts are perceived significantly more fluent than human references. Examining the influence of the graph structure on the text generation process, we find that PLMs may not always follow the graph structure and instead use memorized facts to guide the generation. A promising direction for future work is to explore ways of injecting a stronger graphstructural bias into PLMs, thus possibly leveraging their strong language modeling capabilities and keeping the output faithful to the input graph.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "In this supplementary material, we provide: (i) additional information about the data used in the experiments, and (ii) results that we could not fit into the main body of the paper.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Appendices", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We test three variants for the representation of the input AMR graph. Following previous work (Konstas et al., 2017; Mager et al., 2020) , we evaluate (i) only node representation, where the edge information is removed from the linearization; (ii) depth-first search (DFS) through the graph and the (iii) PENMAN representation. An example for each representation is illustrated below: In this experiment we employ T5 small . ", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 116, |
|
"text": "(Konstas et al., 2017;", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 117, |
|
"end": 136, |
|
"text": "Mager et al., 2020)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "A AMR Input Representation", |
|
"sec_num": null |
|
}, |
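For illustration, a small sketch of one plausible reading of the three input variants, applied to the PENMAN string from Figure 1a; (i) keeps only the concept tokens, (ii) keeps the depth-first order of concepts and edge labels without brackets, and (iii) is the full PENMAN string. This is our reconstruction for readability, not the paper's exact preprocessing code.

PENMAN = ("( feel :ARG0 ( we ) :ARG1 ( terrible :degree ( very ) ) :time ( now ) "
          ":ARG1-of ( cause :ARG0 ( have-rel-role :ARG0 we :ARG1 ( he ) :ARG2 ( child ) ) ) )")

def dfs_view(penman):
    """(ii) Depth-first traversal: drop the brackets, keep concepts and edge labels."""
    return " ".join(tok for tok in penman.split() if tok not in ("(", ")"))

def nodes_only_view(penman):
    """(i) Nodes only: additionally drop the edge labels (tokens starting with ':')."""
    return " ".join(tok for tok in penman.split()
                    if tok not in ("(", ")") and not tok.startswith(":"))

print(nodes_only_view(PENMAN))  # (i)
print(dfs_view(PENMAN))         # (ii)
print(PENMAN)                   # (iii)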
|
{ |
|
"text": "For a given task, it is not always possible to collect closely related data -as we saw, e.g., for WebNLG. We therefore report STA in a cross-domain setting for the different KG-to-text benchmarks. Table 8 shows the results using BART base and T5 base . While the texts in KGAIA and AGENDA share the domain of scientific abstracts, texts in WebNLG are more general. Also note that WebNLG graphs do not share any relations with the other KGs. For BART base , STA increases the performance in the cross-domain setting in most of the cases. For T5 base , STA in KGAIA improves the performance on WebNLG.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 197, |
|
"end": 204, |
|
"text": "Table 8", |
|
"ref_id": "TABREF16" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "B Cross-domain Adaptation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In general, we find that exploring additional adaptive pretraining for graph-to-text generation can improve the performance even if the data do not come from the same domain.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Cross-domain Adaptation", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Fine C Input Graph Size Figure 4 visualizes T5 small 's performance with respect to the number of input graph triples in WebNLG dataset. We observe that T5 order and T5 shuf perform similarly for inputs with only one triple but that the gap between the models increases with larger graphs. While it is obviously more difficult to reconstruct a larger graph than a smaller one, this also suggests that the graph structure is more taken into account for graphs with more than 2 triples. For the unseen setting, the performance gap for these graphs is even larger, suggesting that the PLM can make more use of the graph structure when it has to. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 32, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "STA on", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In Table 9 , we present the graph properties of the three datasets. All statistics are calculated using Austin is the capital of Texas where Andrews County Airport is located.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 9", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "D Graph Statistics", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Austin is the capital of Texas where Andrews County Airport is located.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "D Graph Statistics", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "(8) F \u2022 Austin \u2022 capital \u2022 Texas \u2022 Andrews County Airport \u2022 location \u2022 Texas", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "D Graph Statistics", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The capital of Austin is Texas and Andrews County Airport is located in Texas.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "D Graph Statistics", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Andrews County Airport is located in Texas where Austin is the capital. Table 14 : Example generations from shuffled (S), true (T), and corrupted (F) triple facts by T5 small , fine-tuned on correctly ordered triples (order) and randomly shuffled input (shuf ).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 72, |
|
"end": 80, |
|
"text": "Table 14", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "D Graph Statistics", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Reference I had to deal with verbal abuse from my dad for a long 8 years before I came to uni and honestly, the only reason why I'm here is because it was the only way out. T5 I had to deal with 8 years of verbal abuse from my dad before coming to university and honestly the only reason I'm here is because it's the only way out. BART I had to deal with my dad's verbal abuse for 8 years long before coming to uni and honestly the only reason I'm here is because it's the only way out. Mager et al. (2020) i've had to deal with verbal abuse from my dad for 8 years (before i came to uni i was honestly the only reason i was here) and it's only because of the way it is.", |
|
"cite_spans": [ |
|
{ |
|
"start": 487, |
|
"end": 506, |
|
"text": "Mager et al. (2020)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "AMR", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Aaron Turner is an electric guitar player who has played with the black metal band Twilight and with Old Man Gloom. Death metal is a musical fusion of black metal.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reference", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Aaron Turner plays the electric guitar and is associated with the band Twilight. He is also a member of the Old Man Gloom band. Black metal and death metal are both genres of music.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "T5", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The black metal genre is a variation of the death metal genre. It is also where the band, Twilight, and the alternative rock band, Old Man Gloom, are from. One of the members of the band is Aaron Turner, who plays the electric guitar. Harkous et al. (2020) Aaron Turner, a.k.a. Black Metal, is a member of the Twilight (band) and Old Man Gloom bands. He also plays electric guitar and has a strong connection with the Death Metal genre. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 235, |
|
"end": 256, |
|
"text": "Harkous et al. (2020)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 310, |
|
"end": 325, |
|
"text": "Twilight (band)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "BART", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Our code is available at https://github.com/UKPLab/plms-graph2text.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The model architecture does not explicitly encode the graph structure, i.e., which entities are connected to each other, but has to retrieve it from a sequence that tries to encode this information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Please, refer to andRaffel et al. (2019) for details about the self-supervised pretraining strategies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Details of the preprocessing procedure of AMRs are provided in Appendix A.5 https://catalog.ldc.upenn.edu/LDC2003T056 We filter out sentences that do not yield well-formed AMR graphs.7 Gigaword and AMR datasets share similar data sources.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We will release the collected additional task-specific data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We exclude AGENDA because its texts are scientific in nature and annotators are not necessarily AI experts.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We thank our anonymous reviewers for their thoughtful feedback. Leonardo F. R. Ribeiro is supported by the German Research Foundation (DFG) as part of the Research Training Group \"Adaptive Preparation of Information form Heterogeneous Sources\" (AIPHES, GRK 1994/1) and as part of the DFG funded project UKP-SQuARE with the number GU 798/29-1. Martin Schmitt is supported by the BMBF as part of the project MLWin (01IS18050) and by the German Academic Scholarship Foundation (Studienstiftung des deutschen Volkes).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "WebNLG AGENDA min, avg and max number of nodes 2 28.6 335 2 6.8 15 2 10.5 80 min, avg and max node degrees 1 2.2 21 1 1.7 7 1 1.67 15 min, avg and max number of edges 1 32.3 554 1 5.9 14 1 8.8 124 min, avg and max graph diameter 1 12.2 40 1 4.1 10 1 3.1 20 min, avg and max shortest path length 0 7.49 40 0 2.4 10 0 2.3 20 Table 9 : Graph statistics of AMR, WebNLG and AGENDA datasets. The values are calculated using the training data. Note that AMR graphs contain a more complex structure than WebNLG and AGENDA graphs.the Levi transformation (Beck et al., 2018) ", |
|
"cite_spans": [ |
|
{ |
|
"start": 545, |
|
"end": 564, |
|
"text": "(Beck et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 323, |
|
"end": 330, |
|
"text": "Table 9", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "AMR", |
|
"sec_num": null |
|
}, |
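The graph statistics in Table 9 can be reproduced with standard graph tooling. Below is a minimal sketch, assuming networkx and not the authors' evaluation code, that applies the Levi transformation (each relation label becomes an intermediate node between its head and tail entities) and computes the reported quantities for one graph; diameter and shortest-path length are computed on the largest connected component, which is an assumption rather than a documented choice.

```python
import networkx as nx

def levi_graph(triples):
    # Levi transformation: one relation node per triple occurrence.
    g = nx.Graph()
    for idx, (h, r, t) in enumerate(triples):
        rel = f"{r}#{idx}"
        g.add_edge(h, rel)
        g.add_edge(rel, t)
    return g

def graph_stats(triples):
    g = levi_graph(triples)
    # restrict path-based measures to the largest connected component
    comp = g.subgraph(max(nx.connected_components(g), key=len))
    return {
        "num_nodes": g.number_of_nodes(),
        "num_edges": g.number_of_edges(),
        "avg_degree": sum(d for _, d in g.degree()) / g.number_of_nodes(),
        "diameter": nx.diameter(comp),
        "avg_shortest_path": nx.average_shortest_path_length(comp),
    }

print(graph_stats([("Austin", "capital", "Texas"),
                   ("Andrews County Airport", "location", "Texas")]))
```

Aggregating these per-graph values over the training split (minimum, average, maximum) yields numbers comparable to Table 9.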
|
{ |
|
"text": "Reference in this paper, a new array signal processing technique by using particle swarm optimization is proposed to identify multipath channel parameters. the proposed array signal processing technique provides estimates to the channel parameters by finding a global minimum of an optimization problem. since the optimization problem is formulated in the cross-ambiguity function (caf) domain of the transmitted signal and the received array outputs, the proposed array signal processing technique is called as particle swarm optimization. the performance of the particle swarm optimization is compared with the space alternating generalized expectation maximization technique and with another recently proposed pso based technique for various snr values. simulation results indicate the superior performance of the pso based technique over mentioned techniques for all snr values.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Examples", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "this paper, we propose an array signal processing technique based on particle swarm optimization to estimate the multipath channel parameters from the received and the transmitted signal in a cross-ambiguity domain. the proposed array signal processing technique is formulated as an optimization problem and particle swarm optimization is compared with the generalized expectation maximization technique. simulation results show that the proposed particle swarm optimization outperforms the pso based technique for a wide range of snr values. in addition, the proposed particle swarm optimization is computationally more efficient than the gem based technique for small snr values.BART in this paper, a new array signal processing technique based on particle swarm optimization is proposed. the proposed array signal processing technique is used to estimate the multipath channel parameters from the transmitted signal. the proposed array signal processing technique is formulated as an optimization problem in the cross-ambiguity domain. the particle swarm optimization is compared with the generalized expectation maximization technique. simulation results show that the proposed particle swarm optimization outperforms the pso based technique for all snr values. furthermore, the proposed particle swarm optimization is able to estimate the channel parameters more accurately than the generalized expectation maximization technique.Ribeiro et al. (2020) in this paper, a novel array signal processing technique based on particle swarm optimization is proposed to estimate the multipath channel parameters from the transmitted signal. the proposed array signal processing technique uses particle swarm optimization to estimate the multipath channel parameters. the proposed array signal processing technique is formulated as an optimization problem. simulation results show that the proposed array signal processing technique outperforms the conventional generalized expectation maximization technique and the pso based technique is robust to the snr values. Table 16 : Examples of text generated by the different models trained on the AGENDA dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 2061, |
|
"end": 2069, |
|
"text": "Table 16", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "T5", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Construction of the literature graph in semantic scholar", |
|
"authors": [ |
|
{ |
|
"first": "Waleed", |
|
"middle": [], |
|
"last": "Ammar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Groeneveld", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chandra", |
|
"middle": [], |
|
"last": "Bhagavatula", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miles", |
|
"middle": [], |
|
"last": "Crawford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doug", |
|
"middle": [], |
|
"last": "Downey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [], |
|
"last": "Dunkelberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Elgohary", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sergey", |
|
"middle": [], |
|
"last": "Feldman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vu", |
|
"middle": [], |
|
"last": "Ha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rodney", |
|
"middle": [], |
|
"last": "Kinney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Kohlmeier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tyler", |
|
"middle": [], |
|
"last": "Murray", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hsu-Han", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Ooi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joanna", |
|
"middle": [], |
|
"last": "Peters", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Power", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucy", |
|
"middle": [], |
|
"last": "Skjonsberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zheng", |
|
"middle": [], |
|
"last": "Wilhelm", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Madeleine", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Van Zuylen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "3", |
|
"issue": "", |
|
"pages": "84--91", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N18-3011" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Waleed Ammar, Dirk Groeneveld, Chandra Bhagavat- ula, Iz Beltagy, Miles Crawford, Doug Downey, Ja- son Dunkelberger, Ahmed Elgohary, Sergey Feld- man, Vu Ha, Rodney Kinney, Sebastian Kohlmeier, Kyle Lo, Tyler Murray, Hsu-Han Ooi, Matthew Pe- ters, Joanna Power, Sam Skjonsberg, Lucy Wang, Chris Wilhelm, Zheng Yuan, Madeleine van Zuylen, and Oren Etzioni. 2018. Construction of the litera- ture graph in semantic scholar. In Proceedings of the 2018 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 3 (Industry Papers), pages 84-91, New Orleans -Louisiana. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Repulsive bayesian sampling for diversified attention modeling", |
|
"authors": [], |
|
"year": 2019, |
|
"venue": "4th workshop on Bayesian Deep Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bang An. 2019. Repulsive bayesian sampling for di- versified attention modeling. In 4th workshop on Bayesian Deep Learning (NeurIPS 2019).", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Dbpedia: A nucleus for a web of open data", |
|
"authors": [ |
|
{ |
|
"first": "S\u00f6ren", |
|
"middle": [], |
|
"last": "Auer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [], |
|
"last": "Bizer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgi", |
|
"middle": [], |
|
"last": "Kobilarov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jens", |
|
"middle": [], |
|
"last": "Lehmann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Cyganiak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Ives", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2007, |
|
"venue": "Proceedings of the 6th International The Semantic Web and 2nd Asian Conference on Asian Semantic Web Conference, ISWC'07/ASWC'07", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "722--735", |
|
"other_ids": { |
|
"DOI": [ |
|
"https://link.springer.com/chapter/10.1007/978-3-540-76298-0_52" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "S\u00f6ren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary Ives. 2007. Dbpedia: A nucleus for a web of open data. In Proceedings of the 6th International The Seman- tic Web and 2nd Asian Conference on Asian Se- mantic Web Conference, ISWC'07/ASWC'07, page 722-735, Berlin, Heidelberg. Springer-Verlag.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Semantic representation for dialogue modeling", |
|
"authors": [ |
|
{ |
|
"first": "Xuefeng", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yulong", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linfeng", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4430--4445", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.acl-long.342" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xuefeng Bai, Yulong Chen, Linfeng Song, and Yue Zhang. 2021. Semantic representation for dialogue modeling. In Proceedings of the 59th Annual Meet- ing of the Association for Computational Linguistics and the 11th International Joint Conference on Nat- ural Language Processing (Volume 1: Long Papers), pages 4430-4445, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Abstract Meaning Representation for sembanking", |
|
"authors": [ |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Banarescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Bonial", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shu", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Madalina", |
|
"middle": [], |
|
"last": "Georgescu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kira", |
|
"middle": [], |
|
"last": "Griffitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulf", |
|
"middle": [], |
|
"last": "Hermjakob", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Knight", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Koehn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathan", |
|
"middle": [], |
|
"last": "Schneider", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 7th Linguistic Annotation Workshop and Interoperability with Discourse", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "178--186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laura Banarescu, Claire Bonial, Shu Cai, Madalina Georgescu, Kira Griffitt, Ulf Hermjakob, Kevin Knight, Philipp Koehn, Martha Palmer, and Nathan Schneider. 2013. Abstract Meaning Representation for sembanking. In Proceedings of the 7th Linguis- tic Annotation Workshop and Interoperability with Discourse, pages 178-186, Sofia, Bulgaria. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Graph-to-sequence learning using gated graph neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Beck", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gholamreza", |
|
"middle": [], |
|
"last": "Haffari", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "273--283", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Beck, Gholamreza Haffari, and Trevor Cohn. 2018. Graph-to-sequence learning using gated graph neural networks. In Proceedings of the 56th Annual Meeting of the Association for Com- putational Linguistics (Volume 1: Long Papers), pages 273-283, Melbourne, Australia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "The first surface realisation shared task: Overview and evaluation results", |
|
"authors": [ |
|
{ |
|
"first": "Anja", |
|
"middle": [], |
|
"last": "Belz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "White", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dominic", |
|
"middle": [], |
|
"last": "Espinosa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Kow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deirdre", |
|
"middle": [], |
|
"last": "Hogan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanda", |
|
"middle": [], |
|
"last": "Stent", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 13th European Workshop on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "217--226", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anja Belz, Michael White, Dominic Espinosa, Eric Kow, Deirdre Hogan, and Amanda Stent. 2011. The first surface realisation shared task: Overview and evaluation results. In Proceedings of the 13th European Workshop on Natural Language Genera- tion, pages 217-226, Nancy, France. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Dialogue-AMR: Abstract Meaning Representation for dialogue", |
|
"authors": [ |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Bonial", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucia", |
|
"middle": [], |
|
"last": "Donatelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mitchell", |
|
"middle": [], |
|
"last": "Abrams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephanie", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Lukin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Tratz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Marge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ron", |
|
"middle": [], |
|
"last": "Artstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Traum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clare", |
|
"middle": [], |
|
"last": "Voss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 12th Language Resources and Evaluation Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "684--695", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Claire Bonial, Lucia Donatelli, Mitchell Abrams, Stephanie M. Lukin, Stephen Tratz, Matthew Marge, Ron Artstein, David Traum, and Clare Voss. 2020. Dialogue-AMR: Abstract Meaning Representation for dialogue. In Proceedings of the 12th Lan- guage Resources and Evaluation Conference, pages 684-695, Marseille, France. European Language Re- sources Association.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "COMET: Commonsense transformers for automatic knowledge graph construction", |
|
"authors": [ |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bosselut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannah", |
|
"middle": [], |
|
"last": "Rashkin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maarten", |
|
"middle": [], |
|
"last": "Sap", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chaitanya", |
|
"middle": [], |
|
"last": "Malaviya", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asli", |
|
"middle": [], |
|
"last": "Celikyilmaz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4762--4779", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1470" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Antoine Bosselut, Hannah Rashkin, Maarten Sap, Chai- tanya Malaviya, Asli Celikyilmaz, and Yejin Choi. 2019. COMET: Commonsense transformers for au- tomatic knowledge graph construction. In Proceed- ings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4762-4779, Florence, Italy. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "AMR parsing via graph-sequence iterative inference", |
|
"authors": [ |
|
{ |
|
"first": "Deng", |
|
"middle": [], |
|
"last": "Cai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1290--1301", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.119" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deng Cai and Wai Lam. 2020a. AMR parsing via graph-sequence iterative inference. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1290-1301, On- line. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "The Thirty-Second Innovative Applications of Artificial Intelligence Conference", |
|
"authors": [], |
|
"year": 2020, |
|
"venue": "The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence", |
|
"volume": "2020", |
|
"issue": "", |
|
"pages": "7464--7471", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Deng Cai and Wai Lam. 2020b. Graph transformer for graph-to-sequence learning. In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, February 7-12, 2020, pages 7464-7471. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Neural data-to-text generation: A comparison between pipeline and end-to-end architectures", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Thiago Castro Ferreira", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Van Der Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "Emiel Van Miltenburg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Krahmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "552--562", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1052" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thiago Castro Ferreira, Chris van der Lee, Emiel van Miltenburg, and Emiel Krahmer. 2019. Neu- ral data-to-text generation: A comparison between pipeline and end-to-end architectures. In Proceed- ings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Interna- tional Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 552-562, Hong Kong, China. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Conversational semantic parsing for dialog state tracking", |
|
"authors": [ |
|
{ |
|
"first": "Jianpeng", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Devang", |
|
"middle": [], |
|
"last": "Agrawal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shruti", |
|
"middle": [], |
|
"last": "H\u00e9ctor Mart\u00ednez Alonso", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joris", |
|
"middle": [], |
|
"last": "Bhargava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Federico", |
|
"middle": [], |
|
"last": "Driesen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dain", |
|
"middle": [], |
|
"last": "Flego", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitri", |
|
"middle": [], |
|
"last": "Kaplan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lin", |
|
"middle": [], |
|
"last": "Kartsaklis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhivya", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Piraviperumal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00d3", |
|
"middle": [], |
|
"last": "Diarmuid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anders", |
|
"middle": [], |
|
"last": "S\u00e9aghdha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Johannsen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8107--8117", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.emnlp-main.651" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianpeng Cheng, Devang Agrawal, H\u00e9ctor Mart\u00ednez Alonso, Shruti Bhargava, Joris Driesen, Federico Flego, Dain Kaplan, Dimitri Kartsaklis, Lin Li, Dhivya Piraviperumal, Jason D. Williams, Hong Yu, Diarmuid \u00d3 S\u00e9aghdha, and Anders Johannsen. 2020. Conversational semantic parsing for dialog state tracking. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8107-8117, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Structural neural encoders for AMR-to-text generation", |
|
"authors": [ |
|
{ |
|
"first": "Marco", |
|
"middle": [], |
|
"last": "Damonte", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shay", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Cohen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3649--3658", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1366" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marco Damonte and Shay B. Cohen. 2019. Structural neural encoders for AMR-to-text generation. In Pro- ceedings of the 2019 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 1 (Long and Short Papers), pages 3649-3658, Minneapolis, Minnesota. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Meteor universal: Language specific translation evaluation for any target language", |
|
"authors": [ |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Denkowski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Lavie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the Ninth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "376--380", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/W14-3348" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael Denkowski and Alon Lavie. 2014. Meteor uni- versal: Language specific translation evaluation for any target language. In Proceedings of the Ninth Workshop on Statistical Machine Translation, pages 376-380, Baltimore, Maryland, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Question generation for question answering", |
|
"authors": [ |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Duyu", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "866--874", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1090" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nan Duan, Duyu Tang, Peng Chen, and Ming Zhou. 2017. Question generation for question answering. In Proceedings of the 2017 Conference on Empiri- cal Methods in Natural Language Processing, pages 866-874, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Ranking generated summaries by correctness: An interesting but challenging application for natural language inference", |
|
"authors": [ |
|
{ |
|
"first": "Tobias", |
|
"middle": [], |
|
"last": "Falke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leonardo", |
|
"middle": [ |
|
"F R" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Prasetya Ajie Utama", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2214--2220", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1213" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tobias Falke, Leonardo F. R. Ribeiro, Prasetya Ajie Utama, Ido Dagan, and Iryna Gurevych. 2019. Ranking generated summaries by correctness: An in- teresting but challenging application for natural lan- guage inference. In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 2214-2220, Florence, Italy. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "The WebNLG challenge: Generating text from RDF data", |
|
"authors": [ |
|
{ |
|
"first": "Claire", |
|
"middle": [], |
|
"last": "Gardent", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anastasia", |
|
"middle": [], |
|
"last": "Shimorina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shashi", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [], |
|
"last": "Perez-Beltrachini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 10th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "124--133", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-3518" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Claire Gardent, Anastasia Shimorina, Shashi Narayan, and Laura Perez-Beltrachini. 2017. The WebNLG challenge: Generating text from RDF data. In Pro- ceedings of the 10th International Conference on Natural Language Generation, pages 124-133, San- tiago de Compostela, Spain. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Survey of the state of the art in natural language generation: Core tasks, applications and evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Albert", |
|
"middle": [], |
|
"last": "Gatt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emiel", |
|
"middle": [], |
|
"last": "Krahmer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "61", |
|
"issue": "1", |
|
"pages": "65--170", |
|
"other_ids": { |
|
"DOI": [ |
|
"https://dl.acm.org/doi/10.5555/3241691.3241693" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Albert Gatt and Emiel Krahmer. 2018. Survey of the state of the art in natural language generation: Core tasks, applications and evaluation. Journal of Artifi- cial Intelligence Research, 61(1):65-170.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Densely connected graph convolutional networks for graph-to-sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Zhijiang", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiyang", |
|
"middle": [], |
|
"last": "Teng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "7", |
|
"issue": "", |
|
"pages": "297--312", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00269" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhijiang Guo, Yan Zhang, Zhiyang Teng, and Wei Lu. 2019. Densely connected graph convolutional networks for graph-to-sequence learning. Transac- tions of the Association for Computational Linguis- tics, 7:297-312.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Don't stop pretraining: Adapt language models to domains and tasks", |
|
"authors": [ |
|
{ |
|
"first": "Ana", |
|
"middle": [], |
|
"last": "Suchin Gururangan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Swabha", |
|
"middle": [], |
|
"last": "Marasovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Swayamdipta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Doug", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Downey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "8342--8360", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.740" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Suchin Gururangan, Ana Marasovi\u0107, Swabha Swayamdipta, Kyle Lo, Iz Beltagy, Doug Downey, and Noah A. Smith. 2020. Don't stop pretraining: Adapt language models to domains and tasks. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8342-8360, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Have your text and use it too! end-to-end neural data-to-text generation with semantic fidelity", |
|
"authors": [ |
|
{ |
|
"first": "Hamza", |
|
"middle": [], |
|
"last": "Harkous", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabel", |
|
"middle": [], |
|
"last": "Groves", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Saffari", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2410--2424", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.coling-main.218" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hamza Harkous, Isabel Groves, and Amir Saffari. 2020. Have your text and use it too! end-to-end neural data-to-text generation with semantic fidelity. In Proceedings of the 28th International Conference on Computational Linguistics, pages 2410-2424, Barcelona, Spain (Online). International Committee on Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Promoting graph awareness in linearized graph-to-text generation", |
|
"authors": [ |
|
{ |
|
"first": "Alexander Miserlis", |
|
"middle": [], |
|
"last": "Hoyle", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ana", |
|
"middle": [], |
|
"last": "Marasovi\u0107", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Smith", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "944--956", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.findings-acl.82" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexander Miserlis Hoyle, Ana Marasovi\u0107, and Noah A. Smith. 2021. Promoting graph awareness in linearized graph-to-text generation. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 944-956, Online. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Text-to-text pre-training for data-totext tasks", |
|
"authors": [ |
|
{ |
|
"first": "Mihir", |
|
"middle": [], |
|
"last": "Kale", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihir Kale. 2020. Text-to-text pre-training for data-to- text tasks. arXiv e-prints.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "3rd International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd Inter- national Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Semi-Supervised Classification with Graph Convolutional Networks", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Kipf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Welling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 5th International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas N. Kipf and Max Welling. 2017. Semi- Supervised Classification with Graph Convolutional Networks. In Proceedings of the 5th International Conference on Learning Representations, ICLR 2017.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Text Generation from Knowledge Graphs with Graph Transformers", |
|
"authors": [ |
|
{ |
|
"first": "Rik", |
|
"middle": [], |
|
"last": "Koncel-Kedziorski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dhanush", |
|
"middle": [], |
|
"last": "Bekal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mirella", |
|
"middle": [], |
|
"last": "Lapata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2284--2293", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1238" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rik Koncel-Kedziorski, Dhanush Bekal, Yi Luan, Mirella Lapata, and Hannaneh Hajishirzi. 2019. Text Generation from Knowledge Graphs with Graph Transformers. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 2284-2293, Minneapolis, Minnesota. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Neural amr: Sequence-to-sequence models for parsing and generation", |
|
"authors": [ |
|
{ |
|
"first": "Ioannis", |
|
"middle": [], |
|
"last": "Konstas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Srinivasan", |
|
"middle": [], |
|
"last": "Iyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yejin", |
|
"middle": [], |
|
"last": "Choi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "146--157", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1014" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ioannis Konstas, Srinivasan Iyer, Mark Yatskar, Yejin Choi, and Luke Zettlemoyer. 2017. Neural amr: Sequence-to-sequence models for parsing and gener- ation. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Vol- ume 1: Long Papers), pages 146-157, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal ; Abdelrahman Mohamed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7871--7880", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Mar- jan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre- training for natural language generation, translation, and comprehension. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Roberta: A robustly optimized bert pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2020. Roberta: A robustly optimized bert pretraining ap- proach. arXiv e-prints.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Multi-task identification of entities, relations, and coreference for scientific knowledge graph construction", |
|
"authors": [ |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mari", |
|
"middle": [], |
|
"last": "Ostendorf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3219--3232", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1360" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi Luan, Luheng He, Mari Ostendorf, and Hannaneh Hajishirzi. 2018. Multi-task identification of enti- ties, relations, and coreference for scientific knowl- edge graph construction. In Proceedings of the 2018 Conference on Empirical Methods in Natural Lan- guage Processing, pages 3219-3232, Brussels, Bel- gium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "GPT-too: A language-model-first approach for AMR-to-text generation", |
|
"authors": [ |
|
{ |
|
"first": "Manuel", |
|
"middle": [], |
|
"last": "Mager", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ram\u00f3n", |
|
"middle": [], |
|
"last": "Fernandez Astudillo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tahira", |
|
"middle": [], |
|
"last": "Naseem", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arafat", |
|
"middle": [], |
|
"last": "Md", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Young-Suk", |
|
"middle": [], |
|
"last": "Sultan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Radu", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Florian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1846--1852", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manuel Mager, Ram\u00f3n Fernandez Astudillo, Tahira Naseem, Md Arafat Sultan, Young-Suk Lee, Radu Florian, and Salim Roukos. 2020. GPT-too: A language-model-first approach for AMR-to-text gen- eration. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 1846-1852, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Deep graph convolutional encoders for structured data to text generation", |
|
"authors": [ |
|
{ |
|
"first": "Diego", |
|
"middle": [], |
|
"last": "Marcheggiani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Laura", |
|
"middle": [ |
|
"Perez" |
|
], |
|
"last": "Beltrachini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 11th International Conference on Natural Language Generation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diego Marcheggiani and Laura Perez Beltrachini. 2018. Deep graph convolutional encoders for structured data to text generation. In Proceedings of the 11th International Conference on Natural Language Gen- eration, pages 1-9, Tilburg University, The Nether- lands. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "OpenDialKG: Explainable conversational reasoning with attention-based walks over knowledge graphs", |
|
"authors": [ |
|
{ |
|
"first": "Seungwhan", |
|
"middle": [], |
|
"last": "Moon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pararth", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anuj", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajen", |
|
"middle": [], |
|
"last": "Subba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "845--854", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1081" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seungwhan Moon, Pararth Shah, Anuj Kumar, and Ra- jen Subba. 2019. OpenDialKG: Explainable conver- sational reasoning with attention-based walks over knowledge graphs. In Proceedings of the 57th An- nual Meeting of the Association for Computational Linguistics, pages 845-854, Florence, Italy. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Step-by-step: Separating planning from realization in neural data-to-text generation", |
|
"authors": [ |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Moryossef", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Goldberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "2267--2277", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1236" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amit Moryossef, Yoav Goldberg, and Ido Dagan. 2019. Step-by-step: Separating planning from realization in neural data-to-text generation. In Proceedings of the 2019 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 2267-2277, Minneapolis, Minnesota. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Bleu: A method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting on Association for Computational Linguistics, ACL '02", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "311--318", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/1073083.1073135" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: A method for automatic eval- uation of machine translation. In Proceedings of the 40th Annual Meeting on Association for Com- putational Linguistics, ACL '02, pages 311-318, Stroudsburg, PA, USA. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "Association for Computational Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Fabio", |
|
"middle": [], |
|
"last": "Petroni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rockt\u00e4schel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anton", |
|
"middle": [], |
|
"last": "Bakhtin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuxiang", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Miller", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2463--2473", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1250" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fabio Petroni, Tim Rockt\u00e4schel, Sebastian Riedel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, and Alexander Miller. 2019. Language models as knowl- edge bases? In Proceedings of the 2019 Confer- ence on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 2463-2473, Hong Kong, China. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "chrF: character n-gram F-score for automatic MT evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Maja", |
|
"middle": [], |
|
"last": "Popovi\u0107", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Tenth Workshop on Statistical Machine Translation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "392--395", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W15-3049" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Maja Popovi\u0107. 2015. chrF: character n-gram F-score for automatic MT evaluation. In Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392-395, Lisbon, Portugal. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "Dart: Open-domain structured data record to text generation", |
|
"authors": [ |
|
{ |
|
"first": "Dragomir", |
|
"middle": [], |
|
"last": "Radev", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amrit", |
|
"middle": [], |
|
"last": "Rau", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhinand", |
|
"middle": [], |
|
"last": "Sivaprasad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chiachun", |
|
"middle": [], |
|
"last": "Hsieh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nazneen", |
|
"middle": [], |
|
"last": "Fatema Rajani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiangru", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aadit", |
|
"middle": [], |
|
"last": "Vyas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Neha", |
|
"middle": [], |
|
"last": "Verma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Krishna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yangxiaokang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nadia", |
|
"middle": [], |
|
"last": "Irwanto", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jessica", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Faiaz", |
|
"middle": [], |
|
"last": "Rahman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmad", |
|
"middle": [], |
|
"last": "Zaidi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Murori", |
|
"middle": [], |
|
"last": "Mutuma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yasin", |
|
"middle": [], |
|
"last": "Tarabar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankit", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dragomir Radev, Rui Zhang, Amrit Rau, Abhinand Sivaprasad, Chiachun Hsieh, Nazneen Fatema Ra- jani, Xiangru Tang, Aadit Vyas, Neha Verma, Pranav Krishna, Yangxiaokang Liu, Nadia Irwanto, Jessica Pan, Faiaz Rahman, Ahmad Zaidi, Murori Mutuma, Yasin Tarabar, Ankit Gupta, Tao Yu, Yi Chern Tan, Xi Victoria Lin, Caiming Xiong, and Richard Socher. 2020. Dart: Open-domain struc- tured data record to text generation. arXiv e-prints.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "Language models are unsupervised multitask learners", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rewon", |
|
"middle": [], |
|
"last": "Child", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Amodei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. arXiv e-prints.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Exploring the limits of transfer learning with a unified text-to", |
|
"authors": [ |
|
{ |
|
"first": "Colin", |
|
"middle": [], |
|
"last": "Raffel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sharan", |
|
"middle": [], |
|
"last": "Narang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Matena", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanqi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2019. Exploring the limits of transfer learning with a unified text-to-text trans- former. arXiv e-prints.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "Enhancing AMR-to-text generation with dual graph representations", |
|
"authors": [ |
|
{ |

"first": "Leonardo", |

"middle": [ |

"F R" |

], |

"last": "Ribeiro", |

"suffix": "" |

}, |

{ |

"first": "Claire", |

"middle": [], |

"last": "Gardent", |

"suffix": "" |

}, |

{ |

"first": "Iryna", |

"middle": [], |

"last": "Gurevych", |

"suffix": "" |

} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3183--3194", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1314" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonardo F. R. Ribeiro, Claire Gardent, and Iryna Gurevych. 2019. Enhancing AMR-to-text genera- tion with dual graph representations. In Proceed- ings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Inter- national Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 3183-3194, Hong Kong, China. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Smelting gold and silver for improved multilingual amr-to-text generation", |
|
"authors": [ |
|
{ |

"first": "Leonardo", |

"middle": [ |

"F R" |

], |

"last": "Ribeiro", |

"suffix": "" |

}, |

{ |

"first": "Jonas", |

"middle": [], |

"last": "Pfeiffer", |

"suffix": "" |

}, |

{ |

"first": "Yue", |

"middle": [], |

"last": "Zhang", |

"suffix": "" |

}, |

{ |

"first": "Iryna", |

"middle": [], |

"last": "Gurevych", |

"suffix": "" |

} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "2021", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonardo F. R. Ribeiro, Jonas Pfeiffer, Yue Zhang, and Iryna Gurevych. 2021a. Smelting gold and silver for improved multilingual amr-to-text generation. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, EMNLP 2021, Punta Cana, November 7-11, 2021.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Modeling global and local node contexts for text generation from knowledge graphs", |
|
"authors": [ |
|
{ |

"first": "Leonardo", |

"middle": [ |

"F R" |

], |

"last": "Ribeiro", |

"suffix": "" |

}, |

{ |

"first": "Yue", |

"middle": [], |

"last": "Zhang", |

"suffix": "" |

}, |

{ |

"first": "Claire", |

"middle": [], |

"last": "Gardent", |

"suffix": "" |

}, |

{ |

"first": "Iryna", |

"middle": [], |

"last": "Gurevych", |

"suffix": "" |

} |
|
], |
|
"year": 2020, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "589--604", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00332" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonardo F. R. Ribeiro, Yue Zhang, Claire Gardent, and Iryna Gurevych. 2020. Modeling global and local node contexts for text generation from knowl- edge graphs. Transactions of the Association for Computational Linguistics, 8:589-604.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "Structural adapters in pretrained language models for amr-to-text generation", |
|
"authors": [ |
|
{ |

"first": "Leonardo", |

"middle": [ |

"F R" |

], |

"last": "Ribeiro", |

"suffix": "" |

}, |

{ |

"first": "Yue", |

"middle": [], |

"last": "Zhang", |

"suffix": "" |

}, |

{ |

"first": "Iryna", |

"middle": [], |

"last": "Gurevych", |

"suffix": "" |

} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "2021", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leonardo F. R. Ribeiro, Yue Zhang, and Iryna Gurevych. 2021b. Structural adapters in pretrained language models for amr-to-text generation. In Pro- ceedings of the 2021 Conference on Empirical Meth- ods in Natural Language Processing, EMNLP 2021, Punta Cana, November 7-11, 2021.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Modeling graph structure via relative position for text generation from knowledge graphs", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Schmitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leonardo", |
|
"middle": [ |
|
"F R" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philipp", |
|
"middle": [], |
|
"last": "Dufter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hinrich", |
|
"middle": [], |
|
"last": "Sch\u00fctze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the Fifteenth Workshop on Graph-Based Methods for Natural Language Processing (TextGraphs-15)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "10--21", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Schmitt, Leonardo F. R. Ribeiro, Philipp Dufter, Iryna Gurevych, and Hinrich Sch\u00fctze. 2021. Mod- eling graph structure via relative position for text generation from knowledge graphs. In Proceedings of the Fifteenth Workshop on Graph-Based Methods for Natural Language Processing (TextGraphs-15), pages 10-21, Mexico City, Mexico. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "Philipp Dufter, Iryna Gurevych, and Hinrich Sch\u00fctze. 2020. Modeling graph structure via relative position for better text generation from knowledge graphs", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Schmitt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leonardo", |
|
"middle": [ |
|
"F R" |
|
], |
|
"last": "Ribeiro", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Schmitt, Leonardo F. R. Ribeiro, Philipp Dufter, Iryna Gurevych, and Hinrich Sch\u00fctze. 2020. Mod- eling graph structure via relative position for better text generation from knowledge graphs. arXiv e- prints.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "BLEURT: Learning robust metrics for text generation", |
|
"authors": [ |
|
{ |
|
"first": "Thibault", |
|
"middle": [], |
|
"last": "Sellam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dipanjan", |
|
"middle": [], |
|
"last": "Das", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ankur", |
|
"middle": [], |
|
"last": "Parikh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7881--7892", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.704" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thibault Sellam, Dipanjan Das, and Ankur Parikh. 2020. BLEURT: Learning robust metrics for text generation. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 7881-7892, Online. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "A graph-to-sequence model for AMRto-text generation", |
|
"authors": [ |
|
{ |
|
"first": "Linfeng", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiguo", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gildea", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1616--1626", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Linfeng Song, Yue Zhang, Zhiguo Wang, and Daniel Gildea. 2018. A graph-to-sequence model for AMR- to-text generation. In Proceedings of the 56th An- nual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1616- 1626, Melbourne, Australia. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "The CoNLL 2008 shared task on joint parsing of syntactic and semantic dependencies", |
|
"authors": [ |
|
{ |
|
"first": "Mihai", |
|
"middle": [], |
|
"last": "Surdeanu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Johansson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Meyers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llu\u00eds", |
|
"middle": [], |
|
"last": "M\u00e0rquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joakim", |
|
"middle": [], |
|
"last": "Nivre", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "CoNLL 2008: Proceedings of the Twelfth Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "159--177", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihai Surdeanu, Richard Johansson, Adam Meyers, Llu\u00eds M\u00e0rquez, and Joakim Nivre. 2008. The CoNLL 2008 shared task on joint parsing of syn- tactic and semantic dependencies. In CoNLL 2008: Proceedings of the Twelfth Conference on Computa- tional Natural Language Learning, pages 159-177, Manchester, England. Coling 2008 Organizing Com- mittee.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "GTR-LSTM: A triple encoder for sentence generation from RDF data", |
|
"authors": [ |
|
{ |

"first": "Bayu Distiawan", |

"middle": [], |

"last": "Trisedya", |

"suffix": "" |

}, |

{ |

"first": "Jianzhong", |

"middle": [], |

"last": "Qi", |

"suffix": "" |

}, |

{ |

"first": "Rui", |

"middle": [], |

"last": "Zhang", |

"suffix": "" |

}, |

{ |

"first": "Wei", |

"middle": [], |

"last": "Wang", |

"suffix": "" |

} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1627--1637", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1151" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bayu Distiawan Trisedya, Jianzhong Qi, Rui Zhang, and Wei Wang. 2018. GTR-LSTM: A triple encoder for sentence generation from RDF data. In Proceed- ings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), pages 1627-1637, Melbourne, Australia. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "\u0141ukasz", |

"middle": [], |

"last": "Kaiser", |

"suffix": "" |

}, |

{ |

"first": "Illia", |

"middle": [], |

"last": "Polosukhin", |

"suffix": "" |

} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "Neural wikipedian: Generating textual summaries from knowledge base triples", |
|
"authors": [ |
|
{ |
|
"first": "Pavlos", |
|
"middle": [], |
|
"last": "Vougiouklis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hady", |
|
"middle": [], |
|
"last": "Elsahar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucie-Aim\u00e9e", |
|
"middle": [], |
|
"last": "Kaffee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christophe", |
|
"middle": [], |
|
"last": "Gravier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fr\u00e9d\u00e9rique", |
|
"middle": [], |
|
"last": "Laforest", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathon", |
|
"middle": [], |
|
"last": "Hare", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Simperl", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Journal of Web Semantics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "52--53", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1016/j.websem.2018.07.002" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pavlos Vougiouklis, Hady Elsahar, Lucie-Aim\u00e9e Kaffee, Christophe Gravier, Fr\u00e9d\u00e9rique Laforest, Jonathon Hare, and Elena Simperl. 2018. Neu- ral wikipedian: Generating textual summaries from knowledge base triples. Journal of Web Semantics, 52-53:1 -15.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "Entity, relation, and event extraction with contextualized span representations", |
|
"authors": [ |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Wadden", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulme", |
|
"middle": [], |
|
"last": "Wennberg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5784--5789", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1585" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David Wadden, Ulme Wennberg, Yi Luan, and Han- naneh Hajishirzi. 2019. Entity, relation, and event extraction with contextualized span representations. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 5784- 5789, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "Amr-to-text generation with graph transformer", |
|
"authors": [ |
|
{ |
|
"first": "Tianming", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaojun", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanqi", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Transactions of the Association for Computational Linguistics", |
|
"volume": "8", |
|
"issue": "", |
|
"pages": "19--33", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1162/tacl_a_00297" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianming Wang, Xiaojun Wan, and Hanqi Jin. 2020. Amr-to-text generation with graph transformer. Transactions of the Association for Computational Linguistics, 8:19-33.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "Challenges in data-to-document generation", |
|
"authors": [ |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Wiseman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stuart", |
|
"middle": [], |
|
"last": "Shieber", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2253--2263", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1239" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sam Wiseman, Stuart Shieber, and Alexander Rush. 2017. Challenges in data-to-document generation. In Proceedings of the 2017 Conference on Empiri- cal Methods in Natural Language Processing, pages 2253-2263, Copenhagen, Denmark. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "Huggingface's transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R\u00e9mi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Funtow- icz, and Jamie Brew. 2019. Huggingface's trans- formers: State-of-the-art natural language process- ing.", |
|
"links": null |
|
}, |
|
"BIBREF58": { |
|
"ref_id": "b58", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Russ", |

"middle": [ |

"R" |

], |

"last": "Salakhutdinov", |

"suffix": "" |

}, |

{ |

"first": "Quoc", |

"middle": [ |

"V" |

], |

"last": "Le", |

"suffix": "" |

} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "5753--5763", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019a. Xlnet: Generalized autoregressive pretrain- ing for language understanding. In Advances in Neural Information Processing Systems, volume 32, pages 5753-5763. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF59": { |
|
"ref_id": "b59", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{ |

"first": "Russ", |

"middle": [ |

"R" |

], |

"last": "Salakhutdinov", |

"suffix": "" |

}, |

{ |

"first": "Quoc", |

"middle": [ |

"V" |

], |

"last": "Le", |

"suffix": "" |

} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "5753--5763", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019b. Xlnet: Generalized autoregressive pre- training for language understanding. In H. Wal- lach, H. Larochelle, A. Beygelzimer, F. d'Alch\u00e9-Buc, E. Fox, and R. Garnett, editors, Advances in Neu- ral Information Processing Systems 32, pages 5753- 5763. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF60": { |
|
"ref_id": "b60", |
|
"title": "Heterogeneous graph transformer for graphto-sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Shaowei", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianming", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaojun", |
|
"middle": [], |
|
"last": "Wan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7145--7154", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.640" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shaowei Yao, Tianming Wang, and Xiaojun Wan. 2020. Heterogeneous graph transformer for graph- to-sequence learning. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 7145-7154, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF61": { |
|
"ref_id": "b61", |
|
"title": "CoSQL: A conversational text-to-SQL challenge towards crossdomain natural language interfaces to databases", |
|
"authors": [ |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Heyang", |
|
"middle": [], |
|
"last": "Er", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Suyi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bo", |
|
"middle": [], |
|
"last": "Pang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victoria", |
|
"middle": [], |
|
"last": "Xi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianze", |
|
"middle": [], |
|
"last": "Chern Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihan", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Youxuan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michihiro", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungrok", |
|
"middle": [], |
|
"last": "Yasunaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Shim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zifan", |
|
"middle": [], |
|
"last": "Fabbri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luyao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuwen", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shreya", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vincent", |
|
"middle": [], |
|
"last": "Dixit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1962--1979", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1204" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tao Yu, Rui Zhang, Heyang Er, Suyi Li, Eric Xue, Bo Pang, Xi Victoria Lin, Yi Chern Tan, Tianze Shi, Zihan Li, Youxuan Jiang, Michihiro Yasunaga, Sungrok Shim, Tao Chen, Alexander Fabbri, Zifan Li, Luyao Chen, Yuwen Zhang, Shreya Dixit, Vin- cent Zhang, Caiming Xiong, Richard Socher, Wal- ter Lasecki, and Dragomir Radev. 2019. CoSQL: A conversational text-to-SQL challenge towards cross- domain natural language interfaces to databases. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 1962- 1979, Hong Kong, China. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF62": { |
|
"ref_id": "b62", |
|
"title": "Bertscore: Evaluating text generation with bert", |
|
"authors": [ |
|
{ |
|
"first": "Tianyi", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Varsha", |
|
"middle": [], |
|
"last": "Kishore", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felix", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kilian", |
|
"middle": [ |
|
"Q" |
|
], |
|
"last": "Weinberger", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. Bertscore: Eval- uating text generation with bert. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF63": { |
|
"ref_id": "b63", |
|
"title": "Bridging the structural gap between encoding and decoding for data-to-text generation", |
|
"authors": [ |
|
{ |
|
"first": "Chao", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marilyn", |
|
"middle": [], |
|
"last": "Walker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Snigdha", |
|
"middle": [], |
|
"last": "Chaturvedi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2481--2491", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.224" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chao Zhao, Marilyn Walker, and Snigdha Chaturvedi. 2020a. Bridging the structural gap between encod- ing and decoding for data-to-text generation. In Pro- ceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 2481- 2491, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF64": { |
|
"ref_id": "b64", |
|
"title": "MoverScore: Text generation evaluating with contextualized embeddings and earth mover distance", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxime", |
|
"middle": [], |
|
"last": "Peyrard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fei", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christian", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Meyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steffen", |
|
"middle": [], |
|
"last": "Eger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "563--578", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1053" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Zhao, Maxime Peyrard, Fei Liu, Yang Gao, Chris- tian M. Meyer, and Steffen Eger. 2019. MoverScore: Text generation evaluating with contextualized em- beddings and earth mover distance. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th Interna- tional Joint Conference on Natural Language Pro- cessing (EMNLP-IJCNLP), pages 563-578, Hong Kong, China. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF65": { |
|
"ref_id": "b65", |
|
"title": "Line graph enhanced AMR-to-text generation with mix-order graph attention networks", |
|
"authors": [ |
|
{ |
|
"first": "Yanbin", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruisheng", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Su", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "732--741", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.67" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yanbin Zhao, Lu Chen, Zhi Chen, Ruisheng Cao, Su Zhu, and Kai Yu. 2020b. Line graph enhanced AMR-to-text generation with mix-order graph at- tention networks. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, pages 732-741, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF66": { |
|
"ref_id": "b66", |
|
"title": "Modeling graph structure in transformer for better AMR-to-text generation", |
|
"authors": [ |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junhui", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Muhua", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Longhua", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guodong", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5459--5468", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1548" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jie Zhu, Junhui Li, Muhua Zhu, Longhua Qian, Min Zhang, and Guodong Zhou. 2019. Modeling graph structure in transformer for better AMR-to-text gen- eration. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natu- ral Language Processing (EMNLP-IJCNLP), pages 5459-5468, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "Performance of BART base and T5 base in the dev set when experimenting with different amounts of training data." |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"num": null, |
|
"type_str": "figure", |
|
"text": "chrF++ scores with respect to the number of triples for WebNLG seen and unseen test sets." |
|
}, |
|
"TABREF1": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "" |
|
}, |
|
"TABREF2": { |
|
"content": "<table><tr><td/><td/><td>BLEU</td><td/><td/><td>METEOR</td><td/><td/><td>chrF++</td><td/></tr><tr><td>Model</td><td>A</td><td>S</td><td>U</td><td>A</td><td>S</td><td>U</td><td>A</td><td>S</td><td>U</td></tr><tr><td/><td/><td/><td/><td/><td/><td/><td>-</td><td>-</td><td>-</td></tr><tr><td>Schmitt et al. (2020)</td><td>-</td><td>59.39</td><td>-</td><td>-</td><td>42.83</td><td>-</td><td>-</td><td>74.68</td><td>-</td></tr><tr><td>Ribeiro et al. (2020)</td><td>-</td><td>63.69</td><td>-</td><td>-</td><td>44.47</td><td>-</td><td>-</td><td>76.66</td><td>-</td></tr><tr><td>Zhao et al. (2020a)</td><td colspan=\"6\">52.78 64.42 38.23 41.00 46.00 37.00</td><td>-</td><td>-</td><td>-</td></tr><tr><td>based on PLMs</td><td/><td/><td/><td/><td/><td/><td/><td/><td/></tr><tr><td>Harkous et al. (2020)</td><td>52.90</td><td>-</td><td>-</td><td>42.40</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Kale (2020)</td><td colspan=\"6\">57.10 63.90 52.80 44.00 46.00 41.00</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Radev et al. (2020)</td><td colspan=\"6\">45.89 52.86 37.85 40.00 42.00 37.00</td><td>-</td><td>-</td><td>-</td></tr><tr><td>BARTbase</td><td>53.11</td><td/><td/><td/><td/><td/><td/><td/><td/></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "CastroFerreira et al. (2019) 51.68 56.35 38.92 32.00 41.00 21.00 ---Moryossef et al. (2019) 47.24 53.30 34.41 39.00 44.00 37.00 62.74 41.53 40.18 44.45 35.36 70.02 76.68 62.76 BARTlarge 54.72 63.45 43.97 42.23 45.49 38.61 72.29 77.57 66.53 T5small 56.34 65.05 45.37 42.78 45.94 39.29 73.31 78.46 67.69 T5base 59.17 64.64 52.55 43.19 46.02 41.49 74.82 78.40 70.92 T5large 59.70 64.71 53.67 44.18 45.85 42.26 75.40 78.29 72.25" |
|
}, |
|
"TABREF3": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Results on WebNLG. A, S and U stand for all, seen, and unseen partitions of the test set, respectively." |
|
}, |
|
"TABREF4": { |
|
"content": "<table><tr><td>Model</td><td>BLEU</td><td>M</td><td>BT</td></tr><tr><td>Koncel et al. 2019</td><td colspan=\"2\">14.30 18.80</td><td>-</td></tr><tr><td>An (2019)</td><td colspan=\"2\">15.10 19.50</td><td>-</td></tr><tr><td colspan=\"3\">Schmitt et al. (2020) 17.33 21.43</td><td>-</td></tr><tr><td colspan=\"3\">Ribeiro et al. (2020) 18.01 22.23</td><td>-</td></tr><tr><td>BART base</td><td colspan=\"3\">22.01 23.54 -13.02</td></tr><tr><td>BART large</td><td colspan=\"3\">23.65 25.19 -10.93</td></tr><tr><td>T5 small</td><td colspan=\"3\">20.22 21.62 -24.10</td></tr><tr><td>T5 base</td><td colspan=\"3\">20.73 21.88 -21.03</td></tr><tr><td>T5 large</td><td colspan=\"3\">22.15 23.73 -13.96</td></tr><tr><td colspan=\"2\">with task-adaptive pretraining</td><td/><td/></tr><tr><td>BART large + LMA</td><td colspan=\"3\">25.30 25.54 -08.79</td></tr><tr><td>T5 large + LMA</td><td colspan=\"3\">22.92 24.40 -10.39</td></tr><tr><td>BART large + STA</td><td colspan=\"3\">25.66 25.74 -08.97</td></tr><tr><td>T5 large + STA</td><td colspan=\"3\">23.69 24.92 -08.94</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "shows the results for the WebNLG test set. Neural pipeline models(Moryossef et al., 2019;Castro Ferreira et al., 2019) achieve strong performance in the unseen dataset. On the other" |
|
}, |
|
"TABREF5": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "" |
|
}, |
|
"TABREF6": { |
|
"content": "<table><tr><td>Model</td><td colspan=\"2\">AMR</td></tr><tr><td/><td>F</td><td>MS</td></tr><tr><td>Mager et al. (2020)</td><td>5.69 A</td><td>5.08 A</td></tr><tr><td>Harkous et al. (2020)</td><td>5.78 A</td><td>5.47 AB</td></tr><tr><td>T5 large</td><td>6.55 B</td><td>6.44 C</td></tr><tr><td>BART large</td><td>6.70 B</td><td>5.72 BC</td></tr><tr><td>Reference</td><td>5.91 A</td><td>-</td></tr><tr><td>Model</td><td colspan=\"2\">WebNLG</td></tr><tr><td/><td>F</td><td>SA</td></tr><tr><td colspan=\"2\">Castro Ferreira et al. (2019) 5.52 A</td><td>4.77 A</td></tr><tr><td>Harkous et al. (2020)</td><td colspan=\"2\">5.74 AB 6.21 B</td></tr><tr><td>T5 large</td><td>6.71 C</td><td>6.63 B</td></tr><tr><td>BART large</td><td>6.53 C</td><td>6.50 B</td></tr><tr><td>Reference</td><td>5.89 B</td><td>6.47 B</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "lists the results for the AGENDA test set. The models also show strong performance on this" |
|
}, |
|
"TABREF7": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "" |
|
}, |
|
"TABREF10": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Impact (measured with BLEU) of using a bag of entities and relations (shuf ) as input for T5 small ." |
|
}, |
|
"TABREF12": { |
|
"content": "<table><tr><td>: Example generations from shuffled (S), true (T), and corrupted (F) triple facts by T5 small , fine-tuned on</td></tr><tr><td>correctly ordered triples (order) and randomly shuffled input (shuf ).</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "" |
|
}, |
|
"TABREF13": { |
|
"content": "<table><tr><td>Input</td><td>BLEU</td></tr><tr><td colspan=\"2\">only nodes 28.22</td></tr><tr><td>DFS</td><td>34.94</td></tr><tr><td>PENMAN</td><td>38.27</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "shows the results on the AMR development set. The PENMAN representation leads to best results. Therefore, this representation is used in the rest of the experiments." |
|
}, |
|
"TABREF14": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Results on the AMR dev set using T5 small for different AMR linearizations." |
|
}, |
|
"TABREF16": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Effect (measured with BLEU score) of crossdomain STA." |
|
}, |
|
"TABREF17": { |
|
"content": "<table><tr><td>Model</td><td colspan=\"3\">chrF++ BS (F1) MS</td></tr><tr><td colspan=\"2\">Schmitt et al. (2020) 44.53</td><td>-</td><td>-</td></tr><tr><td colspan=\"2\">Ribeiro et al. (2020) 46.37</td><td>-</td><td>-</td></tr><tr><td>BART base BART large</td><td>48.02 50.44</td><td>89.36 88.74</td><td>34.33 32.24</td></tr><tr><td>T5 small T5 base T5 large</td><td>44.91 48.14 48.14</td><td>88.56 88.81 89.60</td><td>30.25 31.33 35.23</td></tr><tr><td colspan=\"2\">with task-adaptive pretraining</td><td/><td/></tr><tr><td>BART large + LMA</td><td>51.33</td><td>89.12</td><td>33.42</td></tr><tr><td>T5 large + LMA</td><td>49.37</td><td>89.75</td><td>36.13</td></tr><tr><td>BART large + STA T5 large + STA</td><td>51.63 50.27</td><td>89.27 89.93</td><td>34.28 36.86</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Statistics for the KGAIA dataset." |
|
}, |
|
"TABREF18": { |
|
"content": "<table><tr><td colspan=\"4\">Bold (Italic) indicates best scores without (with) task-</td></tr><tr><td>adaptive pretraining.</td><td/><td/><td/></tr><tr><td>Model</td><td colspan=\"3\">chrF++ BS (F1) MS</td></tr><tr><td>Guo et al. (2019)</td><td>57.30</td><td>-</td><td>-</td></tr><tr><td>Zhu et al. (2019)</td><td>64.05</td><td>-</td><td>-</td></tr><tr><td>Cai and Lam (2020b)</td><td>59.40</td><td>-</td><td>-</td></tr><tr><td>Wang et al. (2020)</td><td>65.80</td><td>-</td><td>-</td></tr><tr><td>Yao et al. (2020)</td><td>65.60</td><td>-</td><td>-</td></tr><tr><td>based on PLMs</td><td/><td/><td/></tr><tr><td>Mager et al. (2020)</td><td>63.89</td><td>-</td><td>-</td></tr><tr><td>BART base BART large</td><td colspan=\"3\">66.65 95.22 60.78 71.06 96.08 65.74</td></tr><tr><td>T5 small T5 base T5 large</td><td colspan=\"3\">68.78 95.62 63.70 70.81 95.99 65.63 72.57 96.27 67.37</td></tr><tr><td colspan=\"2\">with task-adaptive pretraining</td><td/><td/></tr><tr><td>BART large + LMA</td><td colspan=\"3\">71.14 95.94 64.75</td></tr><tr><td>T5 large + LMA</td><td colspan=\"3\">72.83 96.32 67.44</td></tr><tr><td colspan=\"4\">BART large + STA (200K) 72.26 96.21 66.75</td></tr><tr><td>BART large + STA (2M)</td><td colspan=\"3\">73.58 96.43 68.14</td></tr><tr><td>T5 large + STA (200K) T5 large + STA (2M)</td><td colspan=\"3\">74.09 96.51 68.86 74.79 96.59 69.53</td></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Results of the chrF++, BertScore (BS) and MoverScore (MS) scores for AGENDA test set." |
|
}, |
|
"TABREF19": { |
|
"content": "<table><tr><td>T/F</td><td>Input Facts</td><td>T5 order</td><td>T5 shuf</td></tr><tr><td>(1) S</td><td/><td/><td/></tr></table>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Results of the chrF++, BertScore (BS) and MoverScore (MS) scores for the LDC2017T10 test set. Bold (Italic) indicates the best score without (with) task-adaptive pretraining.\u2022 capital \u2022 leader Name \u2022 London \u2022 Pound sterling \u2022 United Kingdom \u2022 leader Name \u2022 United Kingdom \u2022 Elizabeth II \u2022 United Kingdom \u2022 Boris Johnson \u2022 London \u2022 currencyThe capital city is London, the currency is the Pound sterling and the leader is Elizabeth II. Boris Johnson is also a leader in the UK.The capital of the United Kingdom is London, the currency is the Pound sterling and the country is lead by Elizabeth II and Boris Johnson.(2) T \u2022 Germany \u2022 capital \u2022 BerlinBerlin is the capital of Germany.Berlin is the capital of Germany.(3)F \u2022 Berlin \u2022 capital \u2022 Germany Berlin's capital is Germany.Berlin is the capital of Germany.(4)F \u2022 Leinster \u2022 is Part Of \u2022 Dublin Leinster is part of Dublin. Leinster is part of Dublin. (5) F \u2022 Rome \u2022 capital \u2022 Italy Rome's capital is Italy. Rome is the capital of Italy. (6) T \u2022 Italy \u2022 capital \u2022 Rome Italy's capital is Rome. Rome is the capital of Italy. (7) T \u2022 Texas \u2022 capital \u2022 Austin \u2022 Andrews County Airport \u2022 location \u2022 Texas" |
|
}, |
|
"TABREF20": { |
|
"content": "<table/>", |
|
"type_str": "table", |
|
"html": null, |
|
"num": null, |
|
"text": "Examples of text generated by the different models. D refers to the dataset." |
|
} |
|
} |
|
} |
|
} |