|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T14:08:59.247613Z" |
|
}, |
|
"title": "Noise Robust Named Entity Understanding for Voice Assistants", |
|
"authors": [ |
|
{ |
|
"first": "Deepak", |
|
"middle": [], |
|
"last": "Muralidharan", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Joel", |
|
"middle": [], |
|
"last": "Ruben", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Antony", |
|
"middle": [], |
|
"last": "Moniz", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Sida", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Xiao", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Justine", |
|
"middle": [], |
|
"last": "Kao", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Pulman", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Atish", |
|
"middle": [], |
|
"last": "Kothari", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Ray", |
|
"middle": [], |
|
"last": "Shen", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yinying", |
|
"middle": [], |
|
"last": "Pan", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Kaul", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Seyed", |
|
"middle": [], |
|
"last": "Ibrahim", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Gang", |
|
"middle": [], |
|
"last": "Xiang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Dun", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yidan", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Pooja", |
|
"middle": [], |
|
"last": "Chitkara", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Xuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Alkesh", |
|
"middle": [], |
|
"last": "Patel", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kushal", |
|
"middle": [], |
|
"last": "Tayal", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Roger", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Grasch", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Jason", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Williams", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Lin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Named Entity Recognition (NER) and Entity Linking (EL) play an essential role in voice assistant interaction, but are challenging due to the special difficulties associated with spoken user queries. In this paper, we propose a novel architecture that jointly solves the NER and EL tasks by combining them in a joint reranking module. We show that our proposed framework improves NER accuracy by up to 3.13% and EL accuracy by up to 3.6% in F1 score. The features used also lead to better accuracies in other natural language understanding tasks, such as domain classification and semantic parsing.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Named Entity Recognition (NER) and Entity Linking (EL) play an essential role in voice assistant interaction, but are challenging due to the special difficulties associated with spoken user queries. In this paper, we propose a novel architecture that jointly solves the NER and EL tasks by combining them in a joint reranking module. We show that our proposed framework improves NER accuracy by up to 3.13% and EL accuracy by up to 3.6% in F1 score. The features used also lead to better accuracies in other natural language understanding tasks, such as domain classification and semantic parsing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Understanding named entities correctly when interacting with virtual assistants (e.g. \"Call Jon\", \"Play Adele hello\", \"Score for Warrior Kings game\") is crucial for a satisfying user experience. However, NER and EL methods that work well on written text often perform poorly in such applications: utterances are relatively short (with just 5 tokens, on average), so there is not much context to help disambiguate; speech recognizers make errors (\"Play Bohemian raspberry\" for \"Play Bohemian Rhapsody\"); users also make mistakes (\"Cristiano Nando\" for \"Cristiano Ronaldo\"); non-canonical forms of names are frequent (\"Shaq\" for \"Shaquille O'Neal\"); and users often mention new entities unknown to the system.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In order to address these issues we propose a novel Named Entity Understanding (NEU) system that combines and optimizes NER and EL for noisy spoken natural language utterances. We pass multiple NER hypotheses to EL for reranking, enabling NER to benefit from EL by including information from the knowledge base (KB).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We also design a retrieval engine tuned for spoken utterances for retrieving candidates from the KB. The retrieval engine, along with other techniques devised to address fuzzy entity mentions, lets the EL model be more robust to partial mentions, variation in named entities, use of aliases, as well as human and speech transcription errors.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Finally, we demonstrate that our framework can also empower other natural language understanding tasks, such as domain classification (a sentence classification task) and semantic parsing.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "There have been a few attempts to explore NER on the output of a speech pipeline (Ghannay et al., 2018; Abujabal and Gaspers, 2018; Coucke et al., 2018) . Among these, our NER model is closest to Abujabal and Gaspers (2018) and Coucke et al. (2018) ; however, unlike the former, we use a richer set of features rather than phonemes as input, and unlike the latter, we are able to use a deep model because of the large volume of data available.", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 103, |
|
"text": "(Ghannay et al., 2018;", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 104, |
|
"end": 131, |
|
"text": "Abujabal and Gaspers, 2018;", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 132, |
|
"end": 152, |
|
"text": "Coucke et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 196, |
|
"end": 223, |
|
"text": "Abujabal and Gaspers (2018)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 228, |
|
"end": 248, |
|
"text": "Coucke et al. (2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "EL has been well explored in the context of clean (Martins et al., 2019; Kolitsas et al., 2018; Luo et al., 2015) and noisy text inputs (Eshel et al., 2017; Guo et al., 2013; Liu et al., 2013) , but as with NER, there have been only a few efforts to explore EL in the context of transcribed speech (Benton and Dredze, 2015; Gao et al., 2017) , although crucially, both these works assume gold standard NER and focus purely on the EL component.", |
|
"cite_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 72, |
|
"text": "(Martins et al., 2019;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 73, |
|
"end": 95, |
|
"text": "Kolitsas et al., 2018;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 96, |
|
"end": 113, |
|
"text": "Luo et al., 2015)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 136, |
|
"end": 156, |
|
"text": "(Eshel et al., 2017;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 157, |
|
"end": 174, |
|
"text": "Guo et al., 2013;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 175, |
|
"end": 192, |
|
"text": "Liu et al., 2013)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 298, |
|
"end": 323, |
|
"text": "(Benton and Dredze, 2015;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 324, |
|
"end": 341, |
|
"text": "Gao et al., 2017)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Traditionally, a pipelined architecture of NER followed by EL has been used to address the entity linking task (Lin et al., 2012; Derczynski et al., 2015; Bontcheva et al., 2017; Bowden et al., 2018) . Since these approaches rely only on the best NER hypothesis, errors from NER propagate to the EL step. To alleviate this, joint models have been proposed: Sil and Yates (2013) proposed an NER+EL model which re-ranks candidate mentions and entity links produced by their base model. Our work differs in that we use a high precision NER system, while they use a large number of heuristically obtained Noun Phrase (NP) chunks and word n-grams as input to the EL stage. Luo et al. (2015) jointly train an NER and EL system using a probabilistic graphical model. However, these systems are trained and tested on clean text and do not address the noise problems we are concerned with.", |
|
"cite_spans": [ |
|
{ |
|
"start": 111, |
|
"end": 129, |
|
"text": "(Lin et al., 2012;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 130, |
|
"end": 154, |
|
"text": "Derczynski et al., 2015;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 155, |
|
"end": 178, |
|
"text": "Bontcheva et al., 2017;", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 179, |
|
"end": 199, |
|
"text": "Bowden et al., 2018)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 357, |
|
"end": 377, |
|
"text": "Sil and Yates (2013)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 668, |
|
"end": 685, |
|
"text": "Luo et al. (2015)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "For a given utterance, we first detect and label entities using the NER model and generate the top-l candidate hypotheses using beam search. The EL model consists of two stages: (i) candidate retrieval and (ii) joint linking and re-ranking. In the retrieval stage, for each NER hypothesis, we construct a structured search query and retrieve the top-c candidates from the retrieval engine. In the ranking stage, we use a neural network to rank these candidate entity links within each NER hypothesis while simultaneously using rich signals (entity popularity, similarity between entity embeddings, the relation across multiple entities in one utterance, etc.) from these entity links as additional features to re-rank the NER hypotheses from the previous step, thus jointly addressing both the NER and EL tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Architecture Design", |
|
"sec_num": "3" |
|
}, |
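{

"text": "A minimal sketch of the control flow described above (our illustration, not the authors' implementation; ner_model, retrieval_engine, el_ranker and their methods are assumed interfaces):\n\ndef neu_pipeline(utterance, ner_model, retrieval_engine, el_ranker, l=5, c=25):\n    # Step 1: top-l NER hypotheses from beam search.\n    ner_hypotheses = ner_model.beam_search(utterance, beam_size=l)\n    scored = []\n    for hyp in ner_hypotheses:\n        # Step 2: top-c knowledge base candidates per detected mention.\n        candidates = {m: retrieval_engine.search(m.text, entity_type=m.type, top=c)\n                      for m in hyp.mentions}\n        # Step 3: one joint score per (NER hypothesis, entity links) pair.\n        scored.append((el_ranker.score(utterance, hyp, candidates), hyp, candidates))\n    # The argmax simultaneously re-ranks NER and links the entities.\n    return max(scored, key=lambda t: t[0])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Architecture Design",

"sec_num": "3"

},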
|
{ |
|
"text": "For the NER task, following Lample et al. (2016) we use a combination of character and word level features. They are extracted by a bi-directional LSTM (biLSTM) (Hochreiter and Schmidhuber, 1997) , and then concatenated with pre-trained GloVe word embeddings 1 (Pennington et al., 2014) to pass through another biLSTM and fed into a CRF model to produce the final label prediction based on a score s(\u1ef9 i , x; \u03b8) that jointly optimizes the probability of labels for the tokens and the transition score for the entire sequence\u1ef9 i = (y 1 , . . . , y T ) given the input x:", |
|
"cite_spans": [ |
|
{ |
|
"start": 28, |
|
"end": 48, |
|
"text": "Lample et al. (2016)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 195, |
|
"text": "(Hochreiter and Schmidhuber, 1997)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 261, |
|
"end": 286, |
|
"text": "(Pennington et al., 2014)", |
|
"ref_id": "BIBREF21" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "s(\u1ef9 i , x; \u03b8) = T t=0 (\u03c8 t,\u03b8 (y t ) + \u03c6 t,t+1 (y t , y t+1 )) ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where \u03c8 t,\u03b8 is the biLSTM prediction score from the label y t of the t th token, and \u03c6(j, k) is the transition score from label j to label k.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "During training, we maximize the probability of the correct label sequence p seq , which is defined as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "p seq (\u1ef9 i , x; \u03b8) = exp(s(\u1ef9 i , x; \u03b8)) \u1ef9 j \u2208S exp (s(\u1ef9 j , x; \u03b8)) ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where\u1ef9 i is the label sequence for hypothesis i, and S is the set of all possible label sequences. During inference, we generate up to 5 NER alternatives for each utterance using beam search. We also calculate a mention level confidence p men for each entity mention m k . p men is computed by aggregating the sequence level confidence for all the prediction sequences that share the same mention sub-path m k :", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "p men (m k , x; \u03b8) = \u1ef9 i \u2208Sm i exp(s(\u1ef9 i , x; \u03b8)) \u1ef9 j \u2208S exp(s(\u1ef9 j , x; \u03b8)) ,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "where S m i is the set of prediction sequences that all have m k as the prediction for the corresponding tokens. Both p seq and p men are computed by dynamic programming, and serve as informative features in the EL model.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "NER", |
|
"sec_num": "3.1" |
|
}, |
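{

"text": "As an illustration of the confidences defined above, the following toy sketch (ours, not the paper's code) enumerates label sequences explicitly; a real CRF computes the same quantities with the forward algorithm rather than enumeration:\n\nimport itertools, math\n\ndef seq_score(y, emission, transition):\n    # s(y, x; theta): per-token emission scores plus label transition scores.\n    s = sum(emission[t][y[t]] for t in range(len(y)))\n    return s + sum(transition[y[t]][y[t + 1]] for t in range(len(y) - 1))\n\ndef confidences(emission, transition, n_labels, mention):\n    # Toy enumeration over all label sequences of length T.\n    T = len(emission)\n    seqs = list(itertools.product(range(n_labels), repeat=T))\n    Z = sum(math.exp(seq_score(y, emission, transition)) for y in seqs)\n    p_seq = {y: math.exp(seq_score(y, emission, transition)) / Z for y in seqs}\n    # mention: position -> label dict describing the sub-path m_k; p_men is\n    # the total probability mass of sequences that agree with it.\n    p_men = sum(p for y, p in p_seq.items()\n                if all(y[t] == lab for t, lab in mention.items()))\n    return p_seq, p_men",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "NER",

"sec_num": "3.1"

},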
|
{ |
|
"text": "The entity linking system follows the NER model and consists of two steps: candidate retrieval, and joint linking and re-ranking.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Linking and Re-ranking", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To build the candidate retrieval engine, we first index the list of entities in our knowledge base, which can be updated daily to capture new entities and change of their popularity. To construct the index, we iterate through the flattened list of entities and construct token-level unigram, bigram and trigram terms from the surface form of each entity. Apart from using the original entity names, we also use common aliases, harvested from usage logs, for popular entities (e.g. LOTR as an alias for \"Lord of the Rings\") to make the retrieval engine more robust to commonly occurring variations. Next, we create an inverted index which maps the unique list of n-gram terms to the list of entities that these n-grams are part of, also known as posting lists. Further, to capture cross-entity relationships in the knowledge base (such as relationships between an artist and a song or two sports teams belonging to the same league), we assign a pointer 2 for each entity in the knowledge base to its related entities and this relational information is leveraged by the EL model for entity disambiguation (described in 5.2). We then compute the tf-idf score for all the n-gram terms present in the entities and store them in the inverted index.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Linking and Re-ranking", |
|
"sec_num": "3.2" |
|
}, |
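{

"text": "A minimal sketch of the indexing step described above, assuming a simple entity schema with 'id', 'name' and 'aliases' fields (the production index and its tf-idf weighting are more involved):\n\nfrom collections import defaultdict\nimport math\n\ndef ngrams(tokens, n_max=3):\n    # Token-level unigram, bigram and trigram terms of a surface form.\n    for n in range(1, n_max + 1):\n        for i in range(len(tokens) - n + 1):\n            yield ' '.join(tokens[i:i + n])\n\ndef build_index(entities):\n    # entities: list of dicts with 'id', 'name' and 'aliases' (assumed schema).\n    postings = defaultdict(set)  # term -> posting list of entity ids\n    for e in entities:\n        for surface in [e['name']] + e.get('aliases', []):\n            for term in ngrams(surface.lower().split()):\n                postings[term].add(e['id'])\n    # idf per term; with one occurrence per surface form, idf dominates tf.\n    n = len(entities)\n    idf = {t: math.log(n / len(ids)) for t, ids in postings.items()}\n    return postings, idf",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Joint Linking and Re-ranking",

"sec_num": "3.2"

},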
|
{ |
|
"text": "For each hypothesis predicted by the NER model we query the retrieval engine with the corresponding text. We first send the query through a highprecision seq-to-seq correction model (Schmaltz et al., 2017; Ge et al., 2019) trained using common errors observed in usage. Next, we construct ngram features from the corrected query in a similar way to the indexing phase and retrieve all entities matching these n-gram features in our inverted index. Additionally, we use synonyms derived from usage for each term in the query to expand our search criteria: for example, our synonym list for \"Friend\" contains \"Friends\", which matches the TV show name which would have been missed if only the original term was used.", |
|
"cite_spans": [ |
|
{ |
|
"start": 182, |
|
"end": 205, |
|
"text": "(Schmaltz et al., 2017;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 206, |
|
"end": 222, |
|
"text": "Ge et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Linking and Re-ranking", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "For each entity retrieved, we get the tf-idf score for the terms present in the query chunk from the inverted index. We then aggregate the tf-idf scores of all the terms present in the query for this entity and linearly combine this aggregate score with other attributes such as popularity (i.e. prior usage probability) of the entity to generate a final score for all retrieved entity candidates for this query. Finally, we perform an efficient sort across all the entity candidates based on this score and return a top-c (in our case c = 25) list filtered by the entity type detected by the NER model for that hypothesis. These entity candidates coupled with the original NER hypothesis are sent to the ranker model described below for joint linking and re-ranking.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Linking and Re-ranking", |
|
"sec_num": "3.2" |
|
}, |
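{

"text": "The scoring and top-c filtering step might look like the following sketch; the linear combination weight alpha and the data structures are illustrative assumptions, not the production configuration:\n\ndef retrieve(query_terms, postings, idf, popularity, ner_type, entity_type, c=25, alpha=0.8):\n    # Aggregate the tf-idf scores of all matched query terms per entity, then\n    # combine linearly with popularity (prior usage probability).\n    scores = {}\n    for term in query_terms:\n        for eid in postings.get(term, ()):\n            scores[eid] = scores.get(eid, 0.0) + idf[term]\n    final = {eid: alpha * s + (1 - alpha) * popularity[eid]\n             for eid, s in scores.items()}\n    # Filter by the entity type predicted by NER, then return the top-c.\n    ranked = sorted((eid for eid in final if entity_type[eid] == ner_type),\n                    key=final.get, reverse=True)\n    return ranked[:c]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Joint Linking and Re-ranking",

"sec_num": "3.2"

},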
|
{ |
|
"text": "Following the candidate retrieval step, we introduce a neural model to rerank the candidate entities, aggregating features from both the NER model and the candidate retrieval engine.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Linking and Re-ranking", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The EL model scores each entity linking hypothesis separately. An entity linking hypothesis consists of a prediction from the NER model (which consists of named entity chunks in the input utterance and their types), and the candidate retrieval results for each chunk. Formally, we define an en-tity linking hypothesis y with k entity predictions as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Linking and Re-ranking", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "y = {f utter , f NER , f CR , {j \u2208 {1 . . . k} : (m j , e j )}}", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Linking and Re-ranking", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where m j is the j-th mention in the utterance, and e j is the entity name associated with this mention from the knowledge base. f utter , f NER , f CR are features derived from the original utterance text, the NER model and the candidate retrieval system respectively. In our system, f utter is a representation of the utterance from averaging the pre-trained word embeddings for the tokens in the utterance. Intuitively, having a dense representation of the full utterance can help the EL model better leverage signals from the utterance context. f NER includes the type of each mention, as well as the sequence and mention confidence computed by the NER model. f CR includes popularity, and whether a relation exists between the retrieved entities in y.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Linking and Re-ranking", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To be robust to noise, the EL model adopts a pair of CNNs to compare each entity mention m j and its corresponding knowledge base entity name e j . The CNN learns a name embedding with onedimensional convolution on the character sequence, and the kernel parameters are shared between the CNN used for user mention and the one used for the canonical name. A character-based text representation model is better at handling mis-transcriptions or mis-pronounced entity names. While a noisy entity name may be far from the canonical name in the word embedding space when they are semantically different, they are usually close to each other in the character embedding space due to similar spellings. To model the similarity between CNN name embeddings of m j and e j , we use the standard cosine similarity as a baseline, we experiment with an MLP that takes the concatenated name embeddings as input. We are able to model more expressive interactions between the two name embeddings with the MLP, and in turn better handle errors. Finally, we concatenate the similarity features with other features as input to another MLP that computes the final score for y. Formally, the scoring function is defined in Equation 1, where \u2295 means concatenation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Linking and Re-ranking", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "s(y) = MLP(f utter \u2295 f NER \u2295 f CR k j=1 [MLP(CNN(m j ), CNN(e j )) \u2295 CNN(m j ) \u2295 CNN(e j )]) (1)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Linking and Re-ranking", |
|
"sec_num": "3.2" |
|
}, |
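{

"text": "A compressed PyTorch sketch of the scorer in Equation 1, shown for a single mention and with one convolution layer instead of the two described in Section 4; layer sizes and interfaces are illustrative assumptions:\n\nimport torch\nimport torch.nn as nn\n\nclass CharCNN(nn.Module):\n    # Shared character-level name encoder.\n    def __init__(self, n_chars, emb_dim=25, n_kernels=100, widths=(3, 4, 5)):\n        super().__init__()\n        self.emb = nn.Embedding(n_chars, emb_dim)\n        self.convs = nn.ModuleList(\n            [nn.Conv1d(emb_dim, n_kernels, w, padding=w // 2) for w in widths])\n\n    def forward(self, char_ids):                 # (batch, seq_len)\n        x = self.emb(char_ids).transpose(1, 2)   # (batch, emb_dim, seq_len)\n        # Max-pool each convolution over time and concatenate: a name embedding.\n        return torch.cat([c(x).max(dim=2).values for c in self.convs], dim=1)\n\nclass ELScorer(nn.Module):\n    # Scores one (mention, entity) pair plus side features, cf. Equation 1.\n    def __init__(self, n_chars, feat_dim, name_dim=300):\n        super().__init__()\n        self.cnn = CharCNN(n_chars)  # kernel parameters shared for m_j and e_j\n        self.sim = nn.Sequential(nn.Linear(2 * name_dim, 64), nn.ReLU())\n        self.out = nn.Sequential(nn.Linear(feat_dim + 64, 64), nn.ReLU(),\n                                 nn.Linear(64, 1))\n\n    def forward(self, mention_chars, entity_chars, side_feats):\n        m, e = self.cnn(mention_chars), self.cnn(entity_chars)\n        sim = self.sim(torch.cat([m, e], dim=1))  # MLP similarity of the pair\n        return self.out(torch.cat([side_feats, sim], dim=1)).squeeze(1)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Joint Linking and Re-ranking",

"sec_num": "3.2"

},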
|
{ |
|
"text": "In our data, the number of entity mentions in an utterance averages less than 3. We pad the entity feature sequence to length 5, which provides a good coverage. In the scoring model above, we use a simple concatenation to aggregate the embedding similarities of multiple entity mentions which empirically performs as well as sequence models like LSTM, while being much cheaper in computation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Linking and Re-ranking", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To train the EL model, we use the standard maxmargin loss for ranking tasks. If for the i-th example, we denote the ground truth as y * i and an incorrect prediction as\u0177 i , and the scoring function s(\u2022) is as defined in Equation 1, the loss function is", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Linking and Re-ranking", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "L = 1 N N i=1 [\u03b3(\u0177 i , y * i ) + s(\u0177 i ) \u2212 s(y * i )] + . (2)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Linking and Re-ranking", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The max-margin loss encourages the ground truth score to be at least a margin \u03b3 higher than the score of an incorrect prediction. The margin is defined as a function of the ground truth and the incorrect prediction, thus adaptive to the quality of prediction. A larger margin is needed when the incorrect prediction is further away from the ground truth. For our reranking task, we set a smaller margin when only the resolved entities are incorrect but the NER result is correct, and a larger margin when the NER result is wrong. This adaptive margin helps rerank NER hypotheses even when the model cannot rank the linking results correctly. During training, we uniformly sample the negative predictions from the candidates retrieved by the retrieval engine.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Joint Linking and Re-ranking", |
|
"sec_num": "3.2" |
|
}, |
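{

"text": "A sketch of the adaptive-margin hinge loss of Equation 2; the margin constants are placeholders, since the paper does not report the exact values:\n\nimport torch\n\ndef adaptive_margin(ner_wrong, m_el=0.1, m_ner=0.5):\n    # gamma(y_hat, y*): a larger margin when the NER part of the incorrect\n    # prediction is wrong, a smaller one when only the resolved entities differ.\n    return torch.where(ner_wrong, torch.tensor(m_ner), torch.tensor(m_el))\n\ndef ranking_loss(score_pos, score_neg, ner_wrong):\n    # L = mean([gamma + s(y_hat) - s(y*)]_+), Equation 2, over a batch whose\n    # negatives are sampled uniformly from the retrieved candidates.\n    gamma = adaptive_margin(ner_wrong)\n    return torch.clamp(gamma + score_neg - score_pos, min=0).mean()",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Joint Linking and Re-ranking",

"sec_num": "3.2"

},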
|
{ |
|
"text": "We also explore the impact of our NEU feature encoding on two tasks: a domain classifier and a domain-specific shallow semantic parser.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Improvement on Other Language Understanding Tasks", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Domain classification identifies which domain a user's request falls into: sports, weather, music, etc., and is usually done by posing the task as sequence classification: our baseline uses word embeddings and gazetteer features as inputs to an RNN, in a manner similar to Chen et al. (2019). Consider a specific token t. Let a be the number of alternatives used from the NER model in the domain classifier (which we treat as a hyperparameter), p i represent the (scalar) sequence level confidence score p seq (\u1ef9 i , x; \u03b8) of the i th NER alternative defined in Section 3.1, c i represent an integer for the entity type that NER hypothesis i assigns to the token t, and o(.) represent a function converting an integer into its corresponding one-hot vector. Then the additional NER feature vector f r concatenated to the input vector fed into token t as part of the domain classifier can be written as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Domain Classification", |
|
"sec_num": "3.3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "f r = i=a i=1 p i o(c i ).", |
|
"eq_num": "(3)" |
|
} |
|
], |
|
"section": "Domain Classification", |
|
"sec_num": "3.3.1" |
|
}, |
|
{ |
|
"text": "Likewise, for the featurization that uses both NER and EL, let a be the number of alternatives used from the NER+EL system in the domain classifier (also a hyperparameter); these a alternatives are now sorted by the scores from the EL hypotheses, as opposed to the sequence level confidence scores from NER. Let s i be the i th re-ranked alternative's cosine similarity score between the mention and knowledge base entity name as output by the EL model. p i and c i are consistent with our earlier notation, except that they now correspond to the i th NER alternative after re-ranking. Then the additional NER+EL feature vector f u concatenated to the input fed into token t as part of the domain classifier can be written as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Domain Classification", |
|
"sec_num": "3.3.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "f u = i=a i=1 p i o(c i ) \u2295 s i o(c i ).", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Domain Classification", |
|
"sec_num": "3.3.1" |
|
}, |
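{

"text": "A sketch of the featurizations in Equations 3 and 4, under one reading of the notation (the confidence-weighted and similarity-weighted sums in f_u are concatenated); a, p_i, s_i and c_i are as defined above:\n\nimport numpy as np\n\ndef one_hot(c, n_types):\n    v = np.zeros(n_types)\n    v[c] = 1.0\n    return v\n\ndef f_r(p, c, n_types):\n    # Equation 3: confidence-weighted sum of one-hot entity types for a token,\n    # over the top-a NER alternatives.\n    return sum(p_i * one_hot(c_i, n_types) for p_i, c_i in zip(p, c))\n\ndef f_u(p, s, c, n_types):\n    # Equation 4: the p-weighted and s-weighted type vectors, summed over the\n    # a re-ranked alternatives and then concatenated.\n    return np.concatenate([\n        sum(p_i * one_hot(c_i, n_types) for p_i, c_i in zip(p, c)),\n        sum(s_i * one_hot(c_i, n_types) for s_i, c_i in zip(s, c)),\n    ])",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Domain Classification",

"sec_num": "3.3.1"

},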
|
{ |
|
"text": "Our virtual assistant also uses domain-specific shallow semantic parsers, running after domain classification, responsible both for identifying the correct intent that the user expects (such as the \"play\" intent associated with a song) and for assigning semantic labels to each of the tokens in a user's utterance (such as the word \"score\" and \"game\" respectively being tagged as tokens related to a sports statistic and sports event respectively in the utterance \"What's the score of yesterday's Warriors game?\"). Each semantic parser is structured as a multi-task sequence classification (for the intent) and sequence tagging (for the token-level semantic labelling) task, with our production baseline using word embeddings and gazetteer features as inputs into an RNN similar to our domain classifier. Here, f r and f u are featurized as described above. Note that in contrast to the NEU system, the semantic parser uses a domain-specific ontology, to enable each domain to work independently and to not be encumbered by the need to align ontologies.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Parsing", |
|
"sec_num": "3.3.2" |
|
}, |
|
{ |
|
"text": "To create our datasets, we randomly sampled around 600k unique anonymous English transcripts (machine transcribed utterances), and annotated them with NER and EL labels. Utterances are subject to Apple's baseline privacy practices with respect to Siri requests, including that such requests are not associated with a user's Apple ID, email address, or other data Apple may have from a user's use of other Apple services, and have been filtered as described in Section 7. We then split the annotated data into 80/10/10 for train, development and test sets. For both the NER and EL tasks, we report our results on test sets sampled from the \"music\", \"sports\" and \"movie & TV\" domains. These are popular domains in the usage and have a high percentage of named entities: with an average of 0.6, 1.1 and 0.7 entities for each utterance in the 3 domains respectively. To evaluate model performance specifically on noisy user inputs, we select queries from the test sets that are marked as containing speech transcription or user errors by the annotators and report results on this \"noisy\" subset, which constitutes 13.5%, 12.7% data for movie&TV and music domain respectively when an entity exists. 3 To evaluate the relation feature, we also look at the \"related\" subset where a valid relation exists in the utterance. This subset consists 13.4% and 5.3% of data for the music and sports domain with at least one entity. 4 We first train the NER model described in Section 3.1. Next, for every example in our training dataset, we run inference on the trained NER model and generate the top-5 NER hypotheses using beam search. Following this, we retrieve the top 25 candidates for each of these hypotheses using our search engine combined with the ground truth NER and EL labels and fed to the EL model for training.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1194, |
|
"end": 1195, |
|
"text": "3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1417, |
|
"end": 1418, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets and Training Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "To measure the NER model performance, we use the standard NER F1 metric used for the CoNLL-2003 shared task (Tjong Kim Sang and De Meulder, 2003) . To measure the quality of the top-5 NER hypotheses, we compute the oracle top-5 F1 score by comparing and choosing the best alternative hypothesis among the 5 and calculate its F1 score, for each test utterance. In this manner, we also know the upper bound that EL can reach from reranking NER hypotheses. As described in section 3.2, the EL model is optimized to perform two tasks simultaneously: entity linking and reranking of NER hypotheses. Hence to evaluate the performance of the EL model, we use two metrics: reranked NER-F1 score and the EL-F1 score. The reranked NER F1 score is measured on the NER predictions according to the top EL hypothesis, and is defined in the same way as the previous NER task. To evaluate entity linking quality, we adopt a strict F1 metric similar to the one used for NER. Besides entity boundary and entity type, the resolved entity also needs to be correct for the entity prediction to be counted as a true positive.", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 145, |
|
"text": "(Tjong Kim Sang and De Meulder, 2003)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets and Training Methodology", |
|
"sec_num": "4" |
|
}, |
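{

"text": "The strict EL F1 can be sketched as follows, treating each prediction as a (boundary, type, resolved entity) tuple; this is our illustration of the metric described above, not the authors' evaluation code:\n\ndef strict_el_f1(gold, pred):\n    # gold, pred: sets of (start, end, entity_type, resolved_entity_id) tuples;\n    # a true positive requires boundary, type and resolved entity all correct.\n    tp = len(gold & pred)\n    precision = tp / len(pred) if pred else 0.0\n    recall = tp / len(gold) if gold else 0.0\n    if precision + recall == 0:\n        return 0.0\n    return 2 * precision * recall / (precision + recall)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Datasets and Training Methodology",

"sec_num": "4"

},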
|
{ |
|
"text": "For NER model training, we use standard minibatch gradient descent using the Adam optimizer with an initial learning rate of 0.001, a scheduled learning rate decay of 0.99, LSTM with a hidden layer of size 350 and a batch size of 256. We apply a dropout of 0.5 to the embedding and biLSTM layers, and include token level gazetteer features (Ratinov and Roth, 2009) to boost performance in recognizing common entities. We linearly project these gazetteer features and concatenate the projection with the 200 dimensional word embeddings and 100 dimensional character embeddings which are then fed into the biLSTM followed by the CRF.", |
|
"cite_spans": [ |
|
{ |
|
"start": 340, |
|
"end": 364, |
|
"text": "(Ratinov and Roth, 2009)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets and Training Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For EL, the character CNN model we use has two layers, each with 100 convolution kernels of size 3, 4, and 5. Character embedding are 25 dimensional and trained end to end with the entity linking task. The MLP for embedding similarity takes the concatenation of two name embeddings, as well as their element-wise sum, difference, minimum, maximum, and multiplication. It has two hidden layers of size 1024 and 256, with output dimension 64. Similarity features of mentions in the prediction are averaged, while the other features like NER confidence and entity popularity are concatenated to the representation. The final MLP for scoring has two hidden layers, with size 256 and 64. We train the model on 4 GPUs with synchronous SGD, and for each gradient step we send a batch of 100 examples to each GPU.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Datasets and Training Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We present F1 scores in different domains of the NER and EL model in Table 1 . Since the EL model takes 5 NER hypotheses as input, it also acts as a reranker of the NER model, and we show substantial improvements on top-1 NER F1 score consistently over all test sets. In Table 2 , we show improvements achieved by several specific model design choices and features on entity linking performance. Table 2 (a) shows the MLP similarity substantially improves entity linking accuracy with its capacity to model text variations, especially on utterances with noisy entity mentions. The relation feature is powerful for disambiguating entities with similar names, and we show a considerable improvement in EL F1 on the subset of utterances that have related entities in Table 2(b). Table 2 (c) shows utterance embeddings brought improvements in the music, and media & TV domains. The improvement brought by log-scale popularity feature is the largest for the movie & TV domain as shown in Table 2(d) , where the popularity distribution has extremely long tails compared to other domains.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 69, |
|
"end": 76, |
|
"text": "Table 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 271, |
|
"end": 278, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 396, |
|
"end": 403, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 776, |
|
"end": 783, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
}, |
|
{ |
|
"start": 983, |
|
"end": 993, |
|
"text": "Table 2(d)", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We provide a few examples to showcase the effectiveness of our NEU system. Firstly, the EL model is able to link noisy entity mentions to the corresponding entity canonical name in the knowledge base. For instance, when the transcribed utterance is \"play Carla Cabello\", the EL model is able to resolve the mention \"Carla Carbello\" to the correct artist name \"Camila Cabello\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative Analysis", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Secondly, the EL model is able to recover from errors made by the NER system by leveraging the knowledge base to disambiguate entity mentions. The reranking is especially powerful when the utterance contains little context of the entity for the NER model to leverage. For example, for \"Doctor Strange\", the top NER hypothesis labels the full utterance as a generic \"Person\" type, and after reranking, EL model is able to leverage the popularity information (\"Doctor Strange\" is a movie that was recently released and has a high popularity in our knowledge base) and correctly label the utterance as \"movieTitle\". Reranking is also effective when the entity mentions are noisy, which will cause mismatches for the gazetteer features that NER uses. For \"play Avengers Age of Ultra\", the top NER hypothesis only predicts \"Avengers\" as \"movieTitle\", while after reranking, the EL model is able to recover the full span \"Avengers Age of Ultra\" as a \"movieTitle\", and resolve it to \"Avengers: Age of Ultron\", the correct canonical title.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative Analysis", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "The entity relations from the knowledge base are helpful for entity disambiguation. When the user refers to a sports team with the name \"Giants\", they could be asking for either \"New York Giants\", a National Football League (NFL) team, or \"San Francisco Giants\", a Major League Baseball team. When there are multiple sports team mentions in an utterance, the EL model leverages a relation feature from the knowledge base indicating whether the teams are from the same sports league (as the user is more likely to mention two teams from the same league and the same sport). Knowing entity relations, the EL model is able to link the mention \"Giants\" in \"Cowboys versus Giants\" to the NFL team, knowing that \"Cowboys\" is referring to \"Dallas Cowboys\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Qualitative Analysis", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "To validate the utility of our proposed NEU framework, we illustrate performance improvements in the Domain Classifier and the Semantic Parsers corresponding to the three domains (music, movies & TV and sports) as described in Section 3.3. Table 3 reports the classification accuracy for the Domain Classifier and the parse accuracies for the Semantic Parsers (the model is said to have predicted the parse correctly if all the tokens are tagged with their correct semantic parse labels). We observe substantial improvements in all 4 cases when NER features are used as additional input, given all the other components of the system being the same. In turn, we observe further improvements when our NER+EL featurization is used.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 240, |
|
"end": 247, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Qualitative Analysis", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "In this work, we have proposed a Named Entity Understanding framework that jointly identifies and resolves entities present in an utterance when a user interacts with a voice assistant. Our proposed architecture consists of two modules: NER and EL, with the EL serving the additional task of possibly correcting the recognized entities from NER by leveraging rich signals from entity links in the knowledge base while simultaneously linking these entities to the knowledge base. With several design strategies in our system targeted towards noisy natural language utterances, we have shown that our framework is robust to speech transcription and user errors that occur frequently in spoken dialog systems. We have also shown that featurizing the output of NEU and feeding these features into other language understanding tasks substantially improves the accuracy of these models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "We randomly sampled transcripts from Siri production datasets over a period of months, and we believe it to be a representative sample of usage in the domains described. In accordance with Apple's privacy practices with respect to Siri requests, Siri utterances are not associated with a user's Apple ID, email address, or other data Apple may have from a user's use of other Apple services. In addition to Siri's baseline privacy guarantees, we filtered the sampled utterances to remove transcripts that were too long, contained rare words, or contained references to contacts before providing the dataset to our annotators.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Ethical Considerations", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We also tried more recent contextual embeddings such as BERT(Devlin et al., 2019), and empirically observed very little difference in performance when compared to GloVe. So we adopt GloVE, which is substantially more efficient in terms of inference time required.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Each entity in our knowledge base consists of metadata (for example, a song entry in our knowledge base would contain metadata such as the music artist, album, year the song was released in etc.) that we leverage to automatically construct these relationship pointers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Sports domain does not have the annotation for noisy data available when this experiment was conducted.4 Our KB does not have relation information for movie&TV domain.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank Alex Acero, Anmol Walia, Arjun Rangarajan, Barry-John Theobald, Bhagyashree Shekawat, Christopher Klein, Dhivya Piraviperumal, Hong Yu, Jianpeng Cheng, Jiarui Lu, John Giannandrea, John Keesling, Katy Linsky, Lanqing Wang, Ryan Templin, Robert Daland, Shruti Bhargava, Xi Chen and Yvonne Xiao for discussions and for their help and feedback. We also want to thank all the anonymous reviewers for the helpful feedback and suggestions on this work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Neural named entity recognition from subword units", |
|
"authors": [ |
|
{ |
|
"first": "Abdalghani", |
|
"middle": [], |
|
"last": "Abujabal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Judith", |
|
"middle": [], |
|
"last": "Gaspers", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1808.07364" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abdalghani Abujabal and Judith Gaspers. 2018. Neu- ral named entity recognition from subword units. arXiv preprint arXiv:1808.07364.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Entity linking for spoken language", |
|
"authors": [ |
|
{ |
|
"first": "Adrian", |
|
"middle": [], |
|
"last": "Benton", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dredze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "225--230", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/N15-1024" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adrian Benton and Mark Dredze. 2015. Entity link- ing for spoken language. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies, pages 225-230, Den- ver, Colorado. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Crowdsourcing named entity recognition and entity linking corpora", |
|
"authors": [ |
|
{ |
|
"first": "Kalina", |
|
"middle": [], |
|
"last": "Bontcheva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Leon", |
|
"middle": [], |
|
"last": "Derczynski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Handbook of Linguistic Annotation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "875--892", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kalina Bontcheva, Leon Derczynski, and Ian Roberts. 2017. Crowdsourcing named entity recognition and entity linking corpora. In Handbook of Linguistic Annotation, pages 875-892. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "SlugNERDS: A named entity recognition tool for open domain dialogue systems", |
|
"authors": [ |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Bowden", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiaqi", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shereen", |
|
"middle": [], |
|
"last": "Oraby", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amita", |
|
"middle": [], |
|
"last": "Misra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marilyn", |
|
"middle": [], |
|
"last": "Walker", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kevin Bowden, Jiaqi Wu, Shereen Oraby, Amita Misra, and Marilyn Walker. 2018. SlugNERDS: A named entity recognition tool for open domain dialogue sys- tems. In Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan. European Language Resources Association (ELRA).", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Active learning for domain classification in a commercial spoken personal assistant", |
|
"authors": [ |
|
{

"first": "Xi",

"middle": [

"C"

],

"last": "Chen",

"suffix": ""

},

{

"first": "Adithya",

"middle": [],

"last": "Sagar",

"suffix": ""

},

{

"first": "Justine",

"middle": [

"T"

],

"last": "Kao",

"suffix": ""

},

{

"first": "Tony",

"middle": [

"Y"

],

"last": "Li",

"suffix": ""

},

{

"first": "Christopher",

"middle": [],

"last": "Klein",

"suffix": ""

},

{

"first": "Stephen",

"middle": [],

"last": "Pulman",

"suffix": ""

},

{

"first": "Ashish",

"middle": [],

"last": "Garg",

"suffix": ""

},

{

"first": "Jason",

"middle": [

"D"

],

"last": "Williams",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "Proc. Interspeech", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1478--1482", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xi C Chen, Adithya Sagar, Justine T Kao, Tony Y Li, Christopher Klein, Stephen Pulman, Ashish Garg, and Jason D Williams. 2019. Active learning for domain classification in a commercial spoken per- sonal assistant. Proc. Interspeech 2019, pages 1478- 1482.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Snips voice platform: an embedded spoken language understanding system for private-by-design voice interfaces", |
|
"authors": [ |
|
{ |
|
"first": "Alice", |
|
"middle": [], |
|
"last": "Coucke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alaa", |
|
"middle": [], |
|
"last": "Saade", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adrien", |
|
"middle": [], |
|
"last": "Ball", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Th\u00e9odore", |
|
"middle": [], |
|
"last": "Bluche", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexandre", |
|
"middle": [], |
|
"last": "Caulier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Leroy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cl\u00e9ment", |
|
"middle": [], |
|
"last": "Doumouro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thibault", |
|
"middle": [], |
|
"last": "Gisselbrecht", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francesco", |
|
"middle": [], |
|
"last": "Caltagirone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thibaut", |
|
"middle": [], |
|
"last": "Lavril", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1805.10190" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alice Coucke, Alaa Saade, Adrien Ball, Th\u00e9odore Bluche, Alexandre Caulier, David Leroy, Cl\u00e9ment Doumouro, Thibault Gisselbrecht, Francesco Calta- girone, Thibaut Lavril, et al. 2018. Snips voice plat- form: an embedded spoken language understanding system for private-by-design voice interfaces. arXiv preprint arXiv:1805.10190.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Analysis of named entity recognition and linking for tweets", |
|
"authors": [ |
|
{

"first": "",

"middle": [],

"last": "Troncy",

"suffix": ""

},

{

"first": "Johann",

"middle": [],

"last": "Petrak",

"suffix": ""

},

{

"first": "Kalina",

"middle": [],

"last": "Bontcheva",

"suffix": ""

}
|
], |
|
"year": 2015, |
|
"venue": "Information Processing & Management", |
|
"volume": "51", |
|
"issue": "2", |
|
"pages": "32--49", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Troncy, Johann Petrak, and Kalina Bontcheva. 2015. Analysis of named entity recognition and linking for tweets. Information Processing & Management, 51(2):32-49.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Named entity disambiguation for noisy text", |
|
"authors": [ |
|
{ |
|
"first": "Yotam", |
|
"middle": [], |
|
"last": "Eshel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kira", |
|
"middle": [], |
|
"last": "Radinsky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaul", |
|
"middle": [], |
|
"last": "Markovitch", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ikuya", |
|
"middle": [], |
|
"last": "Yamada", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 21st Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "58--68", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K17-1008" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yotam Eshel, Noam Cohen, Kira Radinsky, Shaul Markovitch, Ikuya Yamada, and Omer Levy. 2017. Named entity disambiguation for noisy text. In Pro- ceedings of the 21st Conference on Computational Natural Language Learning (CoNLL 2017), pages 58-68, Vancouver, Canada. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Support for interactive identification of mentioned entities in conversational speech", |
|
"authors": [ |
|
{ |
|
"first": "Ning", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Douglas", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Oard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Dredze", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "953--956", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3077136.3080688" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ning Gao, Douglas W. Oard, and Mark Dredze. 2017. Support for interactive identification of mentioned entities in conversational speech. In Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '17, page 953-956, New York, NY, USA. As- sociation for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Automatic grammatical error correction for sequence-to-sequence text generation: An empirical study", |
|
"authors": [ |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Ge", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xingxing", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6059--6064", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-1609" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tao Ge, Xingxing Zhang, Furu Wei, and Ming Zhou. 2019. Automatic grammatical error correction for sequence-to-sequence text generation: An empiri- cal study. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 6059-6064, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "End-to-end named entity and semantic concept extraction from speech", |
|
"authors": [ |
|
{ |
|
"first": "Sahar", |
|
"middle": [], |
|
"last": "Ghannay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Caubri\u00e8re", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yannick", |
|
"middle": [], |
|
"last": "Est\u00e8ve", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nathalie", |
|
"middle": [], |
|
"last": "Camelin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edwin", |
|
"middle": [], |
|
"last": "Simonnet", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "2018 IEEE Spoken Language Technology Workshop (SLT)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "692--699", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sahar Ghannay, Antoine Caubri\u00e8re, Yannick Est\u00e8ve, Nathalie Camelin, Edwin Simonnet, Antoine Lau- rent, and Emmanuel Morin. 2018. End-to-end named entity and semantic concept extraction from speech. In 2018 IEEE Spoken Language Technology Workshop (SLT), pages 692-699. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "To link or not to link? a study on end-toend tweet entity linking", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emre", |
|
"middle": [], |
|
"last": "Kiciman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1020--1030", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Guo, Ming-Wei Chang, and Emre Kiciman. 2013. To link or not to link? a study on end-to- end tweet entity linking. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1020-1030, Atlanta, Georgia. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Long short-term memory", |
|
"authors": [ |
|
{ |
|
"first": "Sepp", |
|
"middle": [], |
|
"last": "Hochreiter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00fcrgen", |
|
"middle": [], |
|
"last": "Schmidhuber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Neural computation", |
|
"volume": "9", |
|
"issue": "8", |
|
"pages": "1735--1780", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "End-to-end neural entity linking", |
|
"authors": [ |
|
{ |
|
"first": "Nikolaos", |
|
"middle": [], |
|
"last": "Kolitsas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Octavian-Eugen", |
|
"middle": [], |
|
"last": "Ganea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Hofmann", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 22nd Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "519--529", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/K18-1050" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nikolaos Kolitsas, Octavian-Eugen Ganea, and Thomas Hofmann. 2018. End-to-end neural entity linking. In Proceedings of the 22nd Conference on Computational Natural Language Learning, pages 519-529, Brussels, Belgium. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Neural architectures for named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Guillaume", |
|
"middle": [], |
|
"last": "Lample", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Miguel", |
|
"middle": [], |
|
"last": "Ballesteros", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sandeep", |
|
"middle": [], |
|
"last": "Subramanian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kazuya", |
|
"middle": [], |
|
"last": "Kawakami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Dyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "260--270", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N16-1030" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guillaume Lample, Miguel Ballesteros, Sandeep Sub- ramanian, Kazuya Kawakami, and Chris Dyer. 2016. Neural architectures for named entity recognition. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 260-270, San Diego, California. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Entity linking at web scale", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mausam", |
|
"middle": [], |
|
"last": "", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oren", |
|
"middle": [], |
|
"last": "Etzioni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the Joint Workshop on Automatic Knowledge Base Construction and Web-scale Knowledge Extraction (AKBC-WEKEX)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "84--88", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Lin, Mausam, and Oren Etzioni. 2012. Entity linking at web scale. In Proceedings of the Joint Workshop on Automatic Knowledge Base Construc- tion and Web-scale Knowledge Extraction (AKBC- WEKEX), pages 84-88, Montr\u00e9al, Canada. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Entity linking for tweets", |
|
"authors": [ |
|
{ |
|
"first": "Xiaohua", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yitong", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haocheng", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "1304--1311", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaohua Liu, Yitong Li, Haocheng Wu, Ming Zhou, Furu Wei, and Yi Lu. 2013. Entity linking for tweets. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1304-1311, Sofia, Bulgaria. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Joint entity recognition and disambiguation", |
|
"authors": [ |
|
{ |
|
"first": "Gang", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaojiang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zaiqing", |
|
"middle": [], |
|
"last": "Nie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "879--888", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gang Luo, Xiaojiang Huang, Chin-Yew Lin, and Za- iqing Nie. 2015. Joint entity recognition and disam- biguation. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Process- ing, pages 879-888.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Joint learning of named entity recognition and entity linking", |
|
"authors": [ |
|
{ |
|
"first": "Pedro", |
|
"middle": [ |
|
"Henrique" |
|
], |
|
"last": "Martins", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zita", |
|
"middle": [], |
|
"last": "Marinho", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andr\u00e9", |
|
"middle": [ |
|
"F T" |
|
], |
|
"last": "Martins", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: Student Research Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "190--196", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P19-2026" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pedro Henrique Martins, Zita Marinho, and Andr\u00e9 F. T. Martins. 2019. Joint learning of named en- tity recognition and entity linking. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: Student Research Work- shop, pages 190-196, Florence, Italy. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Glove: Global vectors for word representation", |
|
"authors": [ |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Pennington", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1532--1543", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.3115/v1/D14-1162" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word rep- resentation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Process- ing (EMNLP), pages 1532-1543, Doha, Qatar. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Design challenges and misconceptions in named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Lev", |
|
"middle": [], |
|
"last": "Ratinov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2009, |
|
"venue": "Proceedings of the Thirteenth Conference on Computational Natural Language Learning (CoNLL-2009)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "147--155", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lev Ratinov and Dan Roth. 2009. Design chal- lenges and misconceptions in named entity recog- nition. In Proceedings of the Thirteenth Confer- ence on Computational Natural Language Learning (CoNLL-2009), pages 147-155, Boulder, Colorado. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Adapting sequence models for sentence correction", |
|
"authors": [ |
|
{ |
|
"first": "Allen", |
|
"middle": [], |
|
"last": "Schmaltz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Rush", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stuart", |
|
"middle": [], |
|
"last": "Shieber", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2807--2813", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D17-1298" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Allen Schmaltz, Yoon Kim, Alexander Rush, and Stu- art Shieber. 2017. Adapting sequence models for sentence correction. In Proceedings of the 2017 Conference on Empirical Methods in Natural Lan- guage Processing, pages 2807-2813, Copenhagen, Denmark. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Re-ranking for joint named-entity recognition and linking", |
|
"authors": [ |
|
{ |
|
"first": "Avirup", |
|
"middle": [], |
|
"last": "Sil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Yates", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 22nd ACM international conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2369--2374", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Avirup Sil and Alexander Yates. 2013. Re-ranking for joint named-entity recognition and linking. In Pro- ceedings of the 22nd ACM international conference on Information and Knowledge Management, pages 2369--2374, San Francisco, USA.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition", |
|
"authors": [ |
|
{ |
|
"first": "Erik", |
|
"middle": [ |
|
"F" |
|
], |
|
"last": "Tjong Kim Sang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fien", |
|
"middle": [], |
|
"last": "De Meulder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "142--147", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Erik F. Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition. In Proceedings of the Seventh Conference on Natu- ral Language Learning at HLT-NAACL 2003, pages 142-147.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF2": { |
|
"content": "<table><tr><td/><td>A</td><td>B</td><td>C</td></tr><tr><td>DC</td><td colspan=\"3\">88.95 89.46 90.04</td></tr><tr><td colspan=\"4\">SP [movie&TV] 89.62 90.99 91.67</td></tr><tr><td>SP [music]</td><td colspan=\"3\">83.97 84.26 84.42</td></tr><tr><td>SP [sports]</td><td colspan=\"3\">86.37 86.47 86.46</td></tr></table>", |
|
"html": null, |
|
"type_str": "table", |
|
"text": "EL mean F1 relative % improvements, reported on 10 runs average.", |
|
"num": null |
|
}, |
|
"TABREF3": { |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table", |
|
"text": "Results for domain classifier (first row) and semantic parser. A is the baseline, B is A+NER, C is A+NER+EL.", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |