|
{ |
|
"paper_id": "2021", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:55:32.831590Z" |
|
}, |
|
"title": "Towards Zero and Few-shot Knowledge-seeking Turn Detection in Task-orientated Dialogue Systems", |
|
"authors": [ |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Sunnyvale", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Shuyang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Sunnyvale", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Seokhwan", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Sunnyvale", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Sunnyvale", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "yangliud@amazon.com" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-Tur", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Sunnyvale", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "hakkanit@amazon.com" |
|
}, |
|
{ |
|
"first": "Joey", |
|
"middle": [ |
|
"Tianyi" |
|
], |
|
"last": "Zhou", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Sunnyvale", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Sunnyvale", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Xi", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Sunnyvale", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Sunnyvale", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Hongyuan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Sunnyvale", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Meng", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Sunnyvale", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Rick", |
|
"middle": [], |
|
"last": "Siow", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Sunnyvale", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mong", |
|
"middle": [], |
|
"last": "Goh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Sunnyvale", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Kenneth", |
|
"middle": [], |
|
"last": "Kwok", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Amazon Alexa AI", |
|
"location": { |
|
"settlement": "Sunnyvale", |
|
"region": "CA", |
|
"country": "USA" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Most prior work on task-oriented dialogue systems is restricted to supporting domain APIs. However, users may have requests that are out of the scope of these APIs. This work focuses on identifying such user requests. Existing methods for this task mainly rely on finetuning pre-trained models on large annotated data. We propose a novel method, REDE, based on adaptive representation learning and density estimation. REDE can be applied to zero-shot cases, and quickly learns a highperforming detector with only a few shots by updating less than 3K parameters. We demonstrate REDE's competitive performance on DSTC9 data and our newly collected test set.", |
|
"pdf_parse": { |
|
"paper_id": "2021", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Most prior work on task-oriented dialogue systems is restricted to supporting domain APIs. However, users may have requests that are out of the scope of these APIs. This work focuses on identifying such user requests. Existing methods for this task mainly rely on finetuning pre-trained models on large annotated data. We propose a novel method, REDE, based on adaptive representation learning and density estimation. REDE can be applied to zero-shot cases, and quickly learns a highperforming detector with only a few shots by updating less than 3K parameters. We demonstrate REDE's competitive performance on DSTC9 data and our newly collected test set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Current task-oriented dialog systems often rely on pre-defined APIs to complete target tasks (Williams et al., 2017; Eric et al., 2017) and filter out any other requests beyond the APIs as out-of-domain cases. However, some in-domain user requests can be addressed by incorporating external domain knowledge from the web or any other sources (Kim et al., 2020) . To address this problem, recently organized a benchmark challenge on taskoriented conversational modeling with unstructured knowledge access in DSTC9 (Gunasekara et al., 2020) . This challenge includes the knowledgeseeking turn detection task to determine whether to invoke a knowledge-driven responder or just rely on available API functions. One data sample is provided in Table 1 . The state-of-the-art systems (He et al., 2021; Tang et al., 2021; Mi et al., 2021; implemented this detector by finetuning a large pre-trained model on the training dataset (about 72K samples) as a binary classifier, and achieved an F1 score of over 95% on the benchmark test set. However, after close investigation, we find those user queries in the test set are very limited in topic coverage and language variation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 93, |
|
"end": 116, |
|
"text": "(Williams et al., 2017;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 117, |
|
"end": 135, |
|
"text": "Eric et al., 2017)", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 360, |
|
"text": "(Kim et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 507, |
|
"end": 538, |
|
"text": "DSTC9 (Gunasekara et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 777, |
|
"end": 794, |
|
"text": "(He et al., 2021;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 795, |
|
"end": 813, |
|
"text": "Tang et al., 2021;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 814, |
|
"end": 830, |
|
"text": "Mi et al., 2021;", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 738, |
|
"end": 745, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To evaluate the detector performance on real-world user queries, we specially curate a new contrast set following Gardner et al. (2020) by manually collecting questions posted by real users on Tripadvisor forums. We found that the detector trained on DSTC9 Track 1 training samples had a large performance degradation on this contrast set (F1 score dropped by over 15%), suggesting the need for methods with better generalization.", |
|
"cite_spans": [ |
|
{ |
|
"start": 114, |
|
"end": 135, |
|
"text": "Gardner et al. (2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we propose a method that can quickly learn a knowledge-seeking turn detector with much fewer out-of-domain samples, such as only a few shots or even zero shot. Our method is composed of two stages: REpresentation learning and DEnsity estimation (REDE). First, we learn a representation model via fine-tuning a pre-trained sentence encoder on all non-knowledge-seeking turns (utterances that can be supported by APIs) via masked language modeling (MLM). Then we learn a density estimator using these representation vectors. During inference, the density estimator produces a density score for a given user utterance. If it is above a threshold, this utterance is counted as an in-domain API turn, otherwise as a knowledge-seeking turn.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To incorporate out-of-domain examples, we propose to use principle component analysis to quickly learn a projection matrix with few knowledgeseeking turn samples and then use this matrix to linearly transform the representation vectors. We conduct experiments on the DSTC9 Track 1 data as well as our new contrast test set. We demonstrate that REDE can achieve competitive performance as other supervised methods in the full-shot setting and outperform them by a large margin in the lowresource setting. More importantly, our approach generalizes much better in the new contrast test set that we created. Overall, our contributions are summarized as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose a new approach, REDE, for knowledge-seeking turn detection that be applied to zero or few shot cases. It can be quickly adapted to new knowledge-seeking turns data with much less training samples, which can achieve over 90% F1 score with only five shots;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 Once the continuous pretraining stage on nonknowledge-seeking turns data is finished, our model can be quickly adapted to any kinds of knowledge-seeking turns data within seconds with only a few parameters to be learned.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We curate and release a contrast set to examine the generalization capability of the knowledge seeking-turn detectors. 1 We demonstrate that our model is better at generalizing to this contrast set than the previous best models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 122, |
|
"text": "1", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our work is closely related to those participating systems in DSTC9 Track 1 (Kim et al., 2020 . All the systems proposed to treat the problem of knowledge-seeking turn detection as a binary classification task and fine-tuned pre-trained models such as RoBERTa, UniLM, PLATO, GPT2, on the whole training set (He et al., 2021; Tang et al., 2021; Mi et al., 2021) , which yielded around 99% and 1 https://github.com/jind11/REDE 96% F1 scores on the development and test sets, respectively. Our method differs in two aspects: 1) We do not need to fine-tune the pre-trained model on the training set; 2) Our model is at least 5 times smaller and we need less than 5% of training data to achieve similar performance. Our method is inspired by previous work for out-of-domain (OOD) detection (Ren et al., 2019; Gangal et al., 2020; Hendrycks et al., 2019) and one-class classification (Sohn et al., 2021) . Kim et al. (2020) also tried tackling this problem by applying an unsupervised anomaly detection algorithm, Local Outlier Factor (LOF) (Breunig et al., 2000) , which compares the local densities between a given input instance and its nearest neighbors, but did not obtain good results (F1 score is less than 50%). Sohn et al. (2021) proposed to first learn a representation model via contrastive learning, then learn a density estimator on the obtained representations. They showed decent performance for one-class classification. All these previous work assumed no access to OOD samples, however, we would like to make use of those OOD samples efficiently when they are available. Therefore we extend the general representation learning framework by proposing a novel representation transformation method to learn OOD samples, which leads to significantly boosted detection performance.", |
|
"cite_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 93, |
|
"text": "(Kim et al., 2020", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 307, |
|
"end": 324, |
|
"text": "(He et al., 2021;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 343, |
|
"text": "Tang et al., 2021;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 344, |
|
"end": 360, |
|
"text": "Mi et al., 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 785, |
|
"end": 803, |
|
"text": "(Ren et al., 2019;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 804, |
|
"end": 824, |
|
"text": "Gangal et al., 2020;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 825, |
|
"end": 848, |
|
"text": "Hendrycks et al., 2019)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 878, |
|
"end": 897, |
|
"text": "(Sohn et al., 2021)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 900, |
|
"end": 917, |
|
"text": "Kim et al. (2020)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 1035, |
|
"end": 1057, |
|
"text": "(Breunig et al., 2000)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 1214, |
|
"end": 1232, |
|
"text": "Sohn et al. (2021)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our work is also related to few/zero-shot learn-ing, which has been widely studied previously (Gao et al., 2020; Jin et al., 2020c,b) . Transfer learning (Zhou et al., 2019c; Zhou et al., 2019a,b; Yan et al., 2020) and data augmentation (Jindal et al., 2020b,a) have been two major methods for this direction, while our work focuses on manipulation of learned representations, which provides a new perspective.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 112, |
|
"text": "(Gao et al., 2020;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 113, |
|
"end": 133, |
|
"text": "Jin et al., 2020c,b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 154, |
|
"end": 174, |
|
"text": "(Zhou et al., 2019c;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 175, |
|
"end": 196, |
|
"text": "Zhou et al., 2019a,b;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 214, |
|
"text": "Yan et al., 2020)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 237, |
|
"end": 261, |
|
"text": "(Jindal et al., 2020b,a)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our method includes three steps: encoder adaptation, representation transformation, and density estimation. The representation transformation step is only applicable when there are OOD examples (i.e., knowledge-seeking turns).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methods", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "In this step, we adapt a pre-trained sentence encoder E to the in-domain data, i.e., non-knowledgeseeking turns, (Devlin et al., 2019) . Specifically, 15% of tokens of x N K i are masked and E is trained to predict these masked tokens.", |
|
"cite_spans": [ |
|
{ |
|
"start": 113, |
|
"end": 134, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoder Adaptation", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "X N K = {x N K 1 , ..., x N K N }, via masked language modeling", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoder Adaptation", |
|
"sec_num": "3.1" |
|
}, |
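A minimal sketch of this encoder adaptation step, assuming a HuggingFace Transformers setup; the checkpoint name, toy data, and hyperparameters are illustrative assumptions, not the authors' released code:

```python
# Hypothetical sketch: continued MLM pretraining of a sentence encoder on
# non-knowledge-seeking turns (checkpoint and hyperparameters are illustrative).
from datasets import Dataset
from transformers import (AutoModelForMaskedLM, AutoTokenizer,
                          DataCollatorForLanguageModeling, Trainer, TrainingArguments)

in_domain_turns = ["i need a cheap hotel in the north", "book a table for two at 7pm"]  # toy examples

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = AutoModelForMaskedLM.from_pretrained("distilbert-base-uncased")

dataset = Dataset.from_dict({"text": in_domain_turns}).map(
    lambda batch: tokenizer(batch["text"], truncation=True, max_length=64),
    batched=True, remove_columns=["text"])

collator = DataCollatorForLanguageModeling(tokenizer, mlm_probability=0.15)  # mask 15% of tokens

Trainer(
    model=model,
    args=TrainingArguments(output_dir="mlm_adapted", num_train_epochs=1,
                           per_device_train_batch_size=8),
    train_dataset=dataset,
    data_collator=collator,
).train()  # the adapted weights are then reused as the sentence encoder E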
|
{ |
|
"text": "To incorporate the knowledge-seeking turns X K = {x K 1 , ..., x K M }, a standard solution is to fine-tune E on the combined data of knowledge-seeking and non-knowledge-seeking turns, X = X K \u222a X N K , as a supervised binary classifier. However, in fewshot settings where M << N , there is an extreme class imbalance problem. In addition, fine-tuning large models may take a long time and much computation power with large data size. Instead, we propose a simple linear transformation to the sentence representation e = E(x) without updating the model parameters, following (Su et al., 2021) :", |
|
"cite_spans": [ |
|
{ |
|
"start": 575, |
|
"end": 592, |
|
"text": "(Su et al., 2021)", |
|
"ref_id": "BIBREF25" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representation Transformation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "e = T (e) = (e \u2212 \u00b5)W", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Representation Transformation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representation Transformation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u00b5 = 1 M M i=1 E(x K i ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representation Transformation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "To calculate W , we first calculate the covariance matrix,", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representation Transformation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "\u03a3 = 1 M M i=1 (E(x K i ) \u2212 \u00b5) T (E(x K i ) \u2212 \u00b5)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representation Transformation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": ", then perform Singular Value Decomposition (SVD) over \u03a3 such that: \u03a3 = U \u039bU T , and finally we obtain W = U \u221a \u039b \u22121 . The elements in diagonal matrix \u039b derived from SVD are sorted in descending order. Therefore, we can retain the first L columns of W to reduce the dimension of transformed vectors\u1ebd, which is theoretically equivalent to Principal Component Analysis (PCA). However, to be noted, both \u00b5 and W parameters are obtained using those knowledgeseeking turns instead of non-knowledge-seeking turns and the number of knowledge-seeking turns is much smaller, which can be as small as just a few shots. In another word, we only need a very small size of out-of-domain samples to learn the parameters needed for our representation transformation as defined in Eq. 1 to transform the representations of in-domain data. This is in contrast to the conventional PCA based density estimation method that assumes only having access to in-domain data, i.e. non-knowledge-seeking turns, and needs to learn and perform PCA transformation both on a good amount of those in-domain data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representation Transformation", |
|
"sec_num": "3.2" |
|
}, |
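A minimal numpy sketch of this transformation, assuming the knowledge-seeking turn embeddings are provided as a matrix; the function names and the dimension-reduction argument are illustrative:

```python
# Hypothetical sketch of Eq. 1: mu and W are estimated from the (few) knowledge-seeking
# turn embeddings, then applied to any embedding produced by the encoder E.
import numpy as np

def fit_transformation(ks_embeddings: np.ndarray, n_components: int = 650):
    """ks_embeddings: (M, d) embeddings of knowledge-seeking turns."""
    mu = ks_embeddings.mean(axis=0)                       # centering vector in Eq. 1
    centered = ks_embeddings - mu
    cov = centered.T @ centered / len(ks_embeddings)      # covariance matrix Sigma
    U, lam, _ = np.linalg.svd(cov)                        # Sigma = U diag(lam) U^T
    W = U / np.sqrt(lam + 1e-12)                          # W = U Lambda^{-1/2}
    return mu, W[:, :n_components]                        # retain the first L columns (PCA-like)

def transform(embeddings: np.ndarray, mu: np.ndarray, W: np.ndarray) -> np.ndarray:
    return (embeddings - mu) @ W                          # Eq. 1: e~ = (e - mu) W
```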
|
{ |
|
"text": "This step of transformation can be viewed as another round of unsupervised representation learning with knowledge-seeking turns, which helps us obtain a better representation and is extremely critical to our claimed great performance for few-shot learning, as analyzed in Section 5.2.2.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Representation Transformation", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In this step, we encode all the non-knowledgeseeking turns in the training set and transform them to obtain {\u1ebd N K 1 , ...,\u1ebd N K N }, normalize them into unit vectors, and then learn a shallow density estimator D over them, such as Gaussian Mixture Model (GMM). Note that in the zero-shot setting when no knowledge-seeking turns are available, the representation transformation step (in Section 3.2) is skipped.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Density estimation", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "During inference, given a test sample x, we encode it with the encoder E, transform it with T defined in Eq. 1, and then use the learned density estimator D to produce a density score D(T (E(x))). If it is above a pre-set threshold \u03b7, x is considered as a non-knowledge-seeking turn, otherwise as a knowledge-seeking turn. This whole pipeline is motivated by the assumption that the well learned representations of in-domain (non-knowledge-seeking turns) and OOD samples (knowledge-seeking turns) should be distributed separately in the latent space, and thus the estimated density of in-domain data by the density estimator should be higher than that of OOD data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Density estimation", |
|
"sec_num": "3.3" |
|
}, |
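A minimal scikit-learn sketch of the density estimation step and the inference rule above; the single-component GMM matches the setting reported later, while the helper names and unit-normalization placement are illustrative assumptions:

```python
# Hypothetical sketch: fit a one-component GMM on transformed, unit-normalized
# in-domain embeddings and threshold its log-density at inference time.
import numpy as np
from sklearn.mixture import GaussianMixture

def unit_normalize(x: np.ndarray) -> np.ndarray:
    return x / np.linalg.norm(x, axis=1, keepdims=True)

def fit_density_estimator(in_domain_e_tilde: np.ndarray) -> GaussianMixture:
    gmm = GaussianMixture(n_components=1, covariance_type="full", random_state=0)
    gmm.fit(unit_normalize(in_domain_e_tilde))
    return gmm

def is_knowledge_seeking(e_tilde: np.ndarray, gmm: GaussianMixture, eta: float) -> np.ndarray:
    scores = gmm.score_samples(unit_normalize(e_tilde))  # log-density under the in-domain model
    return scores < eta                                   # below the threshold -> knowledge-seeking
```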
|
{ |
|
"text": "We use the DSTC9 Track 1 competition data (Kim et al., 2020 , and focus on the sub-task of binary knowledge-seeking turn detection. The data statistics are summarized in Table 2 . 2 We further curate a new contrast test set by first collecting questions posted by real users in the Tripadvisor forums 3 , then obtain the questions as knowledgeseeking turns that cannot be addressed by Multi-WOZ API schema (Eric et al., 2019 ) (this schema was also used for constructing the DSTC9 Track 1 dataset), and finally manually paraphrasing them to make them more like dialogue utterances. We obtained 617 knowledge-seeking turns and mixed them with those non-knowledge-seeking turns in the original test set to form the contrast set. Table 3 shows several data samples for the newly curated contrast set. These user queries collected from real users are much more diverse than those in the benchmark test set of DSTC9 Track 1 dataset. Among these examples, the user query of \"How much do you charge for parking?\" is actually quite challenging for knowledge-seeking turn detection since this query is very close to one of the available API functions that is responsible for checking whether there is free parking. However, in order to answer this query, we still need to invoke the knowledge module to retrieve external unstructured knowledge. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 42, |
|
"end": 59, |
|
"text": "(Kim et al., 2020", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 180, |
|
"end": 181, |
|
"text": "2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 406, |
|
"end": 424, |
|
"text": "(Eric et al., 2019", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 177, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 727, |
|
"end": 734, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The baselines are 1) the best performing model in the DSTC9 Track 1 competition , which is a fine-tuned RoBERTa-Large model on the training set. 2) Fine-tuned RoBERTa-Large-NLI (obtained by finetuning RoBERTa-Large on SNLI and MultiNLI datasets) and DistilBERT-Base-NLI-STSB (obtained by fine-tuning DistilBERT-Base on SNLI, MultiNLI, and STS-B datasets) on the training set. The sentence encoder E we used is DistilBERT-Base-NLI-STSB (Reimers and Gurevych, 2019 Table 3 : Examples of newly collected user questions in the contrast set. These user queries collected from real users are much more diverse than those in the benchmark test set of DSTC9 Track 1 dataset.", |
|
"cite_spans": [ |
|
{ |
|
"start": 435, |
|
"end": 462, |
|
"text": "(Reimers and Gurevych, 2019", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 463, |
|
"end": 470, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Baselines and Settings", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The threshold \u03b7 is chosen based on the highest F1 score on the development set. For the density estimator, we have tried OC-SVM, KDE with various kinds of kernels, and GMM, and we find GMM performs the best and its inference time is the lowest. We set the number of components to 1 for GMM. Dimensionality L is set as 650 for PCA transformation by tuning on the development set. Details of comparison and tuning results are in the appendix. For evaluation metrics, we report precision (P), recall (R), and F1 scores.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baselines and Settings", |
|
"sec_num": "4.2" |
|
}, |
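A minimal sketch of how the threshold \u03b7 could be selected by sweeping candidate values on the development set; the quantile grid and helper name are assumptions, not the exact procedure used in the paper:

```python
# Hypothetical threshold sweep: pick the eta that maximizes dev-set F1.
import numpy as np
from sklearn.metrics import f1_score

def pick_threshold(dev_scores: np.ndarray, dev_labels: np.ndarray) -> float:
    """dev_scores: GMM log-densities; dev_labels: 1 = knowledge-seeking, 0 = non-knowledge-seeking."""
    best_eta, best_f1 = None, -1.0
    for eta in np.quantile(dev_scores, np.linspace(0.01, 0.99, 99)):
        preds = (dev_scores < eta).astype(int)  # low density -> knowledge-seeking turn
        f1 = f1_score(dev_labels, preds)
        if f1 > best_f1:
            best_eta, best_f1 = eta, f1
    return best_eta
```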
|
{ |
|
"text": "Full supervised setting Table 4 summarizes the comparison of our method REDE with baselines where all knowledge-seeking turn samples in the DSTC9 Track 1 training set are used for training. REDE has two advantages: (1) Once the first step of adaptive pre-training on non-knowledge-seeking turns is done, it only needs to update less than 3K parameters of the density estimator for learning the knowledge-seeking turns, but it can still achieve superior performance on the test set;", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 24, |
|
"end": 31, |
|
"text": "Table 4", |
|
"ref_id": "TABREF6" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Main Results", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "(2) It can be better generalized to the new contrast set that has distribution shift with respect to the training data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Main Results", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Low-resource setting We are more interested in exploring how our method performs under the low-resource setting compared with baselines. Therefore, we sub-sampled different numbers of knowledge-seeking turn samples and kept using all non-knowledge-seeking turn samples. We then trained the model and obtained F1 scores on the test set. We performed five times of random subsampling and report the average and standard deviation in Figure 1 . Since the error bar of REDE is too small to be seen, we further provide the complete results in Table 5 . As we can see, REDE is always superior than baselines for all sub-sampling ratios. The performance gap is larger when fewer examples are used. Most notably, for the zero-shot setting without using any knowledge-seeking turns, REDE can still achieve 85.95% of F1 score. For comparison, in the zero-shot setting, we also tested Local Outlier Factor (LOF) (Breunig et al., 2000) , which was used in (Kim et al., 2020) , and obtained an F1 score of 73.78% on the test set, which is much lower than our proposed density estimation method. Under the few-shots setting such as 5shots and 10-shots, REDE can obtain more than 90% of F1, whereas other supervised baselines' scores are under 20%. DistilBERT-Base RoBERTa-Large REDE Figure 1 : F1 score plots with error bar on the test set with different numbers of knowledge-seeking turns used for training Full name of DistilBERT-Base is DistilBERT-Base-NLI-STSB.", |
|
"cite_spans": [ |
|
{ |
|
"start": 901, |
|
"end": 923, |
|
"text": "(Breunig et al., 2000)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 944, |
|
"end": 962, |
|
"text": "(Kim et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 431, |
|
"end": 439, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 538, |
|
"end": 545, |
|
"text": "Table 5", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 1269, |
|
"end": 1277, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Main Results", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "As shown in Table 6 , after removing the MLM adaptation step, our method has significant performance degradation, especially for the contrast set, indicating the importance of adapting the general pre-trained model to the target dataset via unsupervised learning. We have also tried adopting contrastive learning for such unsupervised adaptation (i.e., SimCSE), which has shown state-of-the-art performance for unsupervised representation learning (Gao et al., 2021) . Results in Table 6 show that it is worse than MLM. The reason could be that contrastive learning used in SimCSE would lead to more uniform and dispersed distribution over the latent space, however, the density estimation based OOD detection favors more dense and collapsed distribution for in-domain data. Table 8 : Comparison of different density estimators. Inference time is measure on the whole test set using the same machine. We have also tried other kernels for the KDE estimator, such as 'tophat', 'epanechnikov', 'linear', and 'cosine', but they all perform poorly. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 448, |
|
"end": 466, |
|
"text": "(Gao et al., 2021)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 19, |
|
"text": "Table 6", |
|
"ref_id": "TABREF10" |
|
}, |
|
{ |
|
"start": 480, |
|
"end": 487, |
|
"text": "Table 6", |
|
"ref_id": "TABREF10" |
|
}, |
|
{ |
|
"start": 775, |
|
"end": 782, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Effect of MLM Adaptation", |
|
"sec_num": "5.2.1" |
|
}, |
|
{ |
|
"text": "In Section 3.2, the sentence representation is transformed with PCA learned from knowledge-seeking turns. Table 7 shows the F1 score on the test set using top L principle components with PCA learned using different data. Overall, we can see that PCA with knowledge-seeking turns achieves better performance, and using more principle components is always beneficial. PCA is well-known to help construct new subspaces by maximizing the global variance. Intuitively, by learning PCA over knowledge-seeking turns, we expect the manifolds on knowledge-seeking turns to spread out and non-knowledge seeking turns condense. Figure 2 shows the scatter plot of the top two principle components of transformed features. In Figure 2a, we learn PCA from non-knowledge-seeking turns, which results in the manifold of knowledgeseeking turns (red dots) to be within that of nonknowledge-seeking turns (blue dots). It hurts the performance since the density estimation is performed over non-knowledge-seeking turns, as confirmed by the zero shot result in Table 7 in comparison to that in Fig 1. In contrast, in Figure 2b , we learn PCA with knowledge-seeking turns, which makes knowledge-seeking turns (red dots) spread out and non-knowledge-seeking turns (blue dots) condense. By estimating the density of this condensed blue area, we obtain higher F1 score because all the red dots falling outside of the region of blue dots will be classified as out-of-distribution correctly.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 106, |
|
"end": 113, |
|
"text": "Table 7", |
|
"ref_id": "TABREF11" |
|
}, |
|
{ |
|
"start": 617, |
|
"end": 625, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF2" |
|
}, |
|
{ |
|
"start": 713, |
|
"end": 719, |
|
"text": "Figure", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1040, |
|
"end": 1047, |
|
"text": "Table 7", |
|
"ref_id": "TABREF11" |
|
}, |
|
{ |
|
"start": 1073, |
|
"end": 1079, |
|
"text": "Fig 1.", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1096, |
|
"end": 1105, |
|
"text": "Figure 2b", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Understanding PCA Transformation", |
|
"sec_num": "5.2.2" |
|
}, |
|
{ |
|
"text": "10 0 10 1 10 2 10 3 Top K 50 55 60 65 70 75 80 85 90 95 100 Dev Set F1 (%) ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 20, |
|
"end": 71, |
|
"text": "Top K 50 55 60 65 70 75 80 85 90 95 100", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Understanding PCA Transformation", |
|
"sec_num": "5.2.2" |
|
}, |
|
{ |
|
"text": "For the density estimator, we have tried OC-SVM, KDE with various kinds of kernels, and GMM, which are summarized in Table 8 . All these estimators are implemented using Scikit-Learn library. 5 From Table 8 , we see that GMM performs the best while being the fastest for inference, therefore we chose it as the density estimator in our work. Table 9 shows the performance under different number of components for the GMM density estimator. From it, we see that the number of components has minor influence on the performance so we decide to use 1 as the number of components in this work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 192, |
|
"end": 193, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 117, |
|
"end": 124, |
|
"text": "Table 8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 199, |
|
"end": 206, |
|
"text": "Table 8", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 349, |
|
"text": "Table 9", |
|
"ref_id": "TABREF13" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparison of Density Estimators", |
|
"sec_num": "5.2.3" |
|
}, |
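A minimal sketch of how the three estimator families mentioned above could be compared with scikit-learn; the hyperparameters shown are illustrative defaults, not the tuned values behind Table 8:

```python
# Hypothetical comparison of candidate density / one-class estimators; all three
# expose score_samples, so a single scoring helper covers them.
from sklearn.mixture import GaussianMixture
from sklearn.neighbors import KernelDensity
from sklearn.svm import OneClassSVM

CANDIDATES = {
    "GMM": GaussianMixture(n_components=1, covariance_type="full", random_state=0),
    "KDE (gaussian)": KernelDensity(kernel="gaussian", bandwidth=0.5),
    "OC-SVM": OneClassSVM(kernel="rbf", nu=0.05),
}

def density_scores(estimator, train_e_tilde, test_e_tilde):
    estimator.fit(train_e_tilde)
    return estimator.score_samples(test_e_tilde)  # higher score = more in-domain-like
```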
|
{ |
|
"text": "We can retrain only the first L columns of W for the PCA transformation, which can help us reduce the dimension of transformed representation vector e. Figure 3 shows the development set performance under different values of L when all knowledgeseeking turns are used for training. We see that the first 50 dimensions can achieve over 95% F1 score and 300 dimensions are already enough to realize the peak performance, whereas the full dimension is 768.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 152, |
|
"end": 160, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Effects of L", |
|
"sec_num": "5.2.4" |
|
}, |
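A minimal sketch of the sweep over L, reusing the hypothetical helpers sketched earlier (fit_transformation, transform, unit_normalize, fit_density_estimator, pick_threshold); the value grid is illustrative:

```python
# Hypothetical sweep over the number of retained PCA columns L, scored by dev-set F1.
import numpy as np
from sklearn.metrics import f1_score

def sweep_L(ks_emb, nk_emb, dev_emb, dev_labels, grid=(50, 100, 300, 650, 768)):
    results = {}
    for L in grid:
        mu, W = fit_transformation(ks_emb, n_components=L)      # learned from knowledge-seeking turns
        gmm = fit_density_estimator(transform(nk_emb, mu, W))    # fit on non-knowledge-seeking turns
        scores = gmm.score_samples(unit_normalize(transform(dev_emb, mu, W)))
        eta = pick_threshold(scores, dev_labels)
        preds = (scores < eta).astype(int)
        results[L] = f1_score(dev_labels, preds)
    return results
```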
|
{ |
|
"text": "In this work, we propose a novel method REDE based on domain-adapted representation learning and density estimation for knowledge-seeking turn detection in tasked-orientated dialogue systems. Compared with previous SOTA models, REDE can achieve comparable performance in the full supervised setting and significantly superior performance for the low-resource setting. Besides, REDE has much better generalization capability onto a new contrast set we curated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://scikit-learn.org/stable/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Lof: identifying densitybased local outliers", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Markus", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hans-Peter", |
|
"middle": [], |
|
"last": "Breunig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Raymond", |
|
"middle": [ |
|
"T" |
|
], |
|
"last": "Kriegel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J\u00f6rg", |
|
"middle": [], |
|
"last": "Ng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sander", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Proceedings of the", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Markus M Breunig, Hans-Peter Kriegel, Raymond T Ng, and J\u00f6rg Sander. 2000. Lof: identifying density- based local outliers. In Proceedings of the 2000", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "ACM SIGMOD international conference on Management of data", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "93--104", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "ACM SIGMOD international conference on Manage- ment of data, pages 93-104.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Multiwoz 2.1: A consolidated multi-domain dialogue dataset with state corrections and state tracking baselines", |
|
"authors": [ |
|
{ |
|
"first": "Mihail", |
|
"middle": [], |
|
"last": "Eric", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rahul", |
|
"middle": [], |
|
"last": "Goel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shachi", |
|
"middle": [], |
|
"last": "Paul", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adarsh", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhishek", |
|
"middle": [], |
|
"last": "Sethi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Ku", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anuj", |
|
"middle": [], |
|
"last": "Kumar Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanchit", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuyang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-Tur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.01669" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihail Eric, Rahul Goel, Shachi Paul, Adarsh Ku- mar, Abhishek Sethi, Peter Ku, Anuj Kumar Goyal, Sanchit Agarwal, Shuyang Gao, and Dilek Hakkani-Tur. 2019. Multiwoz 2.1: A consoli- dated multi-domain dialogue dataset with state cor- rections and state tracking baselines. arXiv preprint arXiv:1907.01669.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Key-value retrieval networks for task-oriented dialogue", |
|
"authors": [ |
|
{ |
|
"first": "Mihail", |
|
"middle": [], |
|
"last": "Eric", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lakshmi", |
|
"middle": [], |
|
"last": "Krishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francois", |
|
"middle": [], |
|
"last": "Charette", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the SIGDIAL 2017 Conference", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "37--49", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mihail Eric, Lakshmi Krishnan, Francois Charette, and Christopher D. Manning. 2017. Key-value retrieval networks for task-oriented dialogue. In Proceedings of the SIGDIAL 2017 Conference, pages 37-49.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Likelihood ratios and generative classifiers for unsupervised out-of-domain detection in task oriented dialog", |
|
"authors": [ |
|
{ |
|
"first": "Varun", |
|
"middle": [], |
|
"last": "Gangal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abhinav", |
|
"middle": [], |
|
"last": "Arora", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arash", |
|
"middle": [], |
|
"last": "Einolghozati", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sonal", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "34", |
|
"issue": "", |
|
"pages": "7764--7771", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Varun Gangal, Abhinav Arora, Arash Einolghozati, and Sonal Gupta. 2020. Likelihood ratios and genera- tive classifiers for unsupervised out-of-domain de- tection in task oriented dialog. In Proceedings of the AAAI Conference on Artificial Intelligence, vol- ume 34, pages 7764-7771.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "From machine reading comprehension to dialogue state tracking: Bridging the gap", |
|
"authors": [ |
|
{ |
|
"first": "Shuyang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sanchit", |
|
"middle": [], |
|
"last": "Agarwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tagyoung", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-Tur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.05827" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shuyang Gao, Sanchit Agarwal, Tagyoung Chung, Di Jin, and Dilek Hakkani-Tur. 2020. From machine reading comprehension to dialogue state tracking: Bridging the gap. arXiv preprint arXiv:2004.05827.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Simcse: Simple contrastive learning of sentence embeddings", |
|
"authors": [ |
|
{ |
|
"first": "Tianyu", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xingcheng", |
|
"middle": [], |
|
"last": "Yao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2104.08821" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tianyu Gao, Xingcheng Yao, and Danqi Chen. 2021. Simcse: Simple contrastive learning of sentence em- beddings. arXiv preprint arXiv:2104.08821.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Evaluating models' local decision boundaries via contrast sets", |
|
"authors": [ |
|
{ |
|
"first": "Matt", |
|
"middle": [], |
|
"last": "Gardner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoav", |
|
"middle": [], |
|
"last": "Artzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victoria", |
|
"middle": [], |
|
"last": "Basmov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jonathan", |
|
"middle": [], |
|
"last": "Berant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Bogin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sihao", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pradeep", |
|
"middle": [], |
|
"last": "Dasigi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dheeru", |
|
"middle": [], |
|
"last": "Dua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanai", |
|
"middle": [], |
|
"last": "Elazar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ananth", |
|
"middle": [], |
|
"last": "Gottumukkala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nitish", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hannaneh", |
|
"middle": [], |
|
"last": "Hajishirzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gabriel", |
|
"middle": [], |
|
"last": "Ilharco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Khashabi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kevin", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiangming", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1307--1323", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.findings-emnlp.117" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Matt Gardner, Yoav Artzi, Victoria Basmov, Jonathan Berant, Ben Bogin, Sihao Chen, Pradeep Dasigi, Dheeru Dua, Yanai Elazar, Ananth Gottumukkala, Nitish Gupta, Hannaneh Hajishirzi, Gabriel Ilharco, Daniel Khashabi, Kevin Lin, Jiangming Liu, Nel- son F. Liu, Phoebe Mulcaire, Qiang Ning, Sameer Singh, Noah A. Smith, Sanjay Subramanian, Reut Tsarfaty, Eric Wallace, Ally Zhang, and Ben Zhou. 2020. Evaluating models' local decision boundaries via contrast sets. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1307-1323, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Learning to select external knowledge with multi-scale negative sampling", |
|
"authors": [ |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Huang He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Siqi", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fan", |
|
"middle": [], |
|
"last": "Bao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hua", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhengyu", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Haifeng", |
|
"middle": [], |
|
"last": "Niu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2102.02096" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huang He, Hua Lu, Siqi Bao, Fan Wang, Hua Wu, Zhengyu Niu, and Haifeng Wang. 2021. Learning to select external knowledge with multi-scale nega- tive sampling. arXiv preprint arXiv:2102.02096.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Deep anomaly detection with outlier exposure", |
|
"authors": [ |
|
{ |
|
"first": "Dan", |
|
"middle": [], |
|
"last": "Hendrycks", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mantas", |
|
"middle": [], |
|
"last": "Mazeika", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Dietterich", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dan Hendrycks, Mantas Mazeika, and Thomas Diet- terich. 2019. Deep anomaly detection with outlier exposure. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Mmm: Multi-stage multi-task learning for multi-choice reading comprehension", |
|
"authors": [ |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuyang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiun-Yu", |
|
"middle": [], |
|
"last": "Kao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tagyoung", |
|
"middle": [], |
|
"last": "Chung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-Tur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "34", |
|
"issue": "", |
|
"pages": "8010--8017", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Di Jin, Shuyang Gao, Jiun-Yu Kao, Tagyoung Chung, and Dilek Hakkani-tur. 2020a. Mmm: Multi-stage multi-task learning for multi-choice reading compre- hension. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 8010-8017.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Hooks in the headline: Learning to generate headlines with controlled styles", |
|
"authors": [ |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhijing", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joey", |
|
"middle": [ |
|
"Tianyi" |
|
], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lisa", |
|
"middle": [], |
|
"last": "Orii", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Szolovits", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5082--5093", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.456" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Di Jin, Zhijing Jin, Joey Tianyi Zhou, Lisa Orii, and Pe- ter Szolovits. 2020b. Hooks in the headline: Learn- ing to generate headlines with controlled styles. In Proceedings of the 58th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 5082- 5093, Online. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A simple baseline to semisupervised domain adaptation for machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhijing", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joey", |
|
"middle": [ |
|
"Tianyi" |
|
], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Szolovits", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2001.08140" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Di Jin, Zhijing Jin, Joey Tianyi Zhou, and Peter Szolovits. 2020c. A simple baseline to semi- supervised domain adaptation for machine transla- tion. arXiv preprint arXiv:2001.08140.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Can I be of further assistance? using unstructured knowledge access to improve task-oriented conversational modeling", |
|
"authors": [ |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Seokhwan", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-Tur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 1st Workshop on Document-grounded Dialogue and Conversational Question Answering", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "119--127", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.dialdoc-1.16" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Di Jin, Seokhwan Kim, and Dilek Hakkani-Tur. 2021. Can I be of further assistance? using unstructured knowledge access to improve task-oriented conver- sational modeling. In Proceedings of the 1st Work- shop on Document-grounded Dialogue and Conver- sational Question Answering (DialDoc 2021), pages 119-127, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Augmenting NLP models using latent feature interpolations", |
|
"authors": [ |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Jindal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arijit", |
|
"middle": [], |
|
"last": "Ghosh Chowdhury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aniket", |
|
"middle": [], |
|
"last": "Didolkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ramit", |
|
"middle": [], |
|
"last": "Sawhney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rajiv Ratn", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6931--6936", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.coling-main.611" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amit Jindal, Arijit Ghosh Chowdhury, Aniket Didolkar, Di Jin, Ramit Sawhney, and Rajiv Ratn Shah. 2020a. Augmenting NLP models using latent feature in- terpolations. In Proceedings of the 28th Inter- national Conference on Computational Linguistics, pages 6931-6936, Barcelona, Spain (Online). Inter- national Committee on Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Ramit Sawhney, and Rajiv Ratn Shah. 2020b. Speechmixaugmenting deep sound recognition using hidden space interpolations", |
|
"authors": [ |
|
{ |
|
"first": "Amit", |
|
"middle": [], |
|
"last": "Jindal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aniket", |
|
"middle": [], |
|
"last": "Narayanan Elavathur Ranganatha", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arijit", |
|
"middle": [ |
|
"Ghosh" |
|
], |
|
"last": "Didolkar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Chowdhury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "INTERSPEECH", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "861--865", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Amit Jindal, Narayanan Elavathur Ranganatha, Aniket Didolkar, Arijit Ghosh Chowdhury, Di Jin, Ramit Sawhney, and Rajiv Ratn Shah. 2020b. Speechmix- augmenting deep sound recognition using hidden space interpolations. In INTERSPEECH, pages 861- 865.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Beyond domain APIs: Task-oriented conversational modeling with unstructured knowledge access", |
|
"authors": [ |
|
{ |
|
"first": "Seokhwan", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihail", |
|
"middle": [], |
|
"last": "Eric", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Gopalakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Behnam", |
|
"middle": [], |
|
"last": "Hedayatnia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-Tur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 21th Annual Meeting of the Special Interest Group on Discourse and Dialogue", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "278--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seokhwan Kim, Mihail Eric, Karthik Gopalakrishnan, Behnam Hedayatnia, Yang Liu, and Dilek Hakkani- Tur. 2020. Beyond domain APIs: Task-oriented con- versational modeling with unstructured knowledge access. In Proceedings of the 21th Annual Meeting of the Special Interest Group on Discourse and Dia- logue, pages 278-289, 1st virtual meeting. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Beyond domain apis: Task-oriented conversational modeling with unstructured knowledge access track in dstc9", |
|
"authors": [ |
|
{ |
|
"first": "Seokhwan", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihail", |
|
"middle": [], |
|
"last": "Eric", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Behnam", |
|
"middle": [], |
|
"last": "Hedayatnia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Karthik", |
|
"middle": [], |
|
"last": "Gopalakrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chao-Wei", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dilek", |
|
"middle": [], |
|
"last": "Hakkani-Tur", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2101.09276" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Seokhwan Kim, Mihail Eric, Behnam Hedayatnia, Karthik Gopalakrishnan, Yang Liu, Chao-Wei Huang, and Dilek Hakkani-Tur. 2021. Beyond do- main apis: Task-oriented conversational modeling with unstructured knowledge access track in dstc9. arXiv preprint arXiv:2101.09276.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Roberta: A robustly optimized BERT pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized BERT pretraining ap- proach. CoRR, abs/1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Jing Zheng, and Peng Xu. 2021. Towards generalized models for beyond domain api task-oriented dialogue. AAAI-21 DSTC9 Workshop", |
|
"authors": [ |
|
{ |
|
"first": "Haitao", |
|
"middle": [], |
|
"last": "Mi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiyu", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinpei", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yifan", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongbin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haitao Mi, Qiyu Ren, Yinpei Dai, Yifan He, Jian Sun, Yongbin Li, Jing Zheng, and Peng Xu. 2021. To- wards generalized models for beyond domain api task-oriented dialogue. AAAI-21 DSTC9 Workshop.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Sentencebert: Sentence embeddings using siamese bertnetworks", |
|
"authors": [ |
|
{ |
|
"first": "Nils", |
|
"middle": [], |
|
"last": "Reimers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence- bert: Sentence embeddings using siamese bert- networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Likelihood ratios for outof-distribution detection", |
|
"authors": [ |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emily", |
|
"middle": [], |
|
"last": "Fertig", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jasper", |
|
"middle": [], |
|
"last": "Snoek", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [], |
|
"last": "Poplin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Depristo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joshua", |
|
"middle": [], |
|
"last": "Dillon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Balaji", |
|
"middle": [], |
|
"last": "Lakshminarayanan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jie Ren, Peter J. Liu, Emily Fertig, Jasper Snoek, Ryan Poplin, Mark Depristo, Joshua Dillon, and Balaji Lakshminarayanan. 2019. Likelihood ratios for out- of-distribution detection. In Advances in Neural In- formation Processing Systems, volume 32. Curran Associates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Learning and evaluating representations for deep one-class classification", |
|
"authors": [ |
|
{ |
|
"first": "Kihyuk", |
|
"middle": [], |
|
"last": "Sohn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chun-Liang", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinsung", |
|
"middle": [], |
|
"last": "Yoon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minho", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Pfister", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kihyuk Sohn, Chun-Liang Li, Jinsung Yoon, Minho Jin, and Tomas Pfister. 2021. Learning and evaluat- ing representations for deep one-class classification. In International Conference on Learning Represen- tations.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Whitening sentence representations for better semantics and faster retrieval", |
|
"authors": [ |
|
{ |
|
"first": "Jianlin", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiarun", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weijie", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yangyiwen", |
|
"middle": [], |
|
"last": "Ou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2103.15316" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jianlin Su, Jiarun Cao, Weijie Liu, and Yangyiwen Ou. 2021. Whitening sentence representations for bet- ter semantics and faster retrieval. arXiv preprint arXiv:2103.15316.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Radge relevance learning and generation evaluating method for task-oriented conversational system-anonymous version", |
|
"authors": [ |
|
{ |
|
"first": "Liang", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qinghua", |
|
"middle": [], |
|
"last": "Shang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaokao", |
|
"middle": [], |
|
"last": "Lv", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zixi", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shijiang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chuanming", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhuo", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liang Tang, Qinghua Shang, Kaokao Lv, Zixi Fu, Shi- jiang Zhang, Chuanming Huang, and Zhuo Zhang. 2021. Radge relevance learning and generation evaluating method for task-oriented conversational system-anonymous version. AAAI-21 DSTC9 Work- shop.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Hybrid code networks: practical and efficient end-to-end dialog control with supervised and reinforcement learning", |
|
"authors": [ |
|
{ |
|
"first": "Jason", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Williams", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kavosh", |
|
"middle": [], |
|
"last": "Asadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Zweig", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jason D. Williams, Kavosh Asadi, and Geoffrey Zweig. 2017. Hybrid code networks: practical and efficient end-to-end dialog control with supervised and rein- forcement learning. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics (ACL 2017).", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Multi-source meta transfer for low resource multiple-choice question answering", |
|
"authors": [ |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joey", |
|
"middle": [ |
|
"Tianyi" |
|
], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7331--7341", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.654" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ming Yan, Hao Zhang, Di Jin, and Joey Tianyi Zhou. 2020. Multi-source meta transfer for low resource multiple-choice question answering. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7331-7341, On- line. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Dual adversarial transfer for sequence labeling", |
|
"authors": [ |
|
{ |
|
"first": "Joey", |
|
"middle": [ |
|
"Tianyi" |
|
], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xi", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "IEEE transactions on pattern analysis and machine intelligence", |
|
"volume": "43", |
|
"issue": "", |
|
"pages": "434--446", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joey Tianyi Zhou, Hao Zhang, Di Jin, and Xi Peng. 2019a. Dual adversarial transfer for sequence la- beling. IEEE transactions on pattern analysis and machine intelligence, 43(2):434-446.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "PCA with knowledgeseeking turns, F1 = 78.49%.", |
|
"num": null |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Scatter plot using top two principle components of PCA on test samples. F1 score is measured on the test set with top two dimensions only.", |
|
"num": null |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"text": "Development set F1 scores by retraining different values of first L columns of W . Full dimension is 768.", |
|
"num": null |
|
}, |
|
"TABREF0": { |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"3\">Turn Speaker Utterance</td><td>Sampled Knowledge Snippets from FAQs</td></tr><tr><td>1</td><td>User</td><td>I'm Would you like any more information on this location?</td></tr><tr><td>5</td><td>User</td><td>I'm interested in knowing, do they have a workout facility on the</td></tr><tr><td/><td/><td>premises?</td></tr><tr><td>6</td><td colspan=\"2\">Agent There are both a fitness center and gym available on the premises.</td><td>Q1: Do you have room service for your guests?</td></tr><tr><td/><td/><td>Does this sound ok?</td></tr><tr><td/><td/><td/><td>A1: Yes, the Lensfield Hotel provides room services.</td></tr><tr><td/><td/><td/><td>Q2: Is there a gym available at your location?</td></tr><tr><td/><td/><td/><td>A2: There is both a fitness center and gym avail-</td></tr><tr><td/><td/><td/><td>able on the premises.</td></tr><tr><td/><td/><td/><td>Q3: Can I bring my dog?</td></tr><tr><td/><td/><td/><td>A3: Pets are not allowed at the Lensfield Hotel.</td></tr><tr><td>7</td><td>User</td><td>That is perfect can you book that for me please.</td></tr><tr><td>8</td><td colspan=\"2\">Agent The Lensfield Hotel is located in the South. It has a 3 star rating</td></tr><tr><td/><td/><td>and is expensive. There is free parking and internet. I have booked</td></tr><tr><td/><td/><td>it for you.</td></tr><tr><td>9</td><td>User</td><td>Great, thank you!</td></tr></table>", |
|
"text": "looking for a place to stay in the south of town. It doesn't need to have free parking. 2 Agent There are 4 hotels that are in the area you are looking for. Would you prefer a 3 or 4 star rated hotel? 3 User I don't care about the star rating as long as it's expensive. 4Agent The Lensfield Hotel is the only expensive hotel in the south area.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"content": "<table><tr><td>: One example of task-oriented conversations with unstructured knowledge access. The user utterance of</td></tr><tr><td>turn 5 is beyond the coverage of API and needs external knowledge support, therefore it is a knowledge-seeking</td></tr><tr><td>turn while other user turns are non-knowledge-seeking turns. To address turn 5, three sampled FAQ pairs for</td></tr><tr><td>the entity \"Lensfield Hotel\" extracted from the external knowledge are listed in the rightmost column. The most</td></tr><tr><td>appropriate FAQ pair is highlighted in bold font.</td></tr></table>", |
|
"text": "", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF5": { |
|
"type_str": "table", |
|
"content": "<table><tr><td>Learning Schema</td><td>Sentence Encoder</td><td>Model size</td><td>Trainable Parameters</td><td>P</td><td>Test Set (%) R</td><td>F1</td><td>Contrast Set (%) P R F1</td></tr><tr><td/><td>RoBERTa-Large</td><td>355M</td><td>355M</td><td colspan=\"4\">99.19 92.88 95.93 96.61 69.37 80.75</td></tr><tr><td>Standard Fine-tuning</td><td>RoBERTa-Large-NLI</td><td>355M</td><td>355M</td><td colspan=\"4\">99.46 92.28 95.73 97.54 64.18 77.42</td></tr><tr><td/><td>DistilBERT-Base-NLI-STSB</td><td>66M</td><td>66M</td><td colspan=\"4\">98.92 92.78 95.75 95.36 66.67 78.44</td></tr><tr><td>REDE</td><td>DistilBERT-Base-NLI-STSB</td><td>66M</td><td>3K</td><td colspan=\"2\">97.76</td><td/><td/></tr></table>", |
|
"text": "94.65 96.18 86.98 94.17 90.43", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "Performance on the original test set and contrast set when all knowledge-seeking turns data are used for training. Trainable parameters refer to those parameters that are updated for learning knowledge-seeking turns.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF8": { |
|
"type_str": "table", |
|
"content": "<table><tr><td>: Averaged F1 score and standard deviation un-</td></tr><tr><td>der the low-resource setting by randomly sub-sampling</td></tr><tr><td>different number of knowledge-seeking turns for five</td></tr><tr><td>times. DistillBERT is DistillBERT-Base-NLI-STSB</td></tr><tr><td>while RoBERTa is RoBERTa-Large.</td></tr></table>", |
|
"text": "", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF10": { |
|
"type_str": "table", |
|
"content": "<table><tr><td colspan=\"4\">Dimensions Zero-shot Ten-shot Full-shot</td></tr><tr><td>Top 5</td><td>65.67</td><td>76.64</td><td>78.38</td></tr><tr><td>Top 50</td><td>71.04</td><td>82.23</td><td>92.40</td></tr><tr><td>Top 500</td><td>77.16</td><td>91.73</td><td>96.32</td></tr><tr><td>All (768)</td><td>77.05</td><td>92.37</td><td>96.09</td></tr></table>", |
|
"text": "Ablation study for MLM adaptation by removing it or replacing it with SimCSE (a contrastive learning method). All training samples are used here.", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF11": { |
|
"type_str": "table", |
|
"content": "<table><tr><td>: F1 score on test set for top 5, 50, 500, and</td></tr><tr><td>all principle components under three different settings:</td></tr><tr><td>zero-shot (PCA over non-knowledge-seeking turns),</td></tr><tr><td>ten-shot (PCA over ten knowledge-seeking turns), and</td></tr><tr><td>full-shot (PCA over all knowledge-seeking turns).</td></tr></table>", |
|
"text": "", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF13": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"text": "Comparison of performance (in percentage) by using different number of components for the GMM estimator.", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |