|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T10:33:18.073224Z" |
|
}, |
|
"title": "Using Large Pretrained Language Models for Answering User Queries from Product Specifications", |
|
"authors": [ |
|
{ |
|
"first": "Kalyani", |
|
"middle": [], |
|
"last": "Roy", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology", |
|
"location": { |
|
"settlement": "Kharagpur 2 Flipkart" |
|
} |
|
}, |
|
"email": "kroy@iitkgp.ac.in" |
|
}, |
|
{ |
|
"first": "Smit", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology", |
|
"location": { |
|
"settlement": "Kharagpur 2 Flipkart" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Nithish", |
|
"middle": [], |
|
"last": "Pai", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology", |
|
"location": { |
|
"settlement": "Kharagpur 2 Flipkart" |
|
} |
|
}, |
|
"email": "nithish.p@flipkart.com" |
|
}, |
|
{ |
|
"first": "Jaidam", |
|
"middle": [], |
|
"last": "Ramtej", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology", |
|
"location": { |
|
"settlement": "Kharagpur 2 Flipkart" |
|
} |
|
}, |
|
"email": "jaidam.ramtej@flipkart.com" |
|
}, |
|
{ |
|
"first": "Prajit", |
|
"middle": [ |
|
"Prashant" |
|
], |
|
"last": "Nadkarn", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology", |
|
"location": { |
|
"settlement": "Kharagpur 2 Flipkart" |
|
} |
|
}, |
|
"email": "prajit.pn@flipkart.com" |
|
}, |
|
{ |
|
"first": "Jyotirmoy", |
|
"middle": [], |
|
"last": "Banerjee", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology", |
|
"location": { |
|
"settlement": "Kharagpur 2 Flipkart" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Pawan", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology", |
|
"location": { |
|
"settlement": "Kharagpur 2 Flipkart" |
|
} |
|
}, |
|
"email": "pawang@cse.iitkgp.ac.in" |
|
}, |
|
{ |
|
"first": "Surender", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indian Institute of Technology", |
|
"location": { |
|
"settlement": "Kharagpur 2 Flipkart" |
|
} |
|
}, |
|
"email": "surender.k@flipkart.com" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "While buying a product from the e-commerce websites, customers generally have a plethora of questions. From the perspective of both the e-commerce service provider as well as the customers, there must be an effective question answering system to provide immediate answers to the user queries. While certain questions can only be answered after using the product, there are many questions which can be answered from the product specification itself. Our work takes a first step in this direction by finding out the relevant product specifications, that can help answering the user questions. We propose an approach to automatically create a training dataset for this problem. We utilize recently proposed XLNet and BERT architectures for this problem and find that they provide much better performance than the Siamese model, previously applied for this problem (Lai et al., 2018). Our model gives a good performance even when trained on one vertical and tested across different verticals.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "While buying a product from the e-commerce websites, customers generally have a plethora of questions. From the perspective of both the e-commerce service provider as well as the customers, there must be an effective question answering system to provide immediate answers to the user queries. While certain questions can only be answered after using the product, there are many questions which can be answered from the product specification itself. Our work takes a first step in this direction by finding out the relevant product specifications, that can help answering the user questions. We propose an approach to automatically create a training dataset for this problem. We utilize recently proposed XLNet and BERT architectures for this problem and find that they provide much better performance than the Siamese model, previously applied for this problem (Lai et al., 2018). Our model gives a good performance even when trained on one vertical and tested across different verticals.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Product specifications are the attributes of a product. These specifications help a user to easily identify and differentiate products and choose the one that matches certain specifications. There are more than 80 million products across 80+ product categories on Flipkart 1 . The 6 largest categories are -Mobile, AC, Backpack, Computer, Shoes, and Watches. A large fraction of user queries (\u223c 20%) 2 can be answered with the specifications. Product specifications would be helpful in providing instant responses to questions newly posed by users about * Work done while author was at IIT Kharagpur.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "1 Flipkart Pvt Ltd. is an e-commerce company based in Bangalore, India. 2 We randomly sampled 1500 questions from all these verticals except Mobile and manually annotated them as to whether these can be answered through product specifications. the corresponding product. Consider a question \"What is the fabric of this bag?\" This new question can be easily answered by retrieving the specification \"Material\" as the response. Fig. 1 depicts this scenario.", |
|
"cite_spans": [ |
|
{ |
|
"start": 72, |
|
"end": 73, |
|
"text": "2", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 426, |
|
"end": 432, |
|
"text": "Fig. 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Most of the recent works on product related queries on e-commerce leverage the product reviews to answer the questions (Gao et al., 2019; McAuley and Yang, 2016) . Although reviews are a rich source of data, they are also subject to personal experiences. People tend to give many reviews on some products and since it is based upon their personal experience, the opinion is also diverse. This creates a massive volume and range of opinions and thus makes review systems difficult to navigate. Sometimes products do not even have any reviews that can be used to find an answer, also the reviews do not mention the specifications a lot, but mainly deal with the experience. So, there are several reasons why product specifications might be a useful source of information to answer product-related queries which does not involve user experience to find an answer. As the specifications are readily available, users can get the response instantly. This paper attempts to retrieve the product specifications that would answer the user queries. While solving this problem, our key contributions are as follows -(i) We demonstrate the success of XL-Net on finding product specifications that can help answering product related queries. It beats the baseline Siamese method by 0.14 \u2212 0.31 points in HIT@1. (ii) We utilize a method to automatically create a large training dataset using a semisupervised approach, that was used to fine-tune XLNet and other models. (iii) While we trained on Mobile vertical, we tested on different verticals, namely, AC , Backpack , Computer , Shoes, Watches, which show promising results.", |
|
"cite_spans": [ |
|
{ |
|
"start": 119, |
|
"end": 137, |
|
"text": "(Gao et al., 2019;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 161, |
|
"text": "McAuley and Yang, 2016)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In recent years, e-commerce product question answering (PQA) has received a lot of attention. Yu et al. (2018) present a framework to answer product related questions by retrieving a ranked list of reviews and they employ the Positional Language Model (PLM) to create the training data. Chen et al. (2019) apply a multi-task attentive model to identify plausible answers. Lai et al. (2018) propose a Siamese deep learning model for answering questions regarding product specifications. The model returns a score for a question and specification pair. McAuley and Yang (2016) exploit product reviews for answer prediction via a Mixture of Expert (MoE) model. This MoE model makes use of a review relevance function and an answer prediction function. It assumes that a candidate answer set containing the correct answers is available for answer selection. Cui et al. (2017) develop a chatbot for e-commerce sites known as SuperAgent. SuperAgent considers question answer collections, reviews and specifications when answering questions. It selects the best answer from multiple data sources. Language representation models like BERT (Devlin et al., 2019) and XLNet (Yang et al., 2019) are pre-trained on vast amounts of text and then fine-tuned on task-specific labelled data. The resulting models have achieved state of the art in many natural language processing tasks including question answering. Dzendzik et al. (2019) employ BERT to answer binary questions by utilizing customer reviews.", |
|
"cite_spans": [ |
|
{ |
|
"start": 94, |
|
"end": 110, |
|
"text": "Yu et al. (2018)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 372, |
|
"end": 389, |
|
"text": "Lai et al. (2018)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 854, |
|
"end": 871, |
|
"text": "Cui et al. (2017)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 1131, |
|
"end": 1152, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 1163, |
|
"end": 1182, |
|
"text": "(Yang et al., 2019)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1399, |
|
"end": 1421, |
|
"text": "Dzendzik et al. (2019)", |
|
"ref_id": "BIBREF3" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "In this paper, unlike some of the previous works (Lai et al., 2018; Chen et al., 2019) on PQA that solely rely on human annotators to annotate the training instances, we propose a semi-supervised method to label training data. We leverage the product specifications to answer user queries by using BERT and XLNet.", |
|
"cite_spans": [ |
|
{ |
|
"start": 49, |
|
"end": 67, |
|
"text": "(Lai et al., 2018;", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 68, |
|
"end": 86, |
|
"text": "Chen et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Background and Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Here, we formalize the problem of answering user queries from product specifications. Given a question Q about a product P and the list of M specifications {s 1 , s 2 , ..., s M } of P , our objective is to identify the specification s i that can help answer Q.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Here, we assume that the question is answerable from specifications.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Statement", |
|
"sec_num": "3" |
|
}, |
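
{

"text": "As a minimal sketch of this selection task (function and variable names are illustrative, not from the paper), the trained models of Section 4 act as a pairwise scorer applied to every specification of the product:\n\nfrom typing import Callable, List\n\ndef best_specification(question: str,\n                       specifications: List[str],\n                       score: Callable[[str, str], float]) -> str:\n    # Score every (question, specification) pair with a hypothetical\n    # relevance function and return the highest-scoring specification.\n    return max(specifications, key=lambda s: score(question, s))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Problem Statement",

"sec_num": "3"

},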
|
{ |
|
"text": "Our goal is to train a classifier that takes a question and a specification as input (e.g., \"Color Code Black\") and predicts whether the specification is relevant to the question. We take Siamese architecture (Lai et al., 2018) as our baseline method. We fine-tune BERT and XLNet for this classification task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 209, |
|
"end": 227, |
|
"text": "(Lai et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Siamese: We train a 100-dimensional word2vec embedding on the whole corpus (all questions and specifications as shown in Table 1 .) to get the input word representation. In the Siamese model, the question and specification is passed through a Siamese Bi-LSTM layer. Then we use max-pooling on the contextual representations to get the feature vectors of the question and specification. We concatenate the absolute difference and hadamard product of these two feature vectors and feed it to two fully connected layers of dimension 50 and 25, subsequently. Finally, the softmax layer gives the relevance score.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 121, |
|
"end": 128, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4" |
|
}, |
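
{

"text": "The following PyTorch sketch mirrors the Siamese baseline described above. It is a rough reconstruction under stated assumptions (e.g., the Bi-LSTM hidden size, which the paper does not report), not the authors' code:\n\nimport torch\nimport torch.nn as nn\n\nclass SiameseScorer(nn.Module):\n    def __init__(self, emb_dim=100, hidden=64):  # hidden size is an assumption\n        super().__init__()\n        # Shared (Siamese) Bi-LSTM encoder over word2vec inputs.\n        self.encoder = nn.LSTM(emb_dim, hidden, bidirectional=True, batch_first=True)\n        self.fc = nn.Sequential(\n            nn.Linear(4 * hidden, 50), nn.ReLU(),  # |q - s| and q * s concatenated\n            nn.Linear(50, 25), nn.ReLU(),\n            nn.Linear(25, 2),  # two classes: relevant / not relevant\n        )\n\n    def encode(self, x):  # x: (batch, seq_len, emb_dim)\n        out, _ = self.encoder(x)  # (batch, seq_len, 2 * hidden)\n        return out.max(dim=1).values  # max-pool over time\n\n    def forward(self, q_emb, s_emb):\n        q, s = self.encode(q_emb), self.encode(s_emb)\n        feats = torch.cat([(q - s).abs(), q * s], dim=-1)\n        return self.fc(feats).softmax(dim=-1)[:, 1]  # relevance score",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Architecture",

"sec_num": "4"

},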
|
{ |
|
"text": "BERT and XLNet : The architecture we use for fine-tuning BERT and XLNet is the same. We begin with the pre-trained BERT Base and XLNet Base model. To adapt the models for our task, we introduce a fully-connected layer over the final hidden state corresponding to the [CLS] input token. During fine-tuning, we optimize the entire model end-to-end, with the additional softmax classifier parameters W \u2208 R K\u00d7H , where H is the dimen- 5 Experimental Setup", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Model Architecture", |
|
"sec_num": "4" |
|
}, |
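
{

"text": "One way to realize this fine-tuning setup is through the Hugging Face transformers library (an assumption of this sketch; the paper does not name its implementation). The sequence-classification head corresponds to the softmax classifier parameters W described above:\n\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification\n\n# Swap in 'xlnet-base-cased' for the XLNet variant.\ntokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')\nmodel = AutoModelForSequenceClassification.from_pretrained(\n    'bert-base-uncased', num_labels=2)  # K = 2: relevant / not relevant\n\n# The question and a candidate specification are packed as one sentence pair;\n# 'Material Polyester' is an invented example specification.\ninputs = tokenizer('What is the fabric of this bag?', 'Material Polyester',\n                   return_tensors='pt')\nlogits = model(**inputs).logits  # shape (1, 2); softmax gives the relevance score",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Model Architecture",

"sec_num": "4"

},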
|
{ |
|
"text": "The Statistics for the 6 largest categories used in this paper are shown in Table 1 , containing a snapshot of product details up to January 2019. Except for mobiles, for other domains, 300 products were sampled. As the number of question-specification pairs is huge, manually labelling a sufficiently large dataset is a tedious task. So, we propose a semisupervised method to create a large training dataset using Dual Embedding Space model (DESM) (Mitra et al., 2016) . Suppose a product P has S specifications and Q questions. For a question q i \u2208 Q and a specification s j \u2208 S, we find dual embedding score DU AL(q i , s j ) using Equation 1, where t q and t s denote the vectors for the question and specification terms, respectively. We consider (q i , s j ) pair positive if DU AL(q i , s j ) \u2265 \u03b8 and negative if", |
|
"cite_spans": [ |
|
{ |
|
"start": 449, |
|
"end": 469, |
|
"text": "(Mitra et al., 2016)", |
|
"ref_id": "BIBREF8" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 76, |
|
"end": 83, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset Creation", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "DU AL(q i , s j ) < \u03b8. DU AL(q i , s j ) = 1 |q i | tq\u2208q i t q T s j t q s j", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Dataset Creation", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "where", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Creation", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "s j = 1 |s j | ts\u2208s j t s t s", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "Dataset Creation", |
|
"sec_num": "5.1" |
|
}, |
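
{

"text": "A direct NumPy transcription of Equations 1 and 2 (a sketch; embed() stands in for a lookup into the word embedding space and is an illustrative name, not the paper's code):\n\nimport numpy as np\n\ndef spec_centroid(spec_terms, embed):\n    # Equation 2: average of the unit-normalised specification term vectors.\n    vecs = [embed(t) for t in spec_terms]\n    return np.mean([v / np.linalg.norm(v) for v in vecs], axis=0)\n\ndef dual_score(question_terms, spec_terms, embed):\n    # Equation 1: mean cosine similarity between each question term\n    # and the specification centroid.\n    s_bar = spec_centroid(spec_terms, embed)\n    sims = [embed(t) @ s_bar / (np.linalg.norm(embed(t)) * np.linalg.norm(s_bar))\n            for t in question_terms]\n    return float(np.mean(sims))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dataset Creation",

"sec_num": "5.1"

},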
|
{ |
|
"text": "We take M obile dataset to create labelled training data since most of the questions come from this vertical. We choose the threshold value (\u03b8) which gives the best accuracy on manually labelled balanced validation dataset consisting of 380 question and specification pairs. We train a word2vec (Mikolov et al., 2013 ) model on our training dataset to get the embeddings of the words. The word2vec model learns two weight matrices during training. The matrix corresponding to the input space and the output space is denoted as IN and OUT word embedding space respectively. Word2vec leverages only the input embeddings (IN), but discards the output embeddings (OUT), whereas DESM utilizes both IN and OUT embeddings. To compute the DUAL score of a question and specification, we take OUT-OUT vectors as it gives the best validation accuracy. We find that for \u03b8 = 0.34, we gain maximum accuracy value of 0.72 on the validation set. This creates a labelled training dataset D with 57, 138 positive pairs and 655, 290 negative pairs. For training, we take all the positive data from D and we randomly sample an equal number of negative examples from D.", |
|
"cite_spans": [ |
|
{ |
|
"start": 295, |
|
"end": 316, |
|
"text": "(Mikolov et al., 2013", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Creation", |
|
"sec_num": "5.1" |
|
}, |
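
{

"text": "The labelling procedure sketched below assumes DUAL scores have been precomputed for all pairs; the variable names are illustrative, not the paper's code:\n\nimport random\n\ndef pick_threshold(val_scores, val_labels, candidates):\n    # Choose the theta with the best accuracy on the 380 validation pairs;\n    # the paper reports theta = 0.34 with accuracy 0.72.\n    def accuracy(theta):\n        preds = [s >= theta for s in val_scores]\n        return sum(p == y for p, y in zip(preds, val_labels)) / len(val_labels)\n    return max(candidates, key=accuracy)\n\ndef balanced_training_set(pairs_with_scores, theta, seed=0):\n    pos = [p for p, s in pairs_with_scores if s >= theta]\n    neg = [p for p, s in pairs_with_scores if s < theta]\n    random.seed(seed)\n    # Keep all positive pairs and sample an equal number of negatives.\n    return pos + random.sample(neg, len(pos))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Dataset Creation",

"sec_num": "5.1"

},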
|
{ |
|
"text": "To create the test datasets, domain experts manually annotate the correct specification for a question. As the test datasets come from different verticals, there is no product in common with the training set. The details of different test datasets are shown in Table 2 . We analyze the questions in the test datasets and find that the questions can be roughly categorized into three classes -numerical, yes/no and others based upon the answer type of the questions. For a question, we have a number of specifications and only one of them is correct.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 261, |
|
"end": 268, |
|
"text": "Table 2", |
|
"ref_id": "TABREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset Creation", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "We split the Mobile dataset into 80% and 20% as training set and development set, respectively. The Siamese model is trained for 20 epoch with Stochastic Gradient Descent optimizer and learn- ing rate 0.01. The fine-tuning of BERT and XL-Net is done with the same experimental settings as given in the original papers. In all the models, we minimize the cross-entropy loss while training. BERT-380 and XLNet-380 models are fine-tuned on the 380 labeled validation dataset that was used for creating the training dataset in Section 5.1. During evaluation, we sort the question specification pairs according to their relevance score. From this ranked list, we compute whether the correct specification appears within top k, k \u2208 {1, 2, 3} positions. The ratio of correctly identified specifications in top 1, 2, and 3 positions to the total number of questions is denoted as HIT@1, HIT@2 and HIT@3 respectively. Table 3 shows the performance of the models on different datasets 3 . BERT-380 and XLNet-380 perform very poorly, but when we use the train dataset created with DESM, there is a large boost in the models' performance and it shows the effectiveness of our semi-supervised method in generating labeled dataset. Both BERT and XLNet outperform the baseline Siamese model (Lai et al., 2018) by a large margin, and retrieve the correct specification within top 3 results for most of the queries. For Backpack and AC, both BERT and XLNet are very competitive. XLNet outperforms BERT in Computer, Shoes, and Watches. Only in HIT@1 of AC, BERT has surpassed XLNet with 0.07 points. We see that all the models have performed better in Computer compared to the other datasets. Computer has the highest percentage of yes/no questions and this might be one of the reasons, as some questions might have word overlap with correct specification. Table 4 shows the top three specifications returned by different models for some questions. We see that Siamese architecture returns results which look similar to na\u00efve word match, and retrieve wrong specifications. On the other hand, BERT and XLNet are able to retrieve the correct specifications.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1276, |
|
"end": 1294, |
|
"text": "(Lai et al., 2018)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 909, |
|
"end": 916, |
|
"text": "Table 3", |
|
"ref_id": "TABREF5" |
|
}, |
|
{ |
|
"start": 1839, |
|
"end": 1846, |
|
"text": "Table 4", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Training and Evaluation", |
|
"sec_num": "5.2" |
|
}, |
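
{

"text": "HIT@k as used above reduces to a few lines (a sketch with illustrative names):\n\ndef hit_at_k(ranked_specs_per_question, gold_specs, k):\n    # Fraction of questions whose correct specification appears within the\n    # top k positions of the model's ranked list.\n    hits = sum(gold in ranked[:k]\n               for ranked, gold in zip(ranked_specs_per_question, gold_specs))\n    return hits / len(gold_specs)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Training and Evaluation",

"sec_num": "5.2"

},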
|
{ |
|
"text": "Error Analysis: We assume that for each question, there is only one correct specification, but the correct answer may span multiple specifications and our models can not provide a full answer. For example, in Backpack dataset, the dimension of the backpack, i.e., its height, weight, depth is defined separately. So, when user queries about the dimension, only one specification is provided. Some specifications are given in one unit, but users want the answer in another unit, e.g., \"what is the width of this bag in cms?\". Since the specification is given in inches, the models show the answer in inches. So, the answer is related, but not exactly correct. Users sometimes want to know the difference between certain specification types, what is meant by some specifications. For example, consider the questions \"what is the difference between inverter and non-inverter AC?\", \"what is meant by water resistant depth?\". While we can find the type of inverter, the water resistant depth of a watch etc. from specifications, the definition of the specification is not given. As we have generated train data labels in semi-supervised fashion, it also contributes to inaccurate classification in some cases.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In this paper, we proposed a method to label training data with little supervision. We demonstrated that large pretrained language models such as BERT and XLNet can be fine-tuned successfully to obtain product specifications that can help answer user queries. We also achieve reasonably good results even while testing on different verticals.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "We would like to extend our method to take into account multiple specifications as an answer. We also plan to develop a classifier to identify which questions can not be answered from the specifications.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "7" |
|
}, |
|
{ |
|
"text": "Unsupervised DUAL embedding model gave very similar results to Siamese model, and is not reported.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Answer identification from product reviews for user questions by multi-task attentive networks", |
|
"authors": [ |
|
{ |
|
"first": "Long", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziyu", |
|
"middle": [], |
|
"last": "Guan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wanqing", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaopeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhou", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huan", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "45--52", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1609/aaai.v33i01.330145" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Long Chen, Ziyu Guan, Wei Zhao, Wanqing Zhao, Xi- aopeng Wang, Zhou Zhao, and Huan Sun. 2019. An- swer identification from product reviews for user questions by multi-task attentive networks. In Pro- ceedings of the AAAI Conference on Artificial Intel- ligence, volume 33, pages 45-52.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "SuperAgent: A customer service chatbot for e-commerce websites", |
|
"authors": [ |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaohan", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chuanqi", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chaoqun", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "97--102", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lei Cui, Shaohan Huang, Furu Wei, Chuanqi Tan, Chaoqun Duan, and Ming Zhou. 2017. SuperA- gent: A customer service chatbot for e-commerce websites. pages 97-102.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Is it dish washer safe? automatically answering \"yes/no\" questions using customer reviews", |
|
"authors": [ |
|
{ |
|
"first": "Daria", |
|
"middle": [], |
|
"last": "Dzendzik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carl", |
|
"middle": [], |
|
"last": "Vogel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jennifer", |
|
"middle": [], |
|
"last": "Foster", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Student Research Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-3001" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daria Dzendzik, Carl Vogel, and Jennifer Foster. 2019. Is it dish washer safe? automatically answering \"yes/no\" questions using customer reviews. In Pro- ceedings of the 2019 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Student Research Workshop, pages 1-6, Minneapolis, Minnesota. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Productaware answer generation in e-commerce questionanswering", |
|
"authors": [ |
|
{ |
|
"first": "Shen", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhaochun", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yihong", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dongyan", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dawei", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Twelfth ACM International Conference on Web Search and Data Mining, WSDM '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "429--437", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3289600.3290992" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shen Gao, Zhaochun Ren, Yihong Zhao, Dongyan Zhao, Dawei Yin, and Rui Yan. 2019. Product- aware answer generation in e-commerce question- answering. In Proceedings of the Twelfth ACM Inter- national Conference on Web Search and Data Min- ing, WSDM '19, page 429-437, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A simple end-to-end question answering model for product information", |
|
"authors": [ |
|
{ |
|
"first": "Tuan", |
|
"middle": [], |
|
"last": "Lai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trung", |
|
"middle": [], |
|
"last": "Bui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sheng", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nedim", |
|
"middle": [], |
|
"last": "Lipka", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the First Workshop on Economics and Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "38--43", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-3105" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tuan Lai, Trung Bui, Sheng Li, and Nedim Lipka. 2018. A simple end-to-end question answering model for product information. In Proceedings of the First Workshop on Economics and Natural Language Pro- cessing, pages 38-43, Melbourne, Australia. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Republic and Canton of Geneva, CHE. International World Wide Web Conferences Steering Committee", |
|
"authors": [ |
|
{ |
|
"first": "Julian", |
|
"middle": [], |
|
"last": "Mcauley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 25th International Conference on World Wide Web, WWW '16", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "625--635", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/2872427.2883044" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Julian McAuley and Alex Yang. 2016. Addressing complex and subjective product-related queries with customer reviews. In Proceedings of the 25th In- ternational Conference on World Wide Web, WWW '16, page 625-635, Republic and Canton of Geneva, CHE. International World Wide Web Conferences Steering Committee.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Distributed representations of words and phrases and their compositionality", |
|
"authors": [ |
|
{ |
|
"first": "Tomas", |
|
"middle": [], |
|
"last": "Mikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Corrado", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Dean", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Proceedings of the 26th International Conference on Neural Information Processing Systems", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "3111--3119", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Cor- rado, and Jeffrey Dean. 2013. Distributed represen- tations of words and phrases and their composition- ality. In Proceedings of the 26th International Con- ference on Neural Information Processing Systems -Volume 2, NIPS'13, page 3111-3119, Red Hook, NY, USA. Curran Associates Inc.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "A dual embedding space model for document ranking", |
|
"authors": [ |
|
{

"first": "Bhaskar",

"middle": [],

"last": "Mitra",

"suffix": ""

},

{

"first": "Eric",

"middle": [],

"last": "Nalisnick",

"suffix": ""

},

{

"first": "Nick",

"middle": [],

"last": "Craswell",

"suffix": ""

},

{

"first": "Rich",

"middle": [],

"last": "Caruana",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1602.01137" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Bhaskar Mitra, Eric Nalisnick, Nick Craswell, and Rich Caruana. 2016. A dual embedding space model for document ranking. arXiv preprint arXiv:1602.01137.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Zhilin", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihang", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yiming", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaime", |
|
"middle": [], |
|
"last": "Carbonell", |
|
"suffix": "" |
|
}, |
|
{

"first": "Russ",

"middle": [

"R"

],

"last": "Salakhutdinov",

"suffix": ""

},

{

"first": "Quoc",

"middle": [

"V"

],

"last": "Le",

"suffix": ""

}
|
], |
|
"year": 2019, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5754--5764", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural in- formation processing systems, pages 5754-5764.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Responding e-commerce product questions via exploiting QA collections and reviews", |
|
"authors": [ |
|
{ |
|
"first": "Qian", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihao", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2192--2203", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qian Yu, Wai Lam, and Zihao Wang. 2018. Re- sponding e-commerce product questions via exploit- ing QA collections and reviews. In Proceedings of the 27th International Conference on Computational Linguistics, pages 2192-2203, Santa Fe, New Mex- ico, USA. Association for Computational Linguis- tics.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Riker: Mining rich keyword representations for interpretable product question answering", |
|
"authors": [ |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziyu", |
|
"middle": [], |
|
"last": "Guan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huan", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery Data Mining, KDD '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1389--1398", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3292500.3330985" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jie Zhao, Ziyu Guan, and Huan Sun. 2019. Riker: Min- ing rich keyword representations for interpretable product question answering. In Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery Data Mining, KDD '19, page 1389-1398, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"text": "Snapshot of a product with its specifications.", |
|
"num": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"text": "Statistics of 6 largest categories.", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"text": "Test datasets statistics. sion of the hidden state vectors and K is the number of classes.", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"text": "Performance comparison of different models.", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"text": "Top three specifications returned by different models for two questions. Correct specification is highlighted in bold.", |
|
"html": null, |
|
"content": "<table/>", |
|
"num": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |