|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T10:33:42.043884Z" |
|
}, |
|
"title": "Interactive Latent Knowledge Selection for E-commerce Product Copywriting Generation", |
|
"authors": [ |
|
{ |
|
"first": "Zeming", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Peking University", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yanyan", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Yuejian", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Peking University", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "fangyj@ss.pku.edu.cn" |
|
}, |
|
{ |
|
"first": "Hongshen", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Mian", |
|
"middle": [], |
|
"last": "Ma", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Zhuoye", |
|
"middle": [], |
|
"last": "Ding", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"O" |
|
], |
|
"last": "Long", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "bo.long@jd.com" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "As the multi-modal e-commerce is thriving, high-quality advertising product copywriting has gain more attentions, which plays a crucial role in the e-commerce recommender, advertising and even search platforms. The advertising product copywriting is able to enhance the user experience by highlighting the product's characteristics with textual descriptions and thus to improve the likelihood of user click and purchase. Automatically generating product copywriting has attracted noticeable interests from both academic and industrial communities, where existing solutions merely make use of a product's title and attribute information to generate its corresponding description. However, in addition to the product title and attributes, we observe that there are various auxiliary descriptions created by the shoppers or marketers in the ecommerce platforms (namely human knowledge), which contains valuable information for product copywriting generation, yet always accompanying lots of noises. In this work, we propose a novel solution to automatically generating product copywriting that involves all the title, attributes and denoised auxiliary knowledge. To be specific, we design an end-to-end generation framework equipped with two variational autoencoders that works interactively to select informative human knowledge and generate diverse copywriting. Experiments on real-world e-commerce product copywriting datasets demonstrate that our proposed method outperforms various baselines with regard to both automatic and human evaluation metrics.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "As the multi-modal e-commerce is thriving, high-quality advertising product copywriting has gain more attentions, which plays a crucial role in the e-commerce recommender, advertising and even search platforms. The advertising product copywriting is able to enhance the user experience by highlighting the product's characteristics with textual descriptions and thus to improve the likelihood of user click and purchase. Automatically generating product copywriting has attracted noticeable interests from both academic and industrial communities, where existing solutions merely make use of a product's title and attribute information to generate its corresponding description. However, in addition to the product title and attributes, we observe that there are various auxiliary descriptions created by the shoppers or marketers in the ecommerce platforms (namely human knowledge), which contains valuable information for product copywriting generation, yet always accompanying lots of noises. In this work, we propose a novel solution to automatically generating product copywriting that involves all the title, attributes and denoised auxiliary knowledge. To be specific, we design an end-to-end generation framework equipped with two variational autoencoders that works interactively to select informative human knowledge and generate diverse copywriting. Experiments on real-world e-commerce product copywriting datasets demonstrate that our proposed method outperforms various baselines with regard to both automatic and human evaluation metrics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Traditional e-commerce platforms solely present a list of products to customers. Nowadays, as the multi-modal recommender systems are thriving, the e-commerce platform ecosystem has also been enriched with multi-modal forms, such as product advertising copywriting and product living videos. Especially, the product advertising copywriting plays an important role in the e-commerce recommender, advertising and search platforms, which is able to improve the customers' shopping experience. Instead of only showing the product title, a well-written product description can interest the customer hugely and save their time from clicking every product and reading the long-and-complex product details. Hence, this work focuses on the problem of automatic product copywriting generation, which aims to generate a textual description for a product, highlighting the attractive properties of the product. Such a task is always framed as a sequence-to-sequence problem (Chen et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 962, |
|
"end": 981, |
|
"text": "(Chen et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The product title and a list of product attributes are always taken as the main model input to generate the product copywriting, exemplified by Figure 1. Recently, Chen et al. (2019) proposed to involve the external knowledge (i.e., Wikipedia) into the product title and attributes during the generation process of product copywriting. However, the external knowledge and customer reviews are not always available. For example, thousands of newly released products are emerging in the e-commerce platforms, where the external or the customer reviews are not accessible while automatically generating advertising copywriting is critical for such new products to improve the click-through rate and the conversion rate.", |
|
"cite_spans": [ |
|
{ |
|
"start": 144, |
|
"end": 182, |
|
"text": "Figure 1. Recently, Chen et al. (2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In practice, we observe that each product (e.g., the hot and newly released items) is accompanied with various product details in the e-commerce platforms (e.g., Taobao, JD, and Amazon). The product title and associated attributes summarize the main information of a product, while the product details comprise of auxiliary advertising descriptions created by shoppers and marketers which contain salient information that highlight the product properties with advertising phrases (i.e., human knowledge) and thus are beneficial to improving the quality of the generated copywriting. Exemplified by Figure 1 , the product title and attributes summarize the main functions of \"Xiaomi box SE\", while the product details elaborate the product with slogans that are attractive to customers, like \"voice control\" and \"switch channels and adjust the volume by voice\". Unfortunately, we also observe that such human knowledge also contains redundant pieces, like \"I want to watch action video\", which might harm the quality of the generated copywriting. One recent work (Zhang et al., 2021) simply concatenated all the product details with product title and attributes as the model input, without considering the noises contained by the product details. In this work, we propose to select the salient knowledge from the auxiliary product details before we feed such information, associated with product title and attributes, into the model.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1062, |
|
"end": 1082, |
|
"text": "(Zhang et al., 2021)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 598, |
|
"end": 606, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Variables model based on Transformer architecture (Vaswani et al., 2017 ) (i.e., ILVT), which is designed to select knowledge from noisy product details and incorporate the selected knowledge into the process of product copywriting generation. To enhance the connection between the process of the knowledge selection and copywriting generation, we sample latent variables from prior description and knowledge latent space separately. During generation phase, ILVT will firstly sample the description latent variable from the description distribution conditioned on the product title and attributes, then sample the knowledge latent variable from knowledge distribution conditioned on product details and the description latent variable. With the interactive latent variables, ILVT can also generate copywritings with strong diversity. Without latent variable modules, ILVT degenerates into transformer with copy mechanism, which is a traditional method for copywriting generation in e-commerce platform (Zhang et al., 2021) . To the best of our knowledge, this is the first work that selects knowledge from product details to improve the generation quality and diversity in product copywriting generation task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 71, |
|
"text": "(Vaswani et al., 2017", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1003, |
|
"end": 1023, |
|
"text": "(Zhang et al., 2021)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "To be specific, we propose an Interactive Latent", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "To evaluate our proposed method, we collected a Chinese product copywriting dataset from the JD platform, named JDK. The dataset consists of 220,000 instances, each of which comprises of a product's title, attributes, details as well as the corresponding description. Results on such dataset shows that our proposed method obtains the best performance compared to all baselines, in terms of both automatic and human evaluations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "To be specific, we propose an Interactive Latent", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We collected a Chinese product copywriting generation dataset, named JDK, from the e-commerce platform, JD 1 , one of the biggest Chinese ecommerce platforms. The dataset consists of 220K products, covering 30 categories, such as digits and clothing. Each product instance is associated with a product title, a set of attributes, product details created by advertising experts, as well as the product copywriting published by professional writers. We randomly split the whole datasets into three parts, 200K for training, 10K each for test and validation. The product title and attributes summarize the main characteristics of the product. On average, the number of Chinese tokens in product title is 44.93, and the size of the attribute set is 7.75. However, the average number of tokens before pre-processing in the product details is 838.39, which is much larger than the average length of the copywriting, i.e., 81.16. The average length of the human knowledge now is 111.38 tokens. Table 1 lists the detailed statistics about this dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 987, |
|
"end": 994, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Dataset Construction", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "We observed that the product details, created by advertising experts, contain ample and heterogeneous information, such as the advertising slogans in textual form, the product size in numbers and specification with particular usage examples. Simply feeding all product details might harm the generation performance. Thus, a heuristic method is introduced to filter out the apparently noisy pieces in collected product details. We split the whole detail paragraph K total into fragments KF following the heuristic rule \u03b3 (i.e., the stop symbols) and keep the fragments whose length is between 10 and 64 tokens to remove some useless pieces, such as instructions for usage.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Construction", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "K total \u03b3 \u2192 KF = {K f rag 1 , K f rag 2 , ..., K f ragm }", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Construction", |
|
"sec_num": "2.1" |
|
}, |
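The following is a minimal sketch of the fragment-splitting heuristic described above, assuming the stop-symbol rule gamma simply splits on Chinese and Latin sentence delimiters and that fragment length is counted in characters of the raw string; the delimiter set and the `split_details` helper name are illustrative assumptions, not the authors' released code.

```python
import re

# Assumed stop symbols for the heuristic rule gamma (illustrative, not from the paper).
STOP_SYMBOLS = r"[。！？；!?;\n]"

def split_details(detail_text: str, min_len: int = 10, max_len: int = 64):
    """Split a product-detail paragraph into fragments and keep those whose
    length falls within [min_len, max_len], as described in Section 2.1."""
    fragments = [f.strip() for f in re.split(STOP_SYMBOLS, detail_text)]
    return [f for f in fragments if min_len <= len(f) <= max_len]

# Toy usage on a short detail paragraph.
fragments = split_details("语音遥控，想看什么直接说。支持4K超清输出！说明书第3页。")
print(fragments)
```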
|
{ |
|
"text": "where m is the number of fragments that varies for different products. We adopted the Sentence-Bert (Reimers and Gurevych, 2019) to obtain the contextual representation for each fragment K f rag i \u2208 KF , denoted as E f rag i . We feed the contextual representations of KF into the K-Means clustering algorithm (MacQueen et al., 1967) , where fragments with similar semantics are clustered into the same group:", |
|
"cite_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 128, |
|
"text": "(Reimers and Gurevych, 2019)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 310, |
|
"end": 333, |
|
"text": "(MacQueen et al., 1967)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Construction", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "{E f rag 1 , \u2022 \u2022 \u2022 , E f ragm } \u03ba \u2192 KP = K 1 , \u2022 \u2022 \u2022 , K |K|", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Construction", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where each K i \u2208 KP is a group of fragments with similar semantics and we concatenated all fragments in the same group in an alphabetical order to obtain a single sequence, i.e. a text containing human knowledge. For simplicity, we manually set |K| = 6 for the number of clusters.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Construction", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Likewise, we applied Sentence-Bert to obtain the contextual representation of each knowledge text K i \u2208 KP and the corresponding product description D, denoted as R K i and R D , respectively. We calculate the cosine similarity between R K i and R D . The cluster with the highest similarity score will be considered as a pseudo knowledge K pse .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Construction", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "K pse = max K i \u2208KP cos < R K i , R D >", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Construction", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "where cos < \u2022 > means the function of cosine similarity. Finally, for each copywriting instance, we have a set of knowledge in the size of 6, one of which is labeled as pseudo knowledge.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Dataset Construction", |
|
"sec_num": "2.1" |
|
}, |
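Below is a sketch of the pseudo-knowledge construction pipeline (Sentence-BERT embeddings, K-Means clustering with |K| = 6, and cosine similarity against the reference description). The Sentence-BERT checkpoint name and the `build_pseudo_knowledge` helper are assumptions for illustration, and fragments within a cluster are concatenated in their original order here for brevity rather than alphabetically.

```python
import numpy as np
from sentence_transformers import SentenceTransformer  # checkpoint choice is an assumption
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity

encoder = SentenceTransformer("paraphrase-multilingual-MiniLM-L12-v2")  # assumed model

def build_pseudo_knowledge(fragments, description, num_clusters=6):
    """Cluster detail fragments into |K| knowledge texts and pick the one
    closest (cosine) to the reference description as the pseudo knowledge."""
    frag_emb = encoder.encode(fragments)                         # E_{frag_i}
    labels = KMeans(n_clusters=num_clusters, n_init=10).fit_predict(frag_emb)
    # Concatenate the fragments of each cluster into a single knowledge text K_i.
    knowledge_pool = [
        "".join(f for f, l in zip(fragments, labels) if l == c)
        for c in range(num_clusters)
    ]
    know_emb = encoder.encode(knowledge_pool)                    # R_{K_i}
    desc_emb = encoder.encode([description])                    # R_D
    sims = cosine_similarity(know_emb, desc_emb).ravel()
    pse_idx = int(np.argmax(sims))
    return knowledge_pool, pse_idx                               # K_pse = knowledge_pool[pse_idx]
```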
|
{ |
|
"text": "With a product title, attribute sets and its corresponding commodity details, the objective of our method is to utilize the intrinsic information firstly, and then select an appropriate knowledge from details. Finally, diverse and accurate product description will be generated. Given a product, the e-commerce platforms often describes such a product from multiple aspects, including the product title T , a set of attributes A, and the product details KP . The product title T describes the product in a short text, represented as a sequence of words T = t 1 , t 2 , ..., t |t| . The attribute set A consists of |A| attributes A = a 1 , a 2 ..., a |A| that captures the product properties from different aspects. The product details KP = K 1 , K 2 , ..., K |KP | are essentially a human knowledge pool, composed of advertising description created by advertising experts. Each advertising description K i \u2208 KP is a sequence of words", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "K i = k 1 i , k 2 i , ..., k |K i | i . Figure 1 demonstrates an example.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In this work, we aim to select the most salient knowledge from the product details KP , and then incorporate such knowledge with product title T and attributes A to generate diverse and highquality product copywriting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Problem Formulation", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In order to better guide the knowledge selection process and enhance the relationship between the target copywriting and corresponding selected knowledge, we utilize an interactive variational autoencoder framework (Kingma and Welling, 2014) to inject the description latent variable to the knowledge latent distribution, followed by selecting the salient knowledge and generating the description sequentially as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Framework", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "p \u03b8 (K, D|A, T ) = z d z k p \u03b8 (D|z d , K, A, T ) \u2022p \u03b8 (K|z d , A, T, KP ) \u2022p \u03c6 (z k |z d , KP ) \u2022 p \u03c6 (z d |A, T )dz d", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Framework", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where z k and z d are latent variables for knowledge and product copywriting, respectively. p \u03c6 (z d |A, T ) and p \u03c6 (z k |z d , A, T ) are their conditional priors. Since the knowledge selection is a discriminative task with limited choices, z k is suitable for a categorical distribution (Jang et al., 2017) , while the z d follows an isotropic Gaussian distribution (Kingma and Welling, 2014) . From the perspective of the product description writing process, we assume that the product copywriting contains pivot information that points out what kind of information from the product knowledge pool KP (i.e., product details) is useful for copywriting generation. Thus, the latent variable z d sampled from p(z d |A, T ) is based on the intrinsic product information (i.e., product title and attributes) and", |
|
"cite_spans": [ |
|
{ |
|
"start": 290, |
|
"end": 309, |
|
"text": "(Jang et al., 2017)", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 369, |
|
"end": 395, |
|
"text": "(Kingma and Welling, 2014)", |
|
"ref_id": "BIBREF9" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Framework", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "z k \u223c p(z k |z d , KP ) is dependent on z d .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Framework", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "During the training phase, a variational posterior q \u03d5 (\u2022) is used to maximize the Evidence Lower Bound (ELBO) as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Framework", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "L ILV T = \u2212 D KL [q \u03d5 (z d |D, A, T, K)||p \u03c6 (z d |A, T )] \u2212 D KL [q \u03d5 (z k |z d , K, KP )||p \u03c6 (z k |z d , KP )] + E z k \u223cq \u03d5 (z k |z d ,K,KP ) [log p \u03b8 (K|z k , A, T, KP )] + E z d \u223cq \u03d5 (z d |D,A,T ) [log p \u03b8 (D|z d , A, T, K)]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Framework", |
|
"sec_num": "2.3" |
|
}, |
|
{ |
|
"text": "where \u03b8, \u03d5 and \u03c6 are the parameters of the generation, posterior and prior modules. The generative process can be described as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Framework", |
|
"sec_num": "2.3" |
|
}, |
|
{

            "text": "Step 1: sample the description latent variable $z_d \\sim p_\\phi(z_d \\mid A, T)$. Step 2: sample the knowledge latent variable $z_k \\sim p_\\phi(z_k \\mid z_d, KP)$. Step 3: select the most salient knowledge $K \\sim p_\\theta(K \\mid z_k, A, T, KP)$.",

            "cite_spans": [],

            "ref_spans": [],

            "eq_spans": [],

            "section": "Framework",

            "sec_num": "2.3"

        },
|
{ |
|
"text": "\u2022 Step 4: generate the product copywriting D \u223c p \u03b8 (D|z d , A, T, K). The description latent variable z d contributes to the knowledge latent variable z k explicitly, and z k influences z d via common target knowledge K and back propagation implicitly. We show the graphical model of the single-track interaction in Figure 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 316, |
|
"end": 324, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Framework", |
|
"sec_num": "2.3" |
|
}, |
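A minimal PyTorch-style sketch of the training objective implied by the ELBO and the generative process above, assuming the prior/posterior networks and encoders already exist; module names such as `prior_desc`, `post_desc`, `prior_know`, `post_know`, `select_knowledge` and `decode_nll` are illustrative, not the authors' released code.

```python
import torch
import torch.nn.functional as F
from torch.distributions import Normal, Categorical, kl_divergence

def ilvt_loss(model, batch):
    """One training step of the interactive latent variable objective (sketch).
    batch holds pooled representations H_P, H_D, H_K, H_KP and supervision targets."""
    # Description latent variable: Gaussian prior p(z_d|A,T) and posterior q(z_d|D,A,T,K).
    mu_p, sigma_p = model.prior_desc(batch["H_P"])          # networks return (mu, positive sigma)
    mu_q, sigma_q = model.post_desc(batch["H_P"], batch["H_D"], batch["H_K"])
    q_zd, p_zd = Normal(mu_q, sigma_q), Normal(mu_p, sigma_p)
    z_d = q_zd.rsample()                                     # reparameterization trick

    # Knowledge latent variable: categorical prior/posterior conditioned on z_d.
    logits_p = model.prior_know(z_d, batch["H_KP"])
    logits_q = model.post_know(z_d, batch["H_KP"], batch["H_K"])
    z_k = F.gumbel_softmax(logits_q, tau=1.0, hard=False)    # differentiable discrete sample

    # Reconstruction terms: knowledge selection and copywriting generation.
    sel_logits = model.select_knowledge(z_k, batch["H_P"])
    loss_sel = F.cross_entropy(sel_logits, batch["pseudo_idx"])
    loss_gen = model.decode_nll(z_d, batch)                  # -log p(D | z_d, A, T, K)

    # KL terms between posteriors and priors.
    kl_d = kl_divergence(q_zd, p_zd).sum(-1).mean()
    kl_k = kl_divergence(Categorical(logits=logits_q),
                         Categorical(logits=logits_p)).mean()
    return loss_gen + loss_sel + kl_d + kl_k
```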
|
{ |
|
"text": "We adopt the Transformer (Vaswani et al., 2017) encoder as the encoding layer.", |
|
"cite_spans": [ |
|
{ |
|
"start": 25, |
|
"end": 47, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Basic Product Representation For simplicity, we concatenate the product title T and its associated attributes A = a 1 , a 2 , ..., a |A| (ordered in alphabet) into a single sequence to get basic product information as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "P = [T ; a 1 ; a 2 ; ...; a |A| ]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "where \";\" stands for sequence concatenation. The basic product embedding in the first layer E (0) is the sum of the word embeddings W E(\u2022) and the positional encoding P E(\u2022):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "E P = W E(P ) + P E(P )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "The initial product embedding will go through multi-layers and the output of i-th layer is:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "E (i) P = F F N (M HA(E (i\u22121) P , ..., E (i\u22121) P ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{

            "text": "where $MHA(\\cdot, \\cdot, \\cdot)$ denotes the multi-head self-attention function and $FFN(\\cdot)$ is the position-wise fully connected feed-forward network. The final representation of the basic product information (i.e., product title and attributes) is defined as:",

            "cite_spans": [],

            "ref_spans": [],

            "eq_spans": [],

            "section": "Encoding Layer",

            "sec_num": "2.4"

        },
|
{ |
|
"text": "H P = avgpool(E (N ) P ) where E (N ) P", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "is the the final representation from the N -th encoder layer, and avgpool is the average pooling operation (Cer et al., 2018) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 107, |
|
"end": 125, |
|
"text": "(Cer et al., 2018)", |
|
"ref_id": "BIBREF1" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
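A compact sketch of the basic product encoding described above (word + positional embeddings, N Transformer encoder layers, average pooling), using PyTorch's built-in `nn.TransformerEncoder`. Hyperparameters follow Section 4, but the class itself is an illustrative reconstruction, not the authors' implementation; note the paper reports 12 heads with hidden size 512, while 8 heads are used here so the dimension divides evenly.

```python
import torch
import torch.nn as nn

class ProductEncoder(nn.Module):
    """Encode the concatenated title+attributes sequence P into H_P (sketch)."""
    def __init__(self, vocab_size, d_model=512, n_heads=8, n_layers=6, ffn=2048, max_len=256):
        super().__init__()
        self.word_emb = nn.Embedding(vocab_size, d_model)        # WE(.)
        self.pos_emb = nn.Embedding(max_len, d_model)            # PE(.), learned (assumption)
        layer = nn.TransformerEncoderLayer(d_model, n_heads, ffn, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, n_layers)    # stacked MHA + FFN blocks

    def forward(self, token_ids):                     # token_ids: (batch, seq_len)
        pos = torch.arange(token_ids.size(1), device=token_ids.device)
        e0 = self.word_emb(token_ids) + self.pos_emb(pos)        # E^(0)_P
        eN = self.encoder(e0)                                     # E^(N)_P
        return eN.mean(dim=1)                                     # H_P = avgpool(E^(N)_P)
```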
|
{ |
|
"text": "Product Description Representation Following the same procedures, we can obtain the initial and final representations of the product description (i.e., copywriting), denoted as E D and H D .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "Product Knowledge Representation The product knowledge pool (i.e., the product details) is a list of advertising descriptions created by advertising experts, KP = K 1 , ..., K |KP | . To obtain the representation of the knowledge pool, for each advertising descriptions K j \u2208 KP , we consider all the word embedding, positional embedding description segment embedding:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "E K j = W E(K j ) + P E(K j ) + SE(j)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "where j stands for the description position in the knowledge pool, and SE is the segment embedding which can be learned during the training phase. The representation of i-th encoder is calculated as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "E (i) K j = F F N (M HA(E (i\u22121) K j , \u2022 \u2022 \u2022 , E (i) K j ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "The final representation of the whole knowledge pool is then defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "H KP = [avgpool(E (N ) K 0 ), \u2022 \u2022 \u2022 , avgpool(E (N ) K |KP | )]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
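A short sketch of the knowledge-pool encoding with the additional segment embedding SE(j), mirroring the hypothetical `ProductEncoder` from the previous sketch; this is an assumption about how the computation could be organized, not the released code.

```python
import torch
import torch.nn as nn

class KnowledgePoolEncoder(nn.Module):
    """Encode each knowledge text K_j with word + positional + segment embeddings (sketch)."""
    def __init__(self, vocab_size, d_model=512, n_heads=8, n_layers=6, max_len=512, pool_size=6):
        super().__init__()
        self.word_emb = nn.Embedding(vocab_size, d_model)
        self.pos_emb = nn.Embedding(max_len, d_model)
        self.seg_emb = nn.Embedding(pool_size, d_model)           # SE(j), learned
        layer = nn.TransformerEncoderLayer(d_model, n_heads, 2048, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, n_layers)

    def forward(self, pool_ids):                  # pool_ids: (batch, |KP|, seq_len)
        b, k, t = pool_ids.shape
        pos = torch.arange(t, device=pool_ids.device)
        seg = torch.arange(k, device=pool_ids.device).view(1, k, 1)
        e0 = self.word_emb(pool_ids) + self.pos_emb(pos) + self.seg_emb(seg)  # E_{K_j}
        eN = self.encoder(e0.view(b * k, t, -1))                   # encode each K_j separately
        return eN.mean(dim=1).view(b, k, -1)                       # H_KP: one vector per K_j
```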
|
{ |
|
"text": "It is worthy noting that there are no available annotations of the most salient knowledge, however, which is necessary during training phrase. Thus, we designed a simple algorithm to construct the pseudo label for the knowledge selection for each product. According to the pseudo annotations, we denote the selected knowledge as K pse , whose hidden representation is denoted as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "H K = avgpool(E (N ) K ).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Encoding Layer", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "To build the relationship between the knowledge selection and copywriting generation, we design a pair of interactive latent variables, i.e., the description latent variable and the knowledge latent variable, to influence each other.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Interactive Latent Variable Layer", |
|
"sec_num": "2.5" |
|
}, |
|
{ |
|
"text": "To make the generated copywriting more diverse and guide the selection phase, we learn a Gaussian distribution with the intrinsic product information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For the posterior, inspired by Kim et al. (2018) we calculate hidden representations H Datten K and H Katten D for enhancing the relation between description and pseudo knowledge, as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "H Datten K = avgpool(Sof tmax(Q D K K )) Q D = W Q E D K K = W K E K where W Q , W K is parameters. H Katten D is calcu- lated similarly.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We concatenate the hidden representations H Datten K , H Katten D with H P , H D , H K as H des and feed into a MLP layer to calculate parameters \u00b5 and \u03c3 of the posterior distribution:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u00b5 = M LP (H des ) \u03c3 = Sof tplus(M LP (H des )) H des = [H D , H P , H K , H Datten K , H Katten D ]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "so the posterior Gaussian distribution can be then described as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "q \u03d5 (z d |D, A, T, K) = N \u03d5 (z d |\u00b5, \u03c3I)", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "For the prior distribution, we only utilize the basic product representation H P and calculate parameters \u00b5 \u2032 and \u03c3 \u2032 similar to the posterior processing:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "p \u03c6 (z d |A, T ) = N \u03c6 (z d |\u00b5 \u2032 , \u03c3 \u2032 I) \u00b5 \u2032 = M LP (H P ) \u03c3 \u2032 = Sof tplus(M LP (H P ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the training phase, we use the reparameterization trick(Kingma and Welling, 2014) since the stochastic sampling from the latent distribution is non-differential. In order to approximate the distributions of the posterior and prior representation, we introduce the KL divergence loss (Kullback and Leibler, 1951) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 286, |
|
"end": 314, |
|
"text": "(Kullback and Leibler, 1951)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Description Latent Variable", |
|
"sec_num": null |
|
}, |
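A minimal sketch of the description latent variable (Gaussian prior and posterior with a Softplus-parameterized variance, reparameterized sampling, and the KL term), assuming the pooled representations are already concatenated into H_des; all module and argument names are illustrative.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal, kl_divergence

class DescriptionLatent(nn.Module):
    """Gaussian prior p(z_d|A,T) and posterior q(z_d|D,A,T,K) for the description (sketch)."""
    def __init__(self, d_model=512, z_dim=128):
        super().__init__()
        self.prior_net = nn.Linear(d_model, 2 * z_dim)       # from H_P
        self.post_net = nn.Linear(5 * d_model, 2 * z_dim)     # from [H_D, H_P, H_K, H_DattenK, H_KattenD]

    @staticmethod
    def _gaussian(params):
        mu, pre_sigma = params.chunk(2, dim=-1)
        return Normal(mu, F.softplus(pre_sigma) + 1e-6)        # sigma = Softplus(MLP(.))

    def forward(self, h_p, h_des=None):
        prior = self._gaussian(self.prior_net(h_p))
        if h_des is None:                      # generation: sample z_d from the prior
            return prior.rsample(), torch.tensor(0.0)
        posterior = self._gaussian(self.post_net(h_des))
        z_d = posterior.rsample()              # reparameterization trick
        kl = kl_divergence(posterior, prior).sum(-1).mean()
        return z_d, kl
```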
|
{ |
|
"text": "In order to strength the relationship between the selected knowledge and corresponding description, we inject the description latent variable z d to the knowledge latent space and thus get the interactive VAEs model. Since knowledge selection is a discriminate task, we utilize the Categorical distribution (Jang et al., 2017) for knowledge latent space.", |
|
"cite_spans": [ |
|
{ |
|
"start": 307, |
|
"end": 326, |
|
"text": "(Jang et al., 2017)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We then calculate the hidden representation H KP attenz d via attention method:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "H KP attenz d = Sof tmax((W d z d )H T KP )H KP", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In the training phase, we feed H KP attenz d and z d with the total and pseudo knowledge representations H KP , H K together into a MLP layer to compute the parameters \u03c0 for posterior categorical distribution. By removing the H K , we obtain the parameters \u03c0 \u2032 for prior distribution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u03c0 = M LP [z d , H K , H KP , H KP attenz d ] \u03c0 \u2032 = M LP [z d , H KP , H KP attenz d ]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The posterior and prior distributions can be then described as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "q \u03d5 (z k |z d , K, KP ) = Cat \u03d5 (\u03c0) p \u03c6 (z k |z d , KP )) = Cat \u03c6 (\u03c0 \u2032 )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Latent Variable", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We also introduce the KL divergence loss and reparametrization trick. We use gumbelsoftmax (Jang et al., 2017; Maddison et al., 2017) as the categorical distribution is discrete. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 110, |
|
"text": "(Jang et al., 2017;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 111, |
|
"end": 133, |
|
"text": "Maddison et al., 2017)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Latent Variable", |
|
"sec_num": null |
|
}, |
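A small sketch of the knowledge latent variable using a Gumbel-Softmax relaxed categorical, assuming the MLPs over the concatenated representations described above; the module and tensor names are illustrative rather than the released implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical, kl_divergence

class KnowledgeLatent(nn.Module):
    """Categorical prior p(z_k|z_d, KP) and posterior q(z_k|z_d, K, KP) (sketch)."""
    def __init__(self, d_model=512, z_dim=128, pool_size=6):
        super().__init__()
        self.prior_mlp = nn.Linear(z_dim + 2 * d_model, pool_size)   # [z_d, pooled H_KP, H_KPatten]
        self.post_mlp = nn.Linear(z_dim + 3 * d_model, pool_size)    # ... plus H_K

    def forward(self, z_d, h_kp, h_atten, h_k=None, tau=1.0):
        h_kp_pooled = h_kp.mean(dim=1)                                # summarize the pool
        prior_logits = self.prior_mlp(torch.cat([z_d, h_kp_pooled, h_atten], dim=-1))
        if h_k is None:                                               # generation: use the prior
            return F.gumbel_softmax(prior_logits, tau=tau, hard=True), torch.tensor(0.0)
        post_logits = self.post_mlp(torch.cat([z_d, h_kp_pooled, h_atten, h_k], dim=-1))
        z_k = F.gumbel_softmax(post_logits, tau=tau, hard=False)      # differentiable sample
        kl = kl_divergence(Categorical(logits=post_logits),
                           Categorical(logits=prior_logits)).mean()
        return z_k, kl
```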
|
{ |
|
"text": "Motivated by Mou et al. (2016) , we adopt the heuristic matching algorithm to select the target salient knowledge from all the product details. After getting the knowledge latent variable z k sampling from the posterior distribution and prior distribution in training and generation stage, respectively, we compute the hidden representation for knowledge selection as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 30, |
|
"text": "Mou et al. (2016)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Selection", |
|
"sec_num": "2.6" |
|
}, |
|
{ |
|
"text": "H sel = [H P , z k , |H P \u2212 z k |, H P \u2299 z k ]", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Selection", |
|
"sec_num": "2.6" |
|
}, |
|
{ |
|
"text": "where \u2299 stands for the element-wise multiplication. The selected knowledge is denoted as KS \u2208 KP , whose representation H KS can be obtained through the encoder layers where only the word and positional embeddings are taken as input, similar to the product title and attributes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Selection", |
|
"sec_num": "2.6" |
|
}, |
|
{ |
|
"text": "The selection embedding H sel will be fed into a MLP layer to predict the index ID KS of the corresponding target knowledge KS.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Knowledge Selection", |
|
"sec_num": "2.6" |
|
}, |
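A brief sketch of the heuristic-matching selection head, assuming z_k has been projected to the same dimensionality as H_P so the matching features can be built directly; the class and layer sizes are illustrative assumptions.

```python
import torch
import torch.nn as nn

class KnowledgeSelector(nn.Module):
    """Predict the index of the target knowledge from matching features (sketch)."""
    def __init__(self, d_model=512, pool_size=6):
        super().__init__()
        self.mlp = nn.Sequential(nn.Linear(4 * d_model, d_model), nn.ReLU(),
                                 nn.Linear(d_model, pool_size))

    def forward(self, h_p, z_k):
        # H_sel = [H_P, z_k, |H_P - z_k|, H_P ⊙ z_k]; z_k is assumed projected to d_model.
        h_sel = torch.cat([h_p, z_k, (h_p - z_k).abs(), h_p * z_k], dim=-1)
        return self.mlp(h_sel)    # logits over the |KP| candidate knowledge texts
```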
|
{ |
|
"text": "We inject the basic product information (i.e., title and attributes) E KS into a stacked transformer decoder module with the copy mechanism to generate the product copywriting.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoder Layer", |
|
"sec_num": "2.7" |
|
}, |
|
{ |
|
"text": "We also try different ways to combining the VAE modules with transformer decoder. Similar to Fang et al. (2021) ; Li et al. (2020a), we empirically observed that the best choice is to element-wisely add the latent variable z d with the word and positional embeddings of each word, before fed into the decoder, to generate the copywriting. The copy mechanism is used to copy words in the selected knowledge and the input product information (i.e., title and attributes). The probability of generating token d t at t-th step is computed as:", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 111, |
|
"text": "(2021)", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoder Layer", |
|
"sec_num": "2.7" |
|
}, |
|
{ |
|
"text": "P (d t ) =\u03bb 1 P cp (d t |KS, P ) + \u03bb 2 P voc (d t |z d , KS, P )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoder Layer", |
|
"sec_num": "2.7" |
|
}, |
|
{ |
|
"text": "where \u03bb 1 and \u03bb 2 are the coordination probability. P voc is the output from the stacked transformer decoder layers and P cp represents the copy logits, defined as:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoder Layer", |
|
"sec_num": "2.7" |
|
}, |
|
{ |
|
"text": "P cp (d t | * ) = i:t i =Dt \u03b1 t,i", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoder Layer", |
|
"sec_num": "2.7" |
|
}, |
|
{ |
|
"text": "where * stands for either P or KS.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Decoder Layer", |
|
"sec_num": "2.7" |
|
}, |
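A condensed sketch of mixing the copy distribution with the vocabulary distribution at one decoding step, in the spirit of the pointer-generator copy mechanism (See et al., 2017); the tensor names and the scatter-based aggregation are assumptions, and for simplicity lambda_2 is taken here as 1 - lambda_1.

```python
import torch

def mix_copy_and_vocab(p_voc, attn, src_ids, lambda_cp):
    """Combine vocabulary and copy distributions for one decoding step (sketch).
    p_voc:     (batch, vocab)    decoder softmax over the vocabulary
    attn:      (batch, src_len)  attention weights over source tokens (P and KS)
    src_ids:   (batch, src_len)  vocabulary ids of the source tokens
    lambda_cp: (batch, 1)        copy weight; generation weight is 1 - lambda_cp
    """
    p_copy = torch.zeros_like(p_voc)
    p_copy.scatter_add_(1, src_ids, attn)        # sum attention mass per source token id
    return lambda_cp * p_copy + (1.0 - lambda_cp) * p_voc

# Toy usage: vocabulary of 10, 4 source tokens, batch of 2.
p = mix_copy_and_vocab(torch.softmax(torch.randn(2, 10), -1),
                       torch.softmax(torch.randn(2, 4), -1),
                       torch.randint(0, 10, (2, 4)),
                       torch.full((2, 1), 0.3))
```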
|
{ |
|
"text": "We conducted experiments on the JDK dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "We compare our model with several baselines:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline Models", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "\u2022 CONVSEQ2SEQ (Gehring et al., 2017 ) is a sequence-to-sequence model with convolutional neural networks for text generation. \u2022 TRANSFORMER (Vaswani et al., 2017) is an encoder-decoder architecture relying on selfattention mechanism. \u2022 KOBE (Chen et al., 2019) incorporates knowledge extracted from exogenous database into the copywriting generation model. \u2022 PTRANS (Vaswani et al., 2017; See et al., 2017 ) is a transformer-based generation model with copy mechanism, which is the backbone architecture of this ILVT. In other words, ILVT without latent variable modules is degenerated into PTRANS. we also consider the following three variants that treat the product details with different strategies:", |
|
"cite_spans": [ |
|
{ |
|
"start": 14, |
|
"end": 35, |
|
"text": "(Gehring et al., 2017", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 140, |
|
"end": 162, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 241, |
|
"end": 260, |
|
"text": "(Chen et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 366, |
|
"end": 388, |
|
"text": "(Vaswani et al., 2017;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 389, |
|
"end": 405, |
|
"text": "See et al., 2017", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline Models", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "+ALL: takes all knowledge KP as input. +RAND: randomly picks K i \u2208 KP as input.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline Models", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "+PSE: takes as input the pseudo labelled knowledge K pse as input.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline Models", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "All variants also take as input the product title and attributes. Specifically, in this case, KOBE also considers the pseudo labelled knowledge as the one from the external database.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Baseline Models", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We implement our models on the Tesla V100 GPUs. For the transformer-based model, the hidden units is 512 and feed-forward hidden size is 2048. Both the encoder and decoder has 6 layers with 12 heads. The beam size is 5. The sentence length of title, attributes, description and knowledge are 128, 64, 128 and 512 tokens, respectively. The dropout rate is 0.1. We choose the Adam optimizer with \u03b2 1 = 0.9 and \u03b2 2 = 0.998. The warm-up step is set to 4000 and learning rate is 0.0001. The batch size is 32. To avoid the KL-vanishing problem, we choose KL-annealing trick (Bowman et al., 2016) with the \u03b1=0.00025 and \u03b2 = 6.25 for both two VAEs. Hyperparameters are set based on the performance of the validation set.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implementation Details", |
|
"sec_num": "4" |
|
}, |
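A sketch of the optimizer and KL-annealing setup matching the reported hyperparameters; the linear warm-up schedule and the sigmoid-shaped annealing curve are assumptions, since the paper only gives the scalar values of alpha and beta.

```python
import math
import torch

def build_optimizer(model, lr=1e-4, warmup_steps=4000):
    """Adam with the reported betas and a simple linear warm-up (assumed schedule)."""
    optim = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.998))
    sched = torch.optim.lr_scheduler.LambdaLR(
        optim, lambda step: min(1.0, (step + 1) / warmup_steps))
    return optim, sched

def kl_weight(step, alpha=0.00025, beta=6.25):
    """KL-annealing weight in [0, 1] (Bowman et al., 2016); the sigmoid shape is an assumption."""
    return 1.0 / (1.0 + math.exp(-alpha * step + beta))
```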
|
{ |
|
"text": "For the automatic evaluations, we consider both the quality and diversity of output text generated by different systems:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "BLEU (Papineni et al., 2002) : To verify the effectiveness of models in selecting useful knowledge from noisy details and the ability of improving the generation quality, we reported BLEU-1,2,3,4 and the arithmetic mean of above values as BLEU.", |
|
"cite_spans": [ |
|
{ |
|
"start": 5, |
|
"end": 28, |
|
"text": "(Papineni et al., 2002)", |
|
"ref_id": "BIBREF20" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "Distinct (Li et al., 2016) : We calculated the number of distinct n-grams for Distinct-1,2 as Dist-1,2 to measure the diversity of generated copywriting.", |
|
"cite_spans": [ |
|
{ |
|
"start": 9, |
|
"end": 26, |
|
"text": "(Li et al., 2016)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Automatic Evaluation", |
|
"sec_num": "4.1" |
|
}, |
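A small helper computing the Distinct-n metric as commonly defined (unique n-grams divided by total n-grams); this is a sketch, since the paper does not specify its exact implementation.

```python
from collections import Counter

def distinct_n(sentences, n=1):
    """Distinct-n over a list of tokenized outputs: unique n-grams / total n-grams."""
    total, unique = 0, set()
    for tokens in sentences:
        ngrams = [tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]
        total += len(ngrams)
        unique.update(ngrams)
    return len(unique) / total if total else 0.0

# Toy usage on two whitespace-tokenized outputs.
outs = ["the box supports voice control".split(), "the box supports 4k output".split()]
print(distinct_n(outs, 1), distinct_n(outs, 2))
```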
|
{ |
|
"text": "The automatic evaluation results on the JDK dataset are listed in Table 2 . ILVT beats all baselines in BLEU but PTRANS+PSE that can be seen as the model utilizing the ground truth knowledge label. Compared with PTRANS+ALL and PTRANS+RAND, ILVT improves 1.59 and 0.13 BLEU score individually, illustrating that ILVT is able to extract effective knowledge. In terms of KOBE that uses the pseudo labelled knowledge, ILVT achieves notable improvement in all automatic metrics, which shows that ILVT can take better advantage of the selected knowledge for improving generation quality. Also, ILVT significantly improves the generation diversity, beating all baselines in Distinct-1 and Distinct-2 , demonstrating that the interactive latent variables contribute to the diversity of generated product copywritings. ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 66, |
|
"end": 73, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Automatic Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "As shown in Table 2 , compared the variant without knowledge to the one considering all product details (denoted by +ALL), the BLEU of CON-VSEQ2SEQ (+ALL) and PTRANS (+ALL) drop significantly, which demonstrates that the product details contain harmful pieces, i.e., noises. However, simply feeding all product details into TRANS-FORMER, the BLEU is improved. The performance drop of PTRANS might be caused by the copy mechanism that copies noisy words from the product details. Reverse scenario happens to TRANS-FORMER and PTRANS. We can attribute this to the effect of attention mechanism that can denoise knowledge implicitly, while the copy mechanism may copy noise from input details. Taking all three variants, i.e., +ALL, +RAND and +PSE, we observe that simply picking a random knowledge text from the product details can improve the quality and diversity of the generated text for the most cases. Moreover, adopting the pseudo labelled knowledge results in the best performance. The above observations demonstrate that the product details (i.e., human knowledge) contain salient and informative knowledge but also tremendous noises. Thus, it is necessary to perform knowledge selection.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 12, |
|
"end": 19, |
|
"text": "Table 2", |
|
"ref_id": "TABREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Effect of Pseudo Label", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We also conducted the ablation study by removing particular modules, including copy mechanism, description latent variable and knowledge selection module with knowledge latent variable. Results are listed in Table 3 . The absence of the copy mechanism hurts both the generation quality(BLEU) and diversity(Distinct). We observe the prominent impact in automatic evaluation metrics without the description distribution, affirming it is helpful to enhance selecting informative knowledge from prior information and generating copywriting with good coherence and diversity. When removing the knowledge latent variable from the framework and selecting knowledge only based on the product representation, both BLEU and Distinct drop significantly. It demonstrates the interactive latent variable contributes to select knowledge and enhance generation quality.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 208, |
|
"end": 215, |
|
"text": "Table 3", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Ablation Study", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "We also consider the model preference in terms of three criteria for human evaluations, as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "\u2022 Correctness: How correct the generated copywriting describes the product information?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "\u2022 Diversity: How diverse the output is?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "\u2022 Coherence: How coherent the copywriting is to the recommended product?", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "We invited six native Chinese speakers as volunteers to judge the quality of generation results with a score from 1 (worst) to 5 (best). The result of human writings is 5 for reference. We choose the average of all volunteers for each criteria for the same copywriting as human evalution score. We randomly selected 200 instances from test split, each instance contains the product title, attributes, commodity details with labeled pseudo knowledge and the generated result. We only evaluate baselines with pseudo knowledge in order to compare fairly. The average scores of human evaluation are shown in Table 4 , from where we can see that ILVT outperforms all baselines. In the correctness criterion, our model get an average score of 4.77, which indi-cates that ILVT can extract useful information from nosie and generate informative copywritings. In the diversity criterion, the improvement is 0.38, comparing with the best baseline model PTRANS+PSE, which proves that our model is able to generate more diverse results with the interactive knowledge and description latent variables. The Fleiss's kappa scores (Fleiss, 1971 ) among all volunteers is 0.529.", |
|
"cite_spans": [ |
|
{ |
|
"start": 1115, |
|
"end": 1128, |
|
"text": "(Fleiss, 1971", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 604, |
|
"end": 611, |
|
"text": "Table 4", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Human Evaluation", |
|
"sec_num": "4.4" |
|
}, |
|
{ |
|
"text": "The case study is performed to investigate how IVLT utilizes the auxiliary product details (i.e., human knowledge) to generate diverse and informative product copywriting. For fair comparisons, we only choose baselines with pseudo knowledge for fair comparison, namely CONVSEQ2SEQ+PSE, TRANSFORMER+PSE, KOBE and PTRANS+PSE. As shown in Table 5 in Appendix, the baselines tend to generate general and vague results mainly from the title and attributes, such as \"\u5bbd\u677e(loose and comfortable stereotype)\" and \"\u5706\u9886(round collar)\", while the product details are ignored. Rather, ILVT generates more diverse copywriting guided by the product details, such as \"\u9752\u6625\u7684\u6d3b\u529b\u6c14 \u606f(the vitality of youth)\", \"\u8fd0\u52a8\u98ce(sport fashion)\" and \"\u6253\u7834\u4e86\u7eaf\u8272\u7684\u5355\u8c03\u6027(breaks monotony of solid color)\", which are attractive to customers. Such a running example demonstrates that the ILVT is able to well utilize the selected knowledge to make the generation more diverse and informative, thanks to the interactive latent variables that enhance the connection between knowledge selection and copywriting generation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 336, |
|
"end": 343, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Case Study", |
|
"sec_num": "4.5" |
|
}, |
|
{ |
|
"text": "Product Copywriting Generation. The task of product copywriting generation has gained considerable attentions with various systems proposed to automatically generate product descriptions. presented a statistical framework and template-based method. Shao et al. (2019) proposed a Planing-based Hierarchical Varitional Model that decomposed the long product copywriting generation into several dependent sentence generation sub-tasks. Chen et al. (2019) proposed a transformer-based generation model which utilized the user categories of items and the knowledge collected from external database. (Li et al., 2020b) construted a list of salient attributes and keywords incorporated with visual information from a product picture to generate the copywriting. Zhang et al. (2021) simply concatenated the short advertising phrases written by experts with the product title and attributes to generate product copywriting. We are different from such work by involving dominant knowledge-selection rather than simply incorporating information from external sources, and we make a selection in an end-to-end fashion.", |
|
"cite_spans": [ |
|
{ |
|
"start": 249, |
|
"end": 267, |
|
"text": "Shao et al. (2019)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 433, |
|
"end": 451, |
|
"text": "Chen et al. (2019)", |
|
"ref_id": "BIBREF2" |
|
}, |
|
{ |
|
"start": 594, |
|
"end": 612, |
|
"text": "(Li et al., 2020b)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 755, |
|
"end": 774, |
|
"text": "Zhang et al. (2021)", |
|
"ref_id": "BIBREF29" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Variational Autoencoders. Variational Autoencoders (i.e., VAEs) (Kingma and Welling, 2014) have been widely used in a plenty of natural language generation tasks, such as dialogue generation (Zhao et al., 2017 ), text summarization (Li et al., 2017) and neural machine translation . VAEs aims at incorporating posterior information to capture the high variability during training phase and reducing the KL Divergence (Kullback and Leibler, 1951) between the prior and the posterior. Traditional VAE models used RNNs Shao et al., 2019; Lee et al., 2020) . Lin et al. (2020) ; Li et al. (2020a) ; Fang et al. (2021) incorporated the latent variables from VAEs with transformer. However, both RNNs-based and transformer-based VAEs face the problem of KL-vanishing. Bowman et al. 2016; Fu et al. (2019) ; Shao et al. (2021) changed the weight of KL-divergence to solve the KL-vanishing problem. Shao et al. (2021) studied the balance between diversity and relevance from the generation results with KL-vanishing in e-commerce situation. This paper adopts the VAEs to dynamically model the input product title, attributes as well as the human-written advertising knowledge to extract the salient parts from the advertising descriptions and also inject the selected knowledge into the generated product copywriting, so that the diversity and the quality can be improved.", |
|
"cite_spans": [ |
|
{ |
|
"start": 191, |
|
"end": 209, |
|
"text": "(Zhao et al., 2017", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 232, |
|
"end": 249, |
|
"text": "(Li et al., 2017)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 417, |
|
"end": 445, |
|
"text": "(Kullback and Leibler, 1951)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 516, |
|
"end": 534, |
|
"text": "Shao et al., 2019;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 535, |
|
"end": 552, |
|
"text": "Lee et al., 2020)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 555, |
|
"end": 572, |
|
"text": "Lin et al. (2020)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 575, |
|
"end": 592, |
|
"text": "Li et al. (2020a)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 595, |
|
"end": 613, |
|
"text": "Fang et al. (2021)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 782, |
|
"end": 798, |
|
"text": "Fu et al. (2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 801, |
|
"end": 819, |
|
"text": "Shao et al. (2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 891, |
|
"end": 909, |
|
"text": "Shao et al. (2021)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "This work studies a novel problem on how to generate informative and diverse product copywriting with auxiliary human-created product details. We propose an interactive latent variables model based on transformer architecture, ILVT, which allows to select salient knowledge from the noisy product details. To better evaluate ILVT model, we construct a large Chinese product copywriting dataset, JDK. Extensive experiments demonstrate that our proposed model outperforms the baselines with regard to both automatic and human evaluation, illustrating that ILVT can select outstanding knowledge and improve the generation quality and diversity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://www.jd.com/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank the anonymous reviewers for their thoughtful and constructive comments. Yanyan Zou is the corresponding author.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u670d\u9970\u5185\u8863\uff1bT\u6064\uff1b\u98ce\u864e\uff1b :::: \u5706\u9886\uff1b ::::: \u5370\u82b1\uff1b\u65f6\u5c1a\uff1b :::: \u5bbd\u677e\uff1b\u5927\u7801\uff1b\u5973\uff1b\u590f\u5b63\uff1b \u65b0\u6b3e\uff1b\u77ed\u8896 Clothing underwear; T-shirt; recluserecluse; :::::: Round ::::: neck :::: colla; Printing; Fashion; :::::Loose; Large size; Women; Summer; New Fashion; Short sleeves The printing pattern is more interesting, large area embellished with printing pattern in the clothes breaks monotony of solid color, gives bluethe whole piece a more interesting touch, highlights the vitality of youth, showing greater sport fashion . Table 5 : Case study of ILVT and baselines on JDK dataset. All baselines take as input the product title, attributes and the pseudo labelled knowledge. We highlight the pseudo labelled knowledge in yellow. Words generated from product title and attribute set are highlighted in breaking line and from product details are in double underline. Diverse words generated from selected knowledge in ILVT is highlighted in red color.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 475, |
|
"end": 482, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Attribute Set", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Generating sentences from a continuous space", |
|
"authors": [ |
|
{ |
|
"first": "R", |
|
"middle": [], |
|
"last": "Samuel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Bowman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Oriol", |
|
"middle": [], |
|
"last": "Vilnis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"M" |
|
], |
|
"last": "Vinyals", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rafal", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Samy", |
|
"middle": [], |
|
"last": "J\u00f3zefowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bengio", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Samuel R. Bowman, Luke Vilnis, Oriol Vinyals, An- drew M. Dai, Rafal J\u00f3zefowicz, and Samy Bengio. 2016. Generating sentences from a continuous space. In Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Universal sentence encoder for english", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Cer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinfei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sheng-Yi", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nan", |
|
"middle": [], |
|
"last": "Hua", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicole", |
|
"middle": [], |
|
"last": "Limtiaco", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rhomni", |
|
"middle": [], |
|
"last": "St John", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noah", |
|
"middle": [], |
|
"last": "Constant", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mario", |
|
"middle": [], |
|
"last": "Guajardo-C\u00e9spedes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Steve", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Tar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniel Cer, Yinfei Yang, Sheng-yi Kong, Nan Hua, Nicole Limtiaco, Rhomni St John, Noah Constant, Mario Guajardo-C\u00e9spedes, Steve Yuan, Chris Tar, et al. 2018. Universal sentence encoder for english. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Towards knowledge-based personalized product description generation in e-commerce", |
|
"authors": [ |
|
{ |
|
"first": "Qibin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junyang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yichang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hongxia", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingren", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jie", |
|
"middle": [], |
|
"last": "Tang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qibin Chen, Junyang Lin, Yichang Zhang, Hongxia Yang, Jingren Zhou, and Jie Tang. 2019. Towards knowledge-based personalized product description generation in e-commerce. In Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "Transformer-based conditional variational autoencoder for controllable story generation", |
|
"authors": [ |
|
{ |
|
"first": "Le", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tao", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chao-Chun", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liefeng", |
|
"middle": [], |
|
"last": "Bo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wen", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changyou", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2101.00828" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Le Fang, Tao Zeng, Chao-Chun Liu, Liefeng Bo, Wen Dong, and Changyou Chen. 2021. Transformer-based conditional variational autoen- coder for controllable story generation. arXiv preprint arXiv:2101.00828.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Measuring nominal scale agreement among many raters", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Joseph", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Fleiss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1971, |
|
"venue": "Psychological bulletin", |
|
"volume": "76", |
|
"issue": "5", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Joseph L Fleiss. 1971. Measuring nominal scale agree- ment among many raters. Psychological bulletin, 76(5):378.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Cyclical annealing schedule: A simple approach to mitigating kl vanishing", |
|
"authors": [ |
|
{ |
|
"first": "Hao", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chunyuan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asli", |
|
"middle": [], |
|
"last": "Celikyilmaz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lawrence", |
|
"middle": [], |
|
"last": "Carin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hao Fu, Chunyuan Li, Xiaodong Liu, Jianfeng Gao, Asli Celikyilmaz, and Lawrence Carin. 2019. Cycli- cal annealing schedule: A simple approach to mit- igating kl vanishing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Convolutional sequence to sequence learning", |
|
"authors": [ |
|
{ |
|
"first": "Jonas", |
|
"middle": [], |
|
"last": "Gehring", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Auli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Grangier", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Denis", |
|
"middle": [], |
|
"last": "Yarats", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yann N", |
|
"middle": [], |
|
"last": "Dauphin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Conference on Machine Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jonas Gehring, Michael Auli, David Grangier, Denis Yarats, and Yann N Dauphin. 2017. Convolutional sequence to sequence learning. In International Con- ference on Machine Learning. PMLR.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Categorical reparameterization with gumbel-softmax", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Jang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shixiang", |
|
"middle": [ |
|
"Shane" |
|
], |
|
"last": "Gu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Poole", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Jang, Shixiang Shane Gu, and Ben Poole. 2017. Categorical reparameterization with gumbel-softmax. In International Conference on Learning Representa- tions.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Bilinear attention networks", |
|
"authors": [ |
|
{ |
|
"first": "Jin-Hwa", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaehyun", |
|
"middle": [], |
|
"last": "Jun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byoung-Tak", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jin-Hwa Kim, Jaehyun Jun, and Byoung-Tak Zhang. 2018. Bilinear attention networks. In Advances in Neural Information Processing Systems.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Autoencoding variational bayes", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Max", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Welling", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P. Kingma and Max Welling. 2014. Auto- encoding variational bayes. In International Conference on Learning Representations, volume abs/1312.6114.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "On information and sufficiency. The annals of mathematical statistics", |
|
"authors": [ |
|
{ |
|
"first": "Solomon", |
|
"middle": [], |
|
"last": "Kullback", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Richard", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Leibler", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1951, |
|
"venue": "", |
|
"volume": "22", |
|
"issue": "", |
|
"pages": "79--86", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Solomon Kullback and Richard A Leibler. 1951. On information and sufficiency. The annals of mathe- matical statistics, 22(1):79-86.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Generating diverse and consistent qa pairs from contexts with information-maximizing hierarchical conditional vaes", |
|
"authors": [ |
|
{ |
|
"first": "Seanie", |
|
"middle": [], |
|
"last": "Dong Bok Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donghwan", |
|
"middle": [], |
|
"last": "Woo Tae Jeong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sung", |
|
"middle": [ |
|
"Ju" |
|
], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dong Bok Lee, Seanie Lee, Woo Tae Jeong, Dongh- wan Kim, and Sung Ju Hwang. 2020. Gener- ating diverse and consistent qa pairs from con- texts with information-maximizing hierarchical con- ditional vaes. In Proceedings of the 58th Annual Meeting of the Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Optimus: Organizing sentences via pre-trained modeling of a latent space", |
|
"authors": [ |
|
{ |
|
"first": "Chunyuan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiang", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiujun", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Baolin", |
|
"middle": [], |
|
"last": "Peng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yizhe", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chunyuan Li, Xiang Gao, Yuan Li, Xiujun Li, Baolin Peng, Yizhe Zhang, and Jianfeng Gao. 2020a. Opti- mus: Organizing sentences via pre-trained modeling of a latent space. In Proceedings of the 2020 Con- ference on Empirical Methods in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Aspect-aware multimodal summarization for chinese e-commerce products", |
|
"authors": [ |
|
{ |
|
"first": "Haoran", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Song", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Youzheng", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodong", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bowen", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haoran Li, Peng Yuan, Song Xu, Youzheng Wu, Xi- aodong He, and Bowen Zhou. 2020b. Aspect-aware multimodal summarization for chinese e-commerce products. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "A diversity-promoting objective function for neural conversation models", |
|
"authors": [ |
|
{ |
|
"first": "Jiwei", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michel", |
|
"middle": [], |
|
"last": "Galley", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chris", |
|
"middle": [], |
|
"last": "Brockett", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianfeng", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Dolan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and William B. Dolan. 2016. A diversity-promoting objective function for neural conversation models. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Deep recurrent generative decoder for abstractive text summarization", |
|
"authors": [ |
|
{ |
|
"first": "Piji", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wai", |
|
"middle": [], |
|
"last": "Lam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lidong", |
|
"middle": [], |
|
"last": "Bing", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihao", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Piji Li, Wai Lam, Lidong Bing, and Zihao Wang. 2017. Deep recurrent generative decoder for abstractive text summarization. In Proceedings of the 2017 Confer- ence on Empirical Methods in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Variational transformers for diverse response generation", |
|
"authors": [ |
|
{ |
|
"first": "Zhaojiang", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Genta Indra Winata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zihan", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pascale", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Fung", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2003.12738" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhaojiang Lin, Genta Indra Winata, Peng Xu, Zihan Liu, and Pascale Fung. 2020. Variational transform- ers for diverse response generation. arXiv preprint arXiv:2003.12738.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Some methods for classification and analysis of multivariate observations", |
|
"authors": [ |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Macqueen", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1967, |
|
"venue": "Proceedings of the fifth Berkeley symposium on mathematical statistics and probability", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "281--297", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "James MacQueen et al. 1967. Some methods for clas- sification and analysis of multivariate observations. In Proceedings of the fifth Berkeley symposium on mathematical statistics and probability, volume 1, pages 281-297.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "The concrete distribution: A continuous relaxation of discrete random variables", |
|
"authors": [ |
|
{ |
|
"first": "Chris", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Maddison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andriy", |
|
"middle": [], |
|
"last": "Mnih", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yee Whye", |
|
"middle": [], |
|
"last": "Teh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. 2017. The concrete distribution: A continuous relax- ation of discrete random variables. In International Conference on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Natural language inference by tree-based convolution and heuristic matching", |
|
"authors": [ |
|
{ |
|
"first": "Lili", |
|
"middle": [], |
|
"last": "Mou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Men", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ge", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yan", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Yan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhi", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lili Mou, Rui Men, Ge Li, Yan Xu, Lu Zhang, Rui Yan, and Zhi Jin. 2016. Natural language inference by tree-based convolution and heuristic matching. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Bleu: a method for automatic evaluation of machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Kishore", |
|
"middle": [], |
|
"last": "Papineni", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Salim", |
|
"middle": [], |
|
"last": "Roukos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Todd", |
|
"middle": [], |
|
"last": "Ward", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wei-Jing", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic evalu- ation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Sentence-bert: Sentence embeddings using siamese bert-networks", |
|
"authors": [ |
|
{ |
|
"first": "Nils", |
|
"middle": [], |
|
"last": "Reimers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iryna", |
|
"middle": [], |
|
"last": "Gurevych", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nils Reimers and Iryna Gurevych. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Get to the point: Summarization with pointergenerator networks", |
|
"authors": [ |
|
{ |
|
"first": "Abigail", |
|
"middle": [], |
|
"last": "See", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [], |
|
"last": "Peter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Christopher D", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Manning", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Abigail See, Peter J Liu, and Christopher D Manning. 2017. Get to the point: Summarization with pointer- generator networks. In Proceedings of the 55th An- nual Meeting of the Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Controllable and diverse text generation in e-commerce", |
|
"authors": [], |
|
"year": null, |
|
"venue": "Proceedings of the Web Conference 2021", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2392--2401", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Controllable and diverse text generation in e-commerce. In Proceedings of the Web Conference 2021, pages 2392-2401.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Long and diverse text generation with planning-based hierarchical variational model", |
|
"authors": [ |
|
{ |
|
"first": "Zhihong", |
|
"middle": [], |
|
"last": "Shao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Minlie", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiangtao", |
|
"middle": [], |
|
"last": "Wen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wenfei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoyan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhihong Shao, Minlie Huang, Jiangtao Wen, Wenfei Xu, and Xiaoyan Zhu. 2019. Long and diverse text gen- eration with planning-based hierarchical variational model. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u0141ukasz", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "A statistical framework for product description generation", |
|
"authors": [ |
|
{ |
|
"first": "Jinpeng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yutai", |
|
"middle": [], |
|
"last": "Hou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunbo", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chin-Yew", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinpeng Wang, Yutai Hou, Jing Liu, Yunbo Cao, and Chin-Yew Lin. 2017. A statistical framework for product description generation. In Proceedings of the Eighth International Joint Conference on Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Variational neural machine translation", |
|
"authors": [ |
|
{ |
|
"first": "Biao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Deyi", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinsong", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hong", |
|
"middle": [], |
|
"last": "Duan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Min", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Biao Zhang, Deyi Xiong, Jinsong Su, Hong Duan, and Min Zhang. 2016. Variational neural machine trans- lation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Automatic product copywriting for e-commerce", |
|
"authors": [ |
|
{ |
|
"first": "Xueying", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yanyan", |
|
"middle": [], |
|
"last": "Zou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hainan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shiliang", |
|
"middle": [], |
|
"last": "Diao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiajia", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhuoye", |
|
"middle": [], |
|
"last": "Ding", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhen", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xueqi", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yun", |
|
"middle": [], |
|
"last": "Xiao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2112.11915" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xueying Zhang, Yanyan Zou, Hainan Zhang, Jing Zhou, Shiliang Diao, Jiajia Chen, Zhuoye Ding, Zhen He, Xueqi He, Yun Xiao, et al. 2021. Automatic prod- uct copywriting for e-commerce. arXiv preprint arXiv:2112.11915.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Learning discourse-level diversity for neural dialog models using conditional variational autoencoders", |
|
"authors": [ |
|
{ |
|
"first": "Tiancheng", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ran", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maxine", |
|
"middle": [], |
|
"last": "Esk\u00e9nazi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tiancheng Zhao, Ran Zhao, and Maxine Esk\u00e9nazi. 2017. Learning discourse-level diversity for neural dialog models using conditional variational autoencoders. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "loose T-shirt, for women in new summer fashions", |
|
"authors": [ |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Case", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "A Case Study Product Title \u4e2d \u56fd \u98ce ::::::::: \u5706\u9886\u5370\u82b1\u65f6 \u5c1a ::::::::: \u5bbd\u677e\u5927\u7801T\u6064 \u5973 \u6b3e \u590f \u5b63 \u65b0 \u6b3e ::::: \u663e\u7626\u77ed \u8896 \u9ed1 \u8272 \u5370 \u82b1 \u9884 \u552et\u6064 Chinese style, ::::: round ::::: neck :::::::: printing ::::::: fashion, ::::: loose T-shirt, for women in new summer fashions, ::::::: looking :::: slim with short sleeves and black printing, pre-sale T-shirt", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "An example of product copywriting generation from our dataset, including product title, attribute, details as well as the product description.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF1": { |
|
"text": "The graphical representation of the propossed ILVT model. Dotted line belongs to posterior distribution solely.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"FIGREF2": { |
|
"text": "The architecture of the proposed ILVT model.The solid line denotes the training procedure, while the dotted line denotes the inference process.", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF2": { |
|
"html": null, |
|
"content": "<table/>", |
|
"text": "Automatic evaluation results on the JDK dataset. * represents that the model takes as input the pseudo labelled knowledge due to the system design. The best results, including the one for PTRANS+PSE, are highlighted with underline. The highest scores, except the ones for PTRANS+PSE, are highlighted in bold.", |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"content": "<table><tr><td>Model</td><td colspan=\"2\">Corr. Dive. Cohe.</td></tr><tr><td colspan=\"2\">CONVSEQ2SEQ+PSE 3.52 3.28</td><td>3.52</td></tr><tr><td colspan=\"2\">TRANSFORMER+PSE 3.73 3.33</td><td>3.29</td></tr><tr><td>PTTRANS+PSE</td><td>4.21 4.24</td><td>4.49</td></tr><tr><td>KOBE</td><td>4.39 3.85</td><td>4.14</td></tr><tr><td>ILVT</td><td>4.77 4.62</td><td>4.51</td></tr></table>", |
|
"text": "Model ablation study on JDK dataset. -Copy Mechanism: removing copy mechanism. -Description Distribution: removing description latent variable. -Knowledge Distribution: removing knowledge latent variable.", |
|
"type_str": "table", |
|
"num": null |
|
}, |
|
"TABREF5": { |
|
"html": null, |
|
"content": "<table><tr><td>All baselines input pseduo selected knowledge. Corr.: Correctness, Dive.: Diversity, and Cohe.: Coherence.</td></tr></table>", |
|
"text": "Human evaluation results on the JDK dataset.", |
|
"type_str": "table", |
|
"num": null |
|
} |
|
} |
|
} |
|
} |