| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T02:13:06.941456Z" |
| }, |
| "title": "Unsupervised KB-to-Text Generation with Auxiliary Triple Extraction using Dual Learning *", |
| "authors": [ |
| { |
| "first": "Zihao", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Chinese University of Hong Kong", |
| "location": {} |
| }, |
| "email": "zhfu@se.cuhk.edu.hk" |
| }, |
| { |
| "first": "Bei", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "beishi@tencent.com" |
| }, |
| { |
| "first": "Lidong", |
| "middle": [], |
| "last": "Bing", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "l.bing@alibaba-inc.com" |
| }, |
| { |
| "first": "Wai", |
| "middle": [], |
| "last": "Lam", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "The Chinese University of Hong Kong", |
| "location": {} |
| }, |
| "email": "wlam@se.cuhk.edu.hk" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "Craig" |
| ], |
| "last": "Watson", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Craig", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Watson", |
| "middle": [], |
| "last": "James", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Craig", |
| "middle": [], |
| "last": "Watson", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "The KB-to-text task aims at generating texts based on the given KB triples. Traditional methods usually map KB triples to sentences via a supervised seq-to-seq model. However, existing annotated datasets are very limited and human labeling is very expensive. In this paper, we propose a method which trains the generation model in a completely unsupervised way with unaligned raw text data and KB triples. Our method exploits a novel dual training framework which leverages the inverse relationship between the KB-to-text generation task and an auxiliary triple extraction task. In our architecture, we reconstruct KB triples or texts via a closed-loop framework via linking a generator and an extractor. Therefore the loss function that accounts for the reconstruction error of KB triples and texts can be used to train the generator and extractor. To resolve the cold start problem in training, we propose a method using a pseudo data generator which generates pseudo texts and KB triples for learning an initial model. To resolve the multiple-triple problem, we design an allocated reinforcement learning component to optimize the reconstruction loss. The experimental results demonstrate that our model can outperform other unsupervised generation methods and close to the bound of supervised methods.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "The KB-to-text task aims at generating texts based on the given KB triples. Traditional methods usually map KB triples to sentences via a supervised seq-to-seq model. However, existing annotated datasets are very limited and human labeling is very expensive. In this paper, we propose a method which trains the generation model in a completely unsupervised way with unaligned raw text data and KB triples. Our method exploits a novel dual training framework which leverages the inverse relationship between the KB-to-text generation task and an auxiliary triple extraction task. In our architecture, we reconstruct KB triples or texts via a closed-loop framework via linking a generator and an extractor. Therefore the loss function that accounts for the reconstruction error of KB triples and texts can be used to train the generator and extractor. To resolve the cold start problem in training, we propose a method using a pseudo data generator which generates pseudo texts and KB triples for learning an initial model. To resolve the multiple-triple problem, we design an allocated reinforcement learning component to optimize the reconstruction loss. The experimental results demonstrate that our model can outperform other unsupervised generation methods and close to the bound of supervised methods.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Knowledge Base (KB)-to-text task focuses on generating plain text descriptions from given knowledge bases (KB) triples which makes them accessible to users. For instance, given a KB triple <101 Helena, discoverer, James Craig Watson>, it is expected to generate a description sentence such as \"101 Helena is discovered by James Craig Watson.\". Recently, many research works have been proposed for this task. For example, Gardent et al. (2017a,b) create the WebNLG dataset to generate description for triples sampled from DBPedia (Auer et al., 2007) . Lebret et al.'s (2016) method generates people's biographies from extracted Wikipedia infobox. Novikova et al. (2017) propose to generate restaurant reviews by some given attributes and Fu et al. (2020a) create the WikiEvent dataset to generate text based on an event chain. However, the works mentioned above usually map structured triples to text via a supervised seq-to-seq (Sutskever et al., 2014) model, in which large amounts of annotated data is necessary and the annotation is very expensive and time-consuming.", |
| "cite_spans": [ |
| { |
| "start": 421, |
| "end": 445, |
| "text": "Gardent et al. (2017a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 529, |
| "end": 548, |
| "text": "(Auer et al., 2007)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 551, |
| "end": 573, |
| "text": "Lebret et al.'s (2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 646, |
| "end": 668, |
| "text": "Novikova et al. (2017)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 737, |
| "end": 754, |
| "text": "Fu et al. (2020a)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 928, |
| "end": 952, |
| "text": "(Sutskever et al., 2014)", |
| "ref_id": "BIBREF31" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We aim to tackle the problem of completely unsupervised KB-to-text generation which only requires a text corpus and a KB corpus and does not assume any alignment between them. We propose a dual learning framework based on the inverse relationship between the KB-to-text generation task and the triple extraction task. Specifically, the KBto-text task generates sentences from structured triples while the task of triple extraction extracts multiple triples from plain texts. Such a relationship enables the design of a closed-loop learning framework in which we link KB-to-text generation and its dual task of triple extraction so as to reconstruct the unaligned KB triples and texts. The non-differentiability issue of picking words from our neural model before reconstruction makes it hard to train the extractor or generator effectively using backpropagation. To solve this issue, we apply Reinforcement Learning (RL) based on policy gradients into our dual learning framework to optimize our extractor or generator according to the rewards.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Some semi-supervised works (He et al., 2016; Cao et al., 2019) have been proposed to generate 1.0 1.0 0.5 0.5 0.9", |
| "cite_spans": [ |
| { |
| "start": 27, |
| "end": 44, |
| "text": "(He et al., 2016;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 45, |
| "end": 62, |
| "text": "Cao et al., 2019)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our Proposed ARL Figure 1 : Illustration of the multiple-triple problem, in which E and G are extractor and generator respectively. The left part is the traditional RL methods and the right is our proposed ARL method. Four triples are extracted by the extractor. The top two triples are right and the others are wrong. Traditional RL methods give a single reward (0.9) for all the four triples while our proposed ARL gives each triple a different reward. Then the right triples and the wrong triples will be distinguished and optimized differently.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 17, |
| "end": 25, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "G Traditional RL", |
| "sec_num": null |
| }, |
| { |
| "text": "plain texts from data of certain forms in other domains (e.g., translation, semantic parsing) with limited annotated resources. These models contain two major steps. Firstly, they pre-train a weak model based on the labeled data. Secondly, they use an iterative model whose aim is to improve the weak model using the unlabeled data. In each iteration, the input sequence of the original data form is transformed into another form by the original model. Then, it is transformed back to the original data form by an inverse model. However, there are still some challenges applying the existing methods into KB-to-text directly: (1) Cold start problem. Existing approaches pre-train the model with labeled data and then fine-tune their models via unlabelled data. Such a mechanism still needs annotated data which is more difficult and expensive to obtain in KB-to-text task. (2) Multiple-triple problem. As shown in Fig. 1 , multiple triples might be extracted from a text example, and inevitably, the neural extractor could extract some wrong triples. The traditional dual learning approaches (He et al., 2016; Cao et al., 2019) , if directly applied, will regard all these triples as one unit and calculate a single reward for all the triples regardless of whether they are correct or not. It not only results in the slow convergence of RL, but also leads to unsatisfactory model performance. We propose a novel Extractor-Generator Dual (EGD) framework which exploits the inverse relationship between KB-to-text generation and auxiliary triple extraction. Our model can resolve the KB-to-text task in a totally unsupervised way. To cope with the cold start problem, we propose a pseudo data generator (PDG) which can generate pseudo text and pseudo KB triples based on the given unaligned KB triples and text respectively with prior knowledge. The extractor and the generator are then pre-trained with the generated pseudo data. 
To resolve the multiple-triple problem, we propose a novel Allocated Reinforcement Learning (ARL) component. Different from traditional RL methods in which one reward is calculated for the whole sequence, ARL allocates different rewards to different sub-parts of the sequence (Fig. 1 right) . Therefore, our model can distinguish the quality of each triple and optimize the extractor and the generator more accurately. We compare our framework with existing dual learning methods and the experimental results demonstrate that our model can outperform other unsupervised generation methods and close to the bound of supervised methods.", |
| "cite_spans": [ |
| { |
| "start": 1092, |
| "end": 1109, |
| "text": "(He et al., 2016;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 1110, |
| "end": 1127, |
| "text": "Cao et al., 2019)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 914, |
| "end": 920, |
| "text": "Fig. 1", |
| "ref_id": null |
| }, |
| { |
| "start": 2205, |
| "end": 2219, |
| "text": "(Fig. 1 right)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "G Traditional RL", |
| "sec_num": null |
| }, |
| { |
| "text": "Recently many tasks and methods have been proposed to transform existing data into humanreadable text. WebNLG (Gardent et al., 2017a,b) is proposed to describe a list of triples sampled from DBPedia (Auer et al., 2007) . Except for the KB triples, many other types of data have also been investigated for how to generate text from them. For example, E2E (Novikova et al., 2017) aims at generating text from some restaurants' attributes. Wikibio (Lebret et al., 2016) proposes to generate biographies for the Wikipedia infobox while WikiEvent (Fu et al., 2020a) proposes to generate text based on an event chain. Besides, Chen and Mooney (2008) ; Wiseman et al. (2017) propose to generate a summarization of a match based on the scores and Liang et al. (2009) propose to generate weather reports based on the records. All these tasks require an elaborately annotated dataset which is very expensive to prepare.", |
| "cite_spans": [ |
| { |
| "start": 110, |
| "end": 135, |
| "text": "(Gardent et al., 2017a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 199, |
| "end": 218, |
| "text": "(Auer et al., 2007)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 354, |
| "end": 377, |
| "text": "(Novikova et al., 2017)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 445, |
| "end": 466, |
| "text": "(Lebret et al., 2016)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 542, |
| "end": 560, |
| "text": "(Fu et al., 2020a)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 621, |
| "end": 643, |
| "text": "Chen and Mooney (2008)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 646, |
| "end": 667, |
| "text": "Wiseman et al. (2017)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 739, |
| "end": 758, |
| "text": "Liang et al. (2009)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Many methods have been proposed to tackle the dataset insufficiency problem in other tasks. Fu et al. (2020c) propose to directly train the model on partially-aligned data in which the data and the text are not necessarily exactly math, and it can be built automatically. He et al. (2016) ; Sennrich et al. (2016) ; Yi et al. (2017) propose dual learning frameworks. They pre-train a weak model with parallel data and refine the model with monolingual data. This strategy has been applied in many related tasks including semantic parsing (Cao et al., 2019) , summarization (Baziotis et al., 2019) ", |
| "cite_spans": [ |
| { |
| "start": 92, |
| "end": 109, |
| "text": "Fu et al. (2020c)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 272, |
| "end": 288, |
| "text": "He et al. (2016)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 291, |
| "end": 313, |
| "text": "Sennrich et al. (2016)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 316, |
| "end": 332, |
| "text": "Yi et al. (2017)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 538, |
| "end": 556, |
| "text": "(Cao et al., 2019)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 573, |
| "end": 596, |
| "text": "(Baziotis et al., 2019)", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "= \" > A A A B + X i c b V D L S s N A F J 3 U V 6 2 v V F f S z W A R X J W k R Z u C i 4 I I L l y 0 Y G u h C W E y n b Z D J w 9 m J o U S 8 i d u X C j i 1 v 9 w 4 U 6 / x k n b h V Y P D B z O u Z d 7 5 n g R o 0 I a x q e W W 1 v f 2 N z K b x d 2 d v f 2 D / T i Y V e E M c e k g 0 M W 8 p 6 H B G E 0 I B 1 J J S O 9 i B P k e 4 z c e 5 O r z L + f E i 5 o G N z J W U Q c H 4 0 C O q Q Y S S W 5 u m 7 7 S I 4 x Y s l t 6 i b X a c H V y 0 a l Y R m 1 R h U a F c u 6 q N b P F T H m g O a S l J t F u / T V P n 5 v u f q H P Q h x 7 J N A Y o a E 6 J t G J J 0 E c U k x I 2 n B j g W J E J 6 g E e k r G i C f C C e Z J 0 / h q V I G c B h y 9 Q I J 5 + r P j Q T 5 Q s x 8 T 0 1 m O c W q l 4 n / e f 1 Y D i 0 n o U E U S x L g x a F h z K A M Y V Y D H F B O s G Q z R R D m V G W F e I w 4 w l K V l Z V g r n 7 5 L + l W K 2 a t U m 2 b 5 e Y l W C A P S u A E n A E T 1 E E T 3 I A W 6 A A M p u A B P I F n L d E e t R f t d T G a 0 5 Y 7 R + A X t L d v g O y W g w = = < / l a t e x i t > L ARLG", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "< l a t e x i t s h a 1 _ b a s e 6 4 = \" k U 4 3 c 7 I m 1 F e 8 l k x p E h y B 0 d 0 r x e A = \" > A A A B / H", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "i c d V B N S w J B G J 6 1 L 7 O v N U / h Z U i C T s u u Z i Z 0 M D r U w Y N G m q C y z I 6 z O j j 7 w c x s I I v 9 l S 4 d i u j a z + j Q r X 5 N s 1 p Q U Q 8 M 7 8 P z v i / v M 4 8 T M i q k a b 5 p q Y X F p e W V 9 G p m b X 1 j c 0 v P b r d F E H F M W j h g A e 8 4 S B B G f d K S V D L S C T l B n s P I l T M + T f p X 1 4 Q L G v i X c h K S v o e G P n U p R l J J t p 7 r e U i O M G J x f W r H J x f 1 s 2 n G 1 g u m U S 1 V i u U y N I 2 k H l Q V O V T F L E L L M G c o 1 L K 9 / H t z 5 6 V h 6 6 + 9 Q Y A j j / g S M y R E 1 z J D 2 Y 8 R l x Q z M s 3 0 I k F C h M d o S L q K + s g j o h / P z E / h n l I G 0 A", |
| "eq_num": "2" |
| } |
| ], |
| "section": "Related Works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "4 e r 6 E M / X 7 R o w 8 I S a e o y Y T q + J 3 L x H / 6 n U j 6 R 7 1 Y + q H k S Q + n h 9 y I w Z l A J M k 4 I B y g i W b K I I w p 8 o r x C P E E Z Y q r y S E r 5 / C / 0 m 7 a F g l o 9 i 0 C r V j M E c a 5 M E u 2 A c W q I A a O A c N 0 A I Y T M A t u A c P 2 o 1 2 p z 1 q T / P R l P a 5 k w M / o D 1 / A H T j l 5 Y = < / l a t e x i t > L G < l a t e x i t s h a 1 _ b a s e 6 4 = \" y / L V b o R Z v P i T 5 7 5 9 n 9 z i g P Q Z r w 8 = \" > A A A B information narration (Sun et al., 2018) . However, as indicated in Hoang et al. 2018, the dual learning approach is not easy to train. Moreover, these methods still need some aligned data to pre-train the weak model. Another line of research proposes to use some extra annotations instead of using aligned data. Lample et al. (2018a,b) propose to train an unsupervised NMT system based on few annotated word pairs (Conneau et al., 2018) . Luo et al. (2019) propose to generate pseudo data with a rule-based template . However, these models cannot be directly applied in our scenario since our dataset is too complicated to make these annotations. Fu et al. (2020b) propose to utilize topic information from a dynamic topic tracker to solve the dataset insufficiency problem. Cheng et al. (2020) propose to generate better text description for a few entities by exploring the knowledge from KB and distill the useful part. In the field of computer vision, Zhu et al. (2017) propose cy-cleGAN which uses a cycled training method that transforms the input into another data form and then transforms it back, minimizing the recover loss. The method works well in the image domain but has some problems in text generation considering the non-differentiable discrete layer. We follow the ideas of cycleGAN to train the whole model without supervised data and adopt the RL method proposed in dual learning methods.", |
| "cite_spans": [ |
| { |
| "start": 494, |
| "end": 512, |
| "text": "(Sun et al., 2018)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 785, |
| "end": 808, |
| "text": "Lample et al. (2018a,b)", |
| "ref_id": null |
| }, |
| { |
| "start": 887, |
| "end": 909, |
| "text": "(Conneau et al., 2018)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 912, |
| "end": 929, |
| "text": "Luo et al. (2019)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1120, |
| "end": 1137, |
| "text": "Fu et al. (2020b)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 1248, |
| "end": 1267, |
| "text": "Cheng et al. (2020)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1428, |
| "end": 1445, |
| "text": "Zhu et al. (2017)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "+ X i c d V D L S s N A F J 3 4 r P W V 6 k q 6 C R b B V U j q o y 5 c F F z o w k U L 9 g F N C J P p p B 0 6 m Y S Z S a G E / I k b F 4 q 4 9 T 9 c u N O v c d J W s K I H L h z O u Z d 7 O H 5 M i Z C W 9 a E t L a + s r q 0 X N o q b W 9 s 7 u 3 p p r y 2 i h C P c Q h G N e N e H A l P C c E s S S X E 3 5 h i G P s U d f 3 S V + 5 0 x 5 o J E 7 E 5 O Y u y G c M B I Q B C U S v J 0 3 Q m h H C J I 0 9 v M S 6 + z o q d X b N O a w r D M s 9 r 5 a c 1 W Z K 5 8 W 5 V 6 y S l / N g / e G p 7 + 7 v Q j l I S Y S U S h E D 3 b i q W b Q i 4 J o j g r O o n A M U Q j O M A 9 R R k M s X D T a f L M O F J K 3 w g i r o Z J Y 6 r + v E h h K M Q k 9 N V m n l P 8 9 n L x L 6 + X y O D C T Q m L E 4 k Z m j 0 K E m r I y M h r M P q E Y y T p R B G I O F F Z D T S E H C K p y l o o 4 X / S r p r 2 i V l t 2 p X 6 J Z i h A M r g E B w D G 9 R A H d y A B m g B B M b g H j y C J y 3 V H r R n 7 W W 2 u q T N b / b B A r T X L 0 7 G l m E = < / l a t e x i t > L ARLE < l a t e x i t s h a 1 _ b a s e 6 4 = \" p L 0 R y u W 8 v 2 v D V j B k 9 F K E y s Q o k Y o = \" > A A A B / H i c d V D L S g M x F M 3 U V 6 2 v q V 1 J N 8 E i u B o y L f Q B L i o i u O i i F f u A t p R M m r a h m Q d J R i h D / R U 3 L h R x 6 2 e 4 c K d f Y 6 Z V U N E D g c M 5 9 3 J P j h N w J h V C b 0 Z i Z X V t f S O 5 m d r a 3 t n d M 9 P 7 L e m H g t A m 8 b k v O g 6 W l D O P N h V T n H Y C Q b H r c N p 2 p m e x 3 7 6 m Q j L f u 1 K z g P Z d P P b Y i B G s t D Q w M z 0 X q w n B P K r N B 9 H p Z e 1 8 n h q Y O W Q V U Q y I L F Q p o 0 J F k 0 K x X M y X o G 0 t H J S r p n v Z 9 8 b B S 3 1 g v v a G P g l d 6 i n C s Z R d G w W q H 2 G h G O F 0 n u q F k g a Y T P G Y d j X 1 s E t l P 1 q E n 8 M j r Q z h y B f 6 e Q o u 1 O 8 b E X a l n L m O n o y j y t 9 e L P 7 l d U M 1 K v c j 5 g W h o h 5 Z H h q F H C o f x k 3 A I R O U K D 7 T B B P B d F Z I J l h g o n R f c Q l f P 4 X / k 1 b e s g t W v m H n q i d g i S T I g k N w D G x Q A l V w A 
e q g C Q i Y g V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Reinforcement Learning (RL) has been utilized to solve the infeasibility of backpropagation through discrete tokens layer. Li et al. (2016) propose to use RL to focus on the long term target and thus improve the performance. Yu et al. (2017) propose to use the RL in generative adversarial networks to solve the discrete tokens problem. He et al. (2016) ; Sun et al. (2018) propose to use RL in dual training. As far as we know, no studies of RL have been conducted for KB triples in which the reward is different for each triple considering multiple-triple problem.", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 139, |
| "text": "Li et al. (2016)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 225, |
| "end": 241, |
| "text": "Yu et al. (2017)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 337, |
| "end": 353, |
| "text": "He et al. (2016)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 356, |
| "end": 373, |
| "text": "Sun et al. (2018)", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Works", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Formally, we denote the KB corpus as K =", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method 3.1 Problem Definition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "{K i |\u2200i} in which K i = [k (i) 1 , k (i) 2 , \u2022 \u2022 \u2022 , k (i) n i ] is the ith KB triple list containing n i triples. k (i) j = (h (i) j , r (i) j , t (i) j )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method 3.1 Problem Definition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "represents the jth KB triple in K i containing the head, relation and tail entity respectively. We denote the texts corpus as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method 3.1 Problem Definition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "T = {T i |\u2200i} in which T i = [t (i) 1 , t (i) 2 , \u2022 \u2022 \u2022 , t (i) n i ] is the ith sen- tence and t (i)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method 3.1 Problem Definition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "j is the jth word in the sentence. In our problem, we are only given a collection of KB triples K t \u2282 K and a collection of text T t \u2282 T without any alignment information between them. The ultimate goal is to train a model that generates the corresponding text in T describing the given triple list from K.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Method 3.1 Problem Definition", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our proposed Extractor-Generator Dual (EGD) framework is composed of a generator G and an extractor E that translate data in one form to another. We denote all trainable parameters in E and G as \u03b8 and \u03c6, respectively. The generator generates text representation for each KB triple as T = G(K), K \u2208 K, T \u2208 T while the extractor extracts KB triples from raw text as K = E(T ), T \u2208 T , K \u2208 K. Our EGD framework is trained in an unsupervised manner and it contains three processes, as shown in Fig. 2 . The first process is a pre-train process in which both E and G are trained with the pseudo data generated by the pseudo generator. The second process is the kb2kb process which generates description text based on the given KB triples with G and then recovers the KB triples from the generated text with E. The third process is called txt2txt which extracts KB triples from the given text with E and then recovers the text from the generated KB triples with G. In order to overcome the multiple-mapping problem, we propose a novel allocated reinforcement learning component in kb2kb and txt2txt, respectively.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 490, |
| "end": 496, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The EGD framework firstly pre-trains the extractor and generator with the data generated by the pseudo data generator (PDG). For the text corpus T t , we generate corresponding pseudo KB triples as K t = {K = P K (T )|\u2200T \u2208 T t }, in which P K is the pseudo KB generator. We pretrain the generator G to transform K \u2208 K t to T \u2208 T t . Similarly, we generate pseudo text as T t = {T = P T (K)|\u2200K \u2208 K t }, in which P T is the pseudo text generator. Then, we train the extractor to transform T \u2208 T t to K \u2208 K t . After G and E have been pre-trained, the kb2kb process and the txt2txt process are conducted alternately to further improve the performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the kb2kb process, the input KB triples are firstly flattened and concatenated one by one as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "K = [k 1 , k 2 , \u2022 \u2022 \u2022 , k n k ] = [w 1 , w 2 , \u2022 \u2022 \u2022 , w nw ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "in which k i is the ith triple in K while w i denotes the ith words in the concatenated word list. n k is the number of triples while n w is the number of the words. K is then sent into the generator G to get a text description", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "T m = [t 1 , t 2 , \u2022 \u2022 \u2022 , t nt ],", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where t i is the ith word in the sentence T m and n t is the length of T m . Afterwards, The extractor takes the sentences T m as input and outputs the triple sequence", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "K = [w 1 , w 2 , \u2022 \u2022 \u2022 , w n w ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": ", in which w i is the ith word in K while n w is the length of K . The target is to make K as close to K as possible. Therefore, in the training step, the loss function for the extractor is defined as the negative log probability of each word in K:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "L E = \u2212 nw i=1 log p \u03b8 (w i = w i |T m , w 1 , \u2022 \u2022 \u2022 , w i\u22121 ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We can also use the output to improve the generator. Since T m is discrete, the gradient cannot be passed to the generator as the cycleGAN (Zhu et al., 2017) does. To tackle this problem, we propose an Allocated Reinforcement Learning for Generator (ARLG) component to utilize the extractor's result to optimize the generator. Different rewards are allocated to different parts of the generator output. The gradient for the generator is denoted as \u2207 \u03c6 L ARLG which will be introduced in the later section.", |
| "cite_spans": [ |
| { |
| "start": 139, |
| "end": 157, |
| "text": "(Zhu et al., 2017)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In the txt2txt process, the input text", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "T = [t 1 , t 2 , \u2022 \u2022 \u2022 , t nt ] is transformed into its KB represen- tation K m = [k 1 , k 2 , \u2022 \u2022 \u2022 , k nm ] by the extractor E. K m is then transformed to T = [t 1 , t 2 , \u2022 \u2022 \u2022 , t nt ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "by the generator and the loss is defined as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "L G = \u2212 nt i=1 log p \u03c6 (t i = t i |K m , t 1 , \u2022 \u2022 \u2022 , t i\u22121 ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Similarly, we also propose an Allocated Reinforcement Learning for Extractor (ARLE) to utilize the generator's result to optimize the extractor. Different rewards are allocated to different parts of the extractor output. Let the gradient for the extractor be denoted as \u2207 \u03b8 L ARLE . The final gradient for extractor's parameters \u03b8 is formulated as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "\u2207 \u03b8 L E + \u2207 \u03b8 L ARLE while the gradient for generator's parameters \u03c6 is \u2207 \u03c6 L G + \u2207 \u03c6 L ARLG .",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We use the Adam (Kingma and Ba, 2014) as the optimizer to optimize all the parameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extractor-Generator Dual Framework", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The extractor and the generator are both backboned by the prevalent Transformer (Vaswani et al., 2017) model, which is a variant of the seq-to-seq model. It takes a sequence as input and generates another sequence as output. The Transformer model contains two parts, namely an encoder and a decoder. Both of them are built with several attention layers. We refer readers to the original paper (Vaswani et al., 2017) for more details.", |
| "cite_spans": [ |
| { |
| "start": 80, |
| "end": 102, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 393, |
| "end": 415, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Background of Transformer", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "To handle the cold start problem, we propose a novel pseudo data generator (PDG) to generate pseudo data. It contains two components, namely a pseudo text generator and a pseudo KB generator.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Pseudo Text Generator generates pseudo text for each KB and forms a pseudo supervised training data for pre-training the extractor and thus solving the cold start problem. We compute a statistics of the word count in the training set T t and calculate the empirical distribution for each word as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "p(w) = #w w \u2208Tt #w ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where #w stands for the total word count for w in T t . For a list of KB triples K", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "= [k 1 , k 2 , \u2022 \u2022 \u2022 , k n k ] = [h 1 , r 1 , t 1 , h 2 , r 2 , t 2 , \u2022 \u2022 \u2022 , h n k , r n k , t n k ],", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "we firstly sample head entities and tail entities as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "K s = [h 1 , t 1 , h 2 , t 2 , \u2022 \u2022 \u2022 ; h n , t n ].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "The final sequence is generated by sampling from both K s and p(w). When generating each word T i , a random number generator is used to generate a random number r i uniformly. r i is used to compare with a threshold parameter \u03b1. If r i > \u03b1, T i is sampled with the word distribution p(w), otherwise, it is sampled from the next token in K s . This process can be expressed mathematically as:",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "T i = \uf8f1 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f3 w \u223c p(w) r i > \u03b1 K s [1 + i\u22121 j=1 1(T j \u2208 K s )] otherwise ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "in which 1(C) = 1 if condition C is true and 0 otherwise. T j \u2208 K s indicates whether the word T j is sampled from K s . This pseudo text data is used to solve the cold start problem when training the extractor.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Pseudo KB Generator generates pseudo KB triples for each text and form a pseudo supervised training data. This data is used to solve the cold start problem when pre-training the generator. Similar with the work of Freitag and Roy (2018) , for an input sequence T we randomly remove words in the input text with a probability \u03b2 1 and sample new words by sampling words from a distribution with a probability \u03b2 2 . The generated sequence K is the pseudo KB sequence for each text. Similar to the Pseudo Text Generator, we randomly add some words by sampling from the distribution p(w). We do not use the probability calculated from K t since it may sample some wrong relations or wrong entity names which undermines the performance. Mathematically, it can be expressed as:",
| "cite_spans": [ |
| { |
| "start": 214, |
| "end": 236, |
| "text": "Freitag and Roy (2018)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "K i = \uf8f1 \uf8f4 \uf8f4 \uf8f2 \uf8f4 \uf8f4 \uf8f3 w \u223c p(w) r i < \u03b2 2 T s [1 + i\u22121 j=1 1(K j \u2208 T s )] otherwise ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "in which T s = s(T ) and s(\u2022) is a sample function defined as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "s(T ) = \uf8f1 \uf8f4 \uf8f2 \uf8f4 \uf8f3 T T = 0 [T 1 ; s(T 2: T )] r < \u03b2 1 , T = 0 s(T 2: T ) otherwise ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "where T denotes the length of the sequence T while T 2: T stands for the sub-sequence from the second to the last of T .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Pseudo Data Generator", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "Traditional reinforcement learning for sequence generation calculates a reward for the whole sequence (He et al., 2016; Hoang et al., 2018; Keneshloo et al., 2018) and uses the policy gradient (Sutton et al., 2000) algorithm to optimize the parameters. It suffers from the multiple-triple problem as discussed above. We propose an allocated reinforcement learning method to allocate different rewards for different KB triples and thus alleviate this problem. In the kb2kb process, the RL model is called the Allocated Reinforcement Learning for Generator (ARLG) since it optimizes the parameters in the generator while in the txt2txt process, it is called Allocated Reinforcement Learning for Extractor (ARLE) accordingly. ARLE is shown in Fig. 2 . The main idea is to recover and evaluate the KB triples separately which inherently has the following benefits: 1) Each triple is given a distinct reward as discussed above; 2) Traditional RL is more likely to ignore some triples (e.g., 3rd triple in Fig. 1 ) since it handles several triples at once while our method alleviates such problem by handling triples one by one. It firstly sends the input text", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 119, |
| "text": "(He et al., 2016;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 120, |
| "end": 139, |
| "text": "Hoang et al., 2018;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 140, |
| "end": 163, |
| "text": "Keneshloo et al., 2018)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 193, |
| "end": 214, |
| "text": "(Sutton et al., 2000)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 740, |
| "end": 746, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 1000, |
| "end": 1006, |
| "text": "Fig. 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "T = [t 1 , t 2 , \u2022 \u2022 \u2022 , t nt ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "into the extractor and get the extracted triples:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "K m = E(T ) = [k (1) m , k (2) m , \u2022 \u2022 \u2022 , k (n k ) m ].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "The corresponding probability for each token is denoted as p", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "(i)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "j , in which i denotes the ith triple and j denotes the jth word in the triple. Afterwards, the generator is applied on each triple in K m to recover the corresponding text, which is denoted as:",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "T = [G(k (1) m ), G(k (2) m ), \u2022 \u2022 \u2022 , G(k (n k ) m )] = [t 1 , t 2 , \u2022 \u2022 \u2022 , t n k ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": ". We calculate the reward for each k (i) m as the recall for each corresponding t i referring to T :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "R(k (i) m ) = t i j=1 1(t (j) i \u2208 T ) t i ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "in which t i denotes the length of t i and t", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "(j) i", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "is the jth word in t i . The reward for each sentence in K m is denoted as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "R e = [R(k (1) m ), R(k (2) m ), \u2022 \u2022 \u2022 , R(k (n k ) m )]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": ". Different from the traditional policy gradient algorithm (Sutton et al., 2000) , our RL uses a different reward for each generated triple. The gradient is calculated as:", |
| "cite_spans": [ |
| { |
| "start": 59, |
| "end": 80, |
| "text": "(Sutton et al., 2000)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "\u2207 \u03b8 L ARLE = \u2212E[ n k i=1 R(k (i) m ) k i j=1 \u2207 \u03b8 log p (i) j ].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Since the RL model only guides the model with some reward scores, which capture only one aspect of the result, it may mislead the model into generating some sequences which have a high reward while actually performing worse. To prevent this, we propose to conduct the gradient descent together with the kb2kb process simultaneously in which the extractor is trained with a supervised sequence.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "ARLG is applied in the kb2kb process. The input KB triples are firstly split into",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "n k triples K = [k 1 , k 2 , \u2022 \u2022 \u2022 , k n k ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "which is then sent into the generator separately and get the corresponding description sentences:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "T m = [G(k 1 ), G(k 2 ), \u2022 \u2022 \u2022 , G(k n k )] = [t (1) m , t (2) m , \u2022 \u2022 \u2022 , t (n k ) m ].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "The corresponding probability for the jth word in the ith sentence is denoted as p", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "(i)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "j . Afterwards, the text is sent into the extractor to recover the input KB triple for each t", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "(i) m : K = [E(t (1) m ), E(t (2) m ), \u2022 \u2022 \u2022 , E(t (n k ) m )] = [k 1 , k 2 , \u2022 \u2022 \u2022 , k n k ]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": ". We calculate the reward for each", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "t (i)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "m as the precision for each corresponding k i referring to k i in K:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "P (t (i) m ) = k i j=1 1(k (j) i \u2208 k i ) k i ,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "in which k i denotes the total word number count of k i . The reward for each sentence in T m is denoted as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "R g = [P (t (1) m ), P (t (2) m ), \u2022 \u2022 \u2022 , P (t (n k ) m )]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": ". We use RL to maximize the expected reward for each KB triple t (i) m with corresponding reward. The gradient is:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "\u2207 \u03c6 L ARLG = \u2212E[ n k i=1 P (t (i) m ) t i j=1 \u2207 \u03b8 log p (i) j ].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Similar to ARLE, we also train the model with the txt2txt process to give a targeted sequence to guide the training together with the reward score.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Allocated Reinforcement Learning", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "We adopt the WebNLG v2 dataset (Gardent et al., 2017a) 1 . It samples KB triples from DBpedia and annotates corresponding texts by crowdsourcing. In order to show that our model can work under the unsupervised setting, we split the original dataset into two parts, namely the KB part and the text part. We do not assume any alignment between 1 https://gitlab.com/shimorina/webnlg-dataset #triples 1 2 3 4 5 6 7 Total train 7,429 6,717 7,377 6,888 4,982 488 471 34,352 dev 924 842 919 877 632 64 58 4,316 test 931 831 903 838 608 58 55 4,224 Table 1 : Statistics for the dataset. The number of instances with different number of triples are listed.", |
| "cite_spans": [ |
| { |
| "start": 31, |
| "end": 54, |
| "text": "(Gardent et al., 2017a)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 388, |
| "end": 565, |
| "text": "#triples 1 2 3 4 5 6 7 Total train 7,429 6,717 7,377 6,888 4,982 488 471 34,352 dev 924 842 919 877 632 64 58 4,316 test 931 831 903 838 608 58 55 4,224 Table 1", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "KB and text. Table 1 shows the statistics of instances with different number of triples. In this dataset, one sentence can be mapped to at most seven triples. We use the same dev and test set as the original WebNLG. The training set has 34,352 samples in total while the dev set and the test set have 4,316 and 4,224 samples respectively. It can be observed that 78.2% of the sentences are mapped to multiple triples.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 13, |
| "end": 20, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We compare our model against the following baseline methods: PDG uses the Pseudo Data Generator to generate the pseudo data for pre-training both extractor and generator. PDG does not conduct the subsequent dual learning process and thus illustrates the capability of PDG.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Models", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "DL uses the dual learning process proposed in He et al. (2016) ; Zhu et al. (2017) . It is fine-tuned on the PDG model and iterates alternately between txt2txt and kb2kb processes. Here, we do not use any reinforcement learning component.",
| "cite_spans": [ |
| { |
| "start": 46, |
| "end": 62, |
| "text": "He et al. (2016)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 65, |
| "end": 82, |
| "text": "Zhu et al. (2017)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Models", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "DL-RL1 uses the dual learning process together with an RL component. It is similar to the dual learning method proposed in He et al. (2016) ; Zhu et al. (2017) . We use the PDG's data to train the weak model. It uses the log-likelihood of the recover process's output sequence as the reward.", |
| "cite_spans": [ |
| { |
| "start": 123, |
| "end": 139, |
| "text": "He et al. (2016)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 142, |
| "end": 159, |
| "text": "Zhu et al. (2017)", |
| "ref_id": "BIBREF38" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Models", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "DL-RL2 follows the settings of Sun et al. (2018) . Different from DL-RL1, this model uses the ROUGE L (Lin, 2004) score of the recovered sequence instead of using the log-likelihood as the reward.", |
| "cite_spans": [ |
| { |
| "start": 31, |
| "end": 48, |
| "text": "Sun et al. (2018)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 102, |
| "end": 113, |
| "text": "(Lin, 2004)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Models", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "SEG is a Supervised Extractor-Generator using the original setting of WebNLG for both generator and extractor. It utilizes all the alignment information between KB and text and thus provides an upper bound for our experiment.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Comparison Models", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We evaluate the performances of the generator and the extractor with several metrics including BLEU (Papineni et al., 2002) , NIST (Dodding- ton, 2002), METEOR (Banerjee and Lavie, 2005) , ROUGE L (Lin, 2004) , and CIDEr (Vedantam et al., 2015) . These metrics are calculated with the evaluation code provided in Novikova et al. (2017) . Moreover, we also evaluate the performance of the extractor with precision, recall, and F1 scores (Manning et al., 2010) . In PDG, we set \u03b1 = 0.8, \u03b2 1 = 0.2, \u03b2 2 = 0.6. We firstly pre-train the extractor and the generator in the PDG model with the data generated by PDG until convergence. All other models are fine-tuned on the PDG model. For the DL model, we train the generator for 5 steps with the txt2txt process and train the extractor with the kb2kb process for another 5 steps with the new generator. We iterate this process 10 times. For all transformers, we set clip norm to 1.0, label smoothing to 0.1, and dropout to 0.3. We use Adam (Kingma and Ba, 2014) as our optimizer and set the learning rate for the extractor to 2e-4 and generator to 5e-4. All hyper-parameters are tuned on the dev dataset with grid search.", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 123, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 160, |
| "end": 186, |
| "text": "(Banerjee and Lavie, 2005)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 197, |
| "end": 208, |
| "text": "(Lin, 2004)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 221, |
| "end": 244, |
| "text": "(Vedantam et al., 2015)", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 313, |
| "end": 335, |
| "text": "Novikova et al. (2017)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 436, |
| "end": 458, |
| "text": "(Manning et al., 2010)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental settings", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The performances of our KB-to-text generator and triple extractor are shown in the left and right of Table 2 respectively. Both generator and extractor of our model outperform all baseline models significantly and consistently. The comparison between our EGD model and the supervised SEG model indicates that our unsupervised EGD model is close to the bound of the supervised methods.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 101, |
| "end": 108, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Compared with the PDG model, our EGD model has a much better performance with the dual learning framework and the ARL component. Moreover, our EGD model outperforms the DL-RL1 and DL-RL2 model, which indicates that our proposed ARL component can handle the multiple-triple problem between triples and texts. In the traditional RL models, the reward is the same for a whole sequence including all the triples while in our ARL model, the reward is calculated for several subparts of the sequence, which is more accurate and effective. By comparing PDG with SEG, we found that the model trained with our proposed pseudo data generator (PDG)'s output achieves acceptable results. It indicates that using the PDG's output is a feasible alternative to initialize the model and can handle the cold start problem. Ablation Study. We also conduct some ablation studies to show that each component contributes to the final performance. The results are shown at the bottom part of Table 2 . By comparing the model EGD w/o ARLE and EGD w/o ARLG with the EGD model, we can see that both the ARLE and ARLG components are effective to handle the multiple-triple problem and help improve the performance. It is interesting to see that the result of EGD w/o PDG is extremely poor showing the importance of our PDG component. The EGD w/o PDG removes the pre-train stage with the pseudo data generator and conducts the iterations between txt2txt and kb2kb directly. Without PDG, we observe that the models tend to learn some \"own language\" without a good initialization which is incomprehensible to human.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 970, |
| "end": 977, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "The Influence of the KB triples Number. We analyze the influence of the KB triples' number on the performance. The results are shown in Fig. 3 . As expected, the SEG model performs the best over all numbers since it is fully supervised. The PDG model performs the worst since it only uses pseudo data to train. The DL model improves significantly compared with the PDG model over all numbers, especially in the extractor model. It shows that using dual learning's iteration approach does improve the model over training solely based on PDG's data. Our proposed EGD model outperforms the DL model and the PDG model. This shows that the ARL model does help to give more information to train the model. Nearly all generators' scores decrease as the number increases. This is because if the sequence is long, it has more ways to express those triples which may be different from the gold standard sentence. However, when extracting triples from the text, it only has one correct way and thus the extractor's scores are similar in all lengths.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 136, |
| "end": 142, |
| "text": "Fig. 3", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Error Analysis. We conduct an error analysis experiment for the top 20 mentioned relations in the extractor which is shown in Fig. 4 . We focus on two kinds of errors. The first kind of error is called \"false negative\" which means when extracting, some correct triples are ignored. The second kind of error is called \"false positive\" which means that the extractor generates some incorrect triples that the text does not mention. It can be observed that the \"false negative\" problem is much more severe than the \"false positive\" problem for the PDG model, while the DL model and the EGD model alleviate this problem a lot. The reason is that the pseudo text data is made by sampling entities in KB ignoring relation information. Iterating alternately between txt2txt and kb2kb solves the problem since the missing information is supplemented. It can also be observed that when comparing with the DL model, our EGD model mainly solves the \"false positive\" problem. The reason is that the RL can penalize the wrong generated triples but cannot give specific guidance on which missing triples the model should generate.",
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 126, |
| "end": 132, |
| "text": "Fig. 4", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Comparison with Semi-Supervised Learning. To measure the quality of the initialization via PDG, we compare our PDG method against the semisupervised learning method. We sample labeled data from the original dataset with different ratios to train the models and compare the results with the PDG model. The result is shown in Table 3 . It can be concluded from the result that training the extractor with the PDG's data outperforms training with 10% aligned data and it also outperforms 20% aligned data for the generator. It shows that our PDG component does provide usable data and it can be boosted a lot in the subsequent dual iteration process.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 324, |
| "end": 331, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "Case Study. Table 4 shows a case study for 4 models. For the extractor, the input is \"Virginia DeMarce is the author of 1634 : The Ram Rebellion , which can be found as an e -book .\". For the generator, the input is \"(1634 : The Ram Rebellion, mediaType, E -book) (1634 : The Ram Rebellion, author, Virginia DeMarce)\". It can be observed that for the PDG model, it omits the second triple. It also shows that the PDG model has a severe false negative problem which has been mentioned in the error analysis sub-section. The DL model alleviates this problem but it introduces more triples causing the false positive problem. Our EGD model solves the false positive problem by the RL component. All models make some mistakes in the generation process including the supervised SEG model. The result of the generator shows that it is more difficult to generate a sequence than extracting triples.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 12, |
| "end": 19, |
| "text": "Table 4", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "4.4" |
| }, |
| { |
| "text": "We propose a new challenging task, namely, unsupervised KB-to-text generation. To solve this task, we propose an extractor-generator dual framework which exploits the inverse relationship between the KB-to-text generation task and the auxiliary triple extraction task. To handle the cold start problem and the multiple-triple problem respectively, we propose a novel pseudo data generator and an allocated reinforcement learning component. Experimental results show that our proposed method successfully resolves the observed problems and outperforms all the baseline models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Dbpedia: A nucleus for a web of open data", |
| "authors": [ |
| { |
| "first": "S\u00f6ren", |
| "middle": [], |
| "last": "Auer", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Bizer", |
| "suffix": "" |
| }, |
| { |
| "first": "Georgi", |
| "middle": [], |
| "last": "Kobilarov", |
| "suffix": "" |
| }, |
| { |
| "first": "Jens", |
| "middle": [], |
| "last": "Lehmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Cyganiak", |
| "suffix": "" |
| }, |
| { |
| "first": "Zachary", |
| "middle": [ |
| "G" |
| ], |
| "last": "Ives", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "The Semantic Web, 6th International Semantic Web Conference, 2nd Asian Semantic Web Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "722--735", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S\u00f6ren Auer, Christian Bizer, Georgi Kobilarov, Jens Lehmann, Richard Cyganiak, and Zachary G. Ives. 2007. Dbpedia: A nucleus for a web of open data. In The Semantic Web, 6th International Semantic Web Conference, 2nd Asian Semantic Web Confer- ence, pages 722-735.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Meteor: An automatic metric for mt evaluation with improved correlation with human judgments", |
| "authors": [ |
| { |
| "first": "Satanjeev", |
| "middle": [], |
| "last": "Banerjee", |
| "suffix": "" |
| }, |
| { |
| "first": "Alon", |
| "middle": [], |
| "last": "Lavie", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization", |
| "volume": "", |
| "issue": "", |
| "pages": "65--72", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Satanjeev Banerjee and Alon Lavie. 2005. Meteor: An automatic metric for mt evaluation with improved correlation with human judgments. In Proceedings of the ACL Workshop on Intrinsic and Extrinsic Eval- uation Measures for Machine Translation and/or Summarization, pages 65-72.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Seq\u02c63: Differentiable sequence-to-sequence-to-sequence autoencoder for unsupervised abstractive sentence compression", |
| "authors": [ |
| { |
| "first": "Christos", |
| "middle": [], |
| "last": "Baziotis", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "673--681", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christos Baziotis, Ion Androutsopoulos, Ioannis Kon- stas, and Alexandros Potamianos. 2019. Seq\u02c63: Dif- ferentiable sequence-to-sequence-to-sequence au- toencoder for unsupervised abstractive sentence compression. In Proceedings of the 2019 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long and Short Pa- pers), pages 673-681.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Semantic parsing with dual learning", |
| "authors": [ |
| { |
| "first": "Ruisheng", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Su", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jieyu", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "51--64", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ruisheng Cao, Su Zhu, Chen Liu, Jieyu Li, and Kai Yu. 2019. Semantic parsing with dual learning. In Proceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 51-64, Florence, Italy. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Learning to sportscast: a test of grounded language acquisition", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond J", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mooney", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the 25th international conference on Machine learning", |
| "volume": "", |
| "issue": "", |
| "pages": "128--135", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David L Chen and Raymond J Mooney. 2008. Learn- ing to sportscast: a test of grounded language acqui- sition. In Proceedings of the 25th international con- ference on Machine learning, pages 128-135. ACM.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Entdesc: Entity description generation by exploringknowledge graph", |
| "authors": [ |
| { |
| "first": "Liying", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Dekun", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lidong", |
| "middle": [], |
| "last": "Bing", |
| "suffix": "" |
| }, |
| { |
| "first": "Yan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhanming", |
| "middle": [], |
| "last": "Jie", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Luo", |
| "middle": [], |
| "last": "Si", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Liying Cheng, Dekun Wu, Lidong Bing, Yan Zhang, Zhanming Jie, Wei Lu, and Luo Si. 2020. Ent- desc: Entity description generation by exploring- knowledge graph. In Proceedings of the Conference on Empirical Methods in Natural Language Process- ing.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Word translation without parallel data", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Herv\u00e9", |
| "middle": [], |
| "last": "J\u00e9gou", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "International Conference on Learning Representations (ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Guillaume Lample, Marc'Aurelio Ranzato, Ludovic Denoyer, and Herv\u00e9 J\u00e9gou. 2018. Word translation without parallel data. In Inter- national Conference on Learning Representations (ICLR).", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Automatic evaluation of machine translation quality using n-gram cooccurrence statistics", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Doddington", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the Second International Conference on Human Language Technology Research", |
| "volume": "", |
| "issue": "", |
| "pages": "138--145", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Doddington. 2002. Automatic evaluation of machine translation quality using n-gram co- occurrence statistics. In Proceedings of the Second International Conference on Human Language Tech- nology Research, pages 138-145.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Unsupervised natural language generation with denoising autoencoders", |
| "authors": [ |
| { |
| "first": "Markus", |
| "middle": [], |
| "last": "Freitag", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Roy", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "3922--3929", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Markus Freitag and Scott Roy. 2018. Unsupervised natural language generation with denoising autoen- coders. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 3922-3929.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Open domain event text generation", |
| "authors": [ |
| { |
| "first": "Zihao", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lidong", |
| "middle": [], |
| "last": "Bing", |
| "suffix": "" |
| }, |
| { |
| "first": "Wai", |
| "middle": [], |
| "last": "Lam", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Thirty-Fourth AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "7748--7755", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zihao Fu, Lidong Bing, and Wai Lam. 2020a. Open do- main event text generation. In Thirty-Fourth AAAI Conference on Artificial Intelligence, pages 7748- 7755.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Dynamic topic tracker for kb-to-text generation", |
| "authors": [ |
| { |
| "first": "Zihao", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "" |
| }, |
| { |
| "first": "Lidong", |
| "middle": [], |
| "last": "Bing", |
| "suffix": "" |
| }, |
| { |
| "first": "Wai", |
| "middle": [], |
| "last": "Lam", |
| "suffix": "" |
| }, |
| { |
| "first": "Shoaib", |
| "middle": [], |
| "last": "Jameel", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics: Technical Papers (COLING)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zihao Fu, Lidong Bing, Wai Lam, and Shoaib Jameel. 2020b. Dynamic topic tracker for kb-to-text genera- tion. In Proceedings of the 28th International Con- ference on Computational Linguistics: Technical Pa- pers (COLING).", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Partially-aligned data-to-text generation with distant supervision", |
| "authors": [ |
| { |
| "first": "Zihao", |
| "middle": [], |
| "last": "Fu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bei", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Wai", |
| "middle": [], |
| "last": "Lam", |
| "suffix": "" |
| }, |
| { |
| "first": "Lidong", |
| "middle": [], |
| "last": "Bing", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zihao Fu, Bei Shi, Wai Lam, Lidong Bing, and Zhiyuan Liu. 2020c. Partially-aligned data-to-text generation with distant supervision. In Proceedings of the Con- ference on Empirical Methods in Natural Language Processing (EMNLP).", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Creating training corpora for nlg micro-planners", |
| "authors": [ |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Gardent", |
| "suffix": "" |
| }, |
| { |
| "first": "Anastasia", |
| "middle": [], |
| "last": "Shimorina", |
| "suffix": "" |
| }, |
| { |
| "first": "Shashi", |
| "middle": [], |
| "last": "Narayan", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Perez-Beltrachini", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "179--188", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Claire Gardent, Anastasia Shimorina, Shashi Narayan, and Laura Perez-Beltrachini. 2017a. Creating train- ing corpora for nlg micro-planners. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 179-188.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "The webnlg challenge: Generating text from rdf data", |
| "authors": [ |
| { |
| "first": "Claire", |
| "middle": [], |
| "last": "Gardent", |
| "suffix": "" |
| }, |
| { |
| "first": "Anastasia", |
| "middle": [], |
| "last": "Shimorina", |
| "suffix": "" |
| }, |
| { |
| "first": "Shashi", |
| "middle": [], |
| "last": "Narayan", |
| "suffix": "" |
| }, |
| { |
| "first": "Laura", |
| "middle": [], |
| "last": "Perez-Beltrachini", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 10th International Conference on Natural Language Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "124--133", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Claire Gardent, Anastasia Shimorina, Shashi Narayan, and Laura Perez-Beltrachini. 2017b. The webnlg challenge: Generating text from rdf data. In Pro- ceedings of the 10th International Conference on Natural Language Generation, pages 124-133.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Dual learning for machine translation", |
| "authors": [ |
| { |
| "first": "Di", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Yingce", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Liwei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Nenghai", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Ying", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "820--828", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Di He, Yingce Xia, Tao Qin, Liwei Wang, Nenghai Yu, Tie-Yan Liu, and Wei-Ying Ma. 2016. Dual learn- ing for machine translation. In Advances in Neural Information Processing Systems, pages 820-828.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Iterative backtranslation for neural machine translation", |
| "authors": [ |
| { |
| "first": "Duy", |
| "middle": [], |
| "last": "Vu Cong", |
| "suffix": "" |
| }, |
| { |
| "first": "Philipp", |
| "middle": [], |
| "last": "Hoang", |
| "suffix": "" |
| }, |
| { |
| "first": "Gholamreza", |
| "middle": [], |
| "last": "Koehn", |
| "suffix": "" |
| }, |
| { |
| "first": "Trevor", |
| "middle": [], |
| "last": "Haffari", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Cohn", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2nd Workshop on Neural Machine Translation and Generation", |
| "volume": "", |
| "issue": "", |
| "pages": "18--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vu Cong Duy Hoang, Philipp Koehn, Gholamreza Haffari, and Trevor Cohn. 2018. Iterative back- translation for neural machine translation. In Pro- ceedings of the 2nd Workshop on Neural Machine Translation and Generation, pages 18-24.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Deep reinforcement learning for sequence to sequence models", |
| "authors": [ |
| { |
| "first": "Yaser", |
| "middle": [], |
| "last": "Keneshloo", |
| "suffix": "" |
| }, |
| { |
| "first": "Tian", |
| "middle": [], |
| "last": "Shi", |
| "suffix": "" |
| }, |
| { |
| "first": "Naren", |
| "middle": [], |
| "last": "Ramakrishnan", |
| "suffix": "" |
| }, |
| { |
| "first": "Chandan K", |
| "middle": [], |
| "last": "Reddy", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1805.09461" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yaser Keneshloo, Tian Shi, Naren Ramakrishnan, and Chandan K Reddy. 2018. Deep reinforcement learn- ing for sequence to sequence models. arXiv preprint arXiv:1805.09461.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Diederik", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1412.6980" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Unsupervised machine translation using monolingual corpora only", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample, Alexis Conneau, Ludovic Denoyer, and Marc'Aurelio Ranzato. 2018a. Unsupervised machine translation using monolingual corpora only. In International Conference on Learning Represen- tations (ICLR).", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Phrase-based & neural unsupervised machine translation", |
| "authors": [ |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Guillaume Lample, Myle Ott, Alexis Conneau, Lu- dovic Denoyer, and Marc'Aurelio Ranzato. 2018b. Phrase-based & neural unsupervised machine trans- lation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing (EMNLP).", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Neural text generation from structured data with application to the biography domain", |
| "authors": [ |
| { |
| "first": "R\u00e9mi", |
| "middle": [], |
| "last": "Lebret", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Grangier", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Auli", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1203--1213", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R\u00e9mi Lebret, David Grangier, and Michael Auli. 2016. Neural text generation from structured data with ap- plication to the biography domain. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1203-1213.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Deep reinforcement learning for dialogue generation", |
| "authors": [ |
| { |
| "first": "Jiwei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Will", |
| "middle": [], |
| "last": "Monroe", |
| "suffix": "" |
| }, |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Michel", |
| "middle": [], |
| "last": "Galley", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianfeng", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1192--1202", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiwei Li, Will Monroe, Alan Ritter, Dan Jurafsky, Michel Galley, and Jianfeng Gao. 2016. Deep rein- forcement learning for dialogue generation. In Pro- ceedings of the 2016 Conference on Empirical Meth- ods in Natural Language Processing, pages 1192- 1202.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Delete, retrieve, generate: a simple approach to sentiment and style transfer", |
| "authors": [ |
| { |
| "first": "Juncen", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Jia", |
| "suffix": "" |
| }, |
| { |
| "first": "He", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1865--1874", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Juncen Li, Robin Jia, He He, and Percy Liang. 2018. Delete, retrieve, generate: a simple approach to sen- timent and style transfer. In Proceedings of the Con- ference of the North American Chapter of the Asso- ciation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long Papers), vol- ume 1, pages 1865-1874.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Learning semantic correspondences with less supervision", |
| "authors": [ |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Jordan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "91--99", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Percy Liang, Michael Jordan, and Dan Klein. 2009. Learning semantic correspondences with less super- vision. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th In- ternational Joint Conference on Natural Language Processing of the AFNLP, pages 91-99. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Rouge: A package for automatic evaluation of summaries", |
| "authors": [ |
| { |
| "first": "Chin-Yew", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Workshop on Text Summarization Branches Out", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. Workshop on Text Summa- rization Branches Out.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "A dual reinforcement learning framework for unsupervised text style transfer", |
| "authors": [ |
| { |
| "first": "Fuli", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| }, |
| { |
| "first": "Peng", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jie", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Pengcheng", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Baobao", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifang", |
| "middle": [], |
| "last": "Sui", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 28th International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fuli Luo, Peng Li, Jie Zhou, Pengcheng Yang, Baobao Chang, Zhifang Sui, and Xu Sun. 2019. A dual rein- forcement learning framework for unsupervised text style transfer. In Proceedings of the 28th Interna- tional Joint Conference on Artificial Intelligence, IJ- CAI 2019.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Introduction to information retrieval", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Prabhakar", |
| "middle": [], |
| "last": "Raghavan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Natural Language Engineering", |
| "volume": "16", |
| "issue": "1", |
| "pages": "100--103", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher Manning, Prabhakar Raghavan, and Hin- rich Sch\u00fctze. 2010. Introduction to information re- trieval. Natural Language Engineering, 16(1):100- 103.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "The e2e dataset: New challenges for end-toend generation", |
| "authors": [ |
| { |
| "first": "Jekaterina", |
| "middle": [], |
| "last": "Novikova", |
| "suffix": "" |
| }, |
| { |
| "first": "Ond\u0159ej", |
| "middle": [], |
| "last": "Du\u0161ek", |
| "suffix": "" |
| }, |
| { |
| "first": "Verena", |
| "middle": [], |
| "last": "Rieser", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue", |
| "volume": "", |
| "issue": "", |
| "pages": "201--206", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jekaterina Novikova, Ond\u0159ej Du\u0161ek, and Verena Rieser. 2017. The e2e dataset: New challenges for end-to- end generation. In Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue, pages 201-206.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the Annual meeting on Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic eval- uation of machine translation. In Proceedings of the Annual meeting on Association for Computational Linguistics, pages 311-318.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Improving neural machine translation models with monolingual data", |
| "authors": [ |
| { |
| "first": "Rico", |
| "middle": [], |
| "last": "Sennrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Haddow", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "Birch", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "86--96", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Improving neural machine translation mod- els with monolingual data. In Proceedings of the 54th Annual Meeting of the Association for Compu- tational Linguistics (Volume 1: Long Papers), pages 86-96.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Logician and orator: Learning from the duality between language and knowledge in open domain", |
| "authors": [ |
| { |
| "first": "Mingming", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Ping", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2119--2130", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mingming Sun, Xu Li, and Ping Li. 2018. Logician and orator: Learning from the duality between lan- guage and knowledge in open domain. In Proceed- ings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2119-2130.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Sequence to sequence learning with neural networks", |
| "authors": [ |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3104--3112", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V Le. 2014. Sequence to sequence learning with neural networks. In Advances in Neural Information Processing Sys- tems, pages 3104-3112.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Policy gradient methods for reinforcement learning with function approximation", |
| "authors": [ |
| { |
| "first": "S", |
| "middle": [], |
| "last": "Richard", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "A" |
| ], |
| "last": "Sutton", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mcallester", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "Satinder", |
| "suffix": "" |
| }, |
| { |
| "first": "Yishay", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mansour", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1057--1063", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard S Sutton, David A McAllester, Satinder P Singh, and Yishay Mansour. 2000. Policy gradient methods for reinforcement learning with function ap- proximation. In Advances in neural information pro- cessing systems, pages 1057-1063.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information pro- cessing systems, pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Cider: Consensus-based image description evaluation", |
| "authors": [ |
| { |
| "first": "Ramakrishna", |
| "middle": [], |
| "last": "Vedantam", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [ |
| "Lawrence" |
| ], |
| "last": "Zitnick", |
| "suffix": "" |
| }, |
| { |
| "first": "Devi", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "4566--4575", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. 2015. Cider: Consensus-based image de- scription evaluation. In Proceedings of IEEE Con- ference on Computer Vision and Pattern Recogni- tion, pages 4566-4575.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Challenges in data-to-document generation", |
| "authors": [ |
| { |
| "first": "Sam", |
| "middle": [], |
| "last": "Wiseman", |
| "suffix": "" |
| }, |
| { |
| "first": "Stuart", |
| "middle": [], |
| "last": "Shieber", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Rush", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "2253--2263", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sam Wiseman, Stuart Shieber, and Alexander Rush. 2017. Challenges in data-to-document generation. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2253-2263.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Dualgan: Unsupervised dual learning for image-to-image translation", |
| "authors": [ |
| { |
| "first": "Zili", |
| "middle": [], |
| "last": "Yi", |
| "suffix": "" |
| }, |
| { |
| "first": "Hao", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the IEEE international conference on computer vision", |
| "volume": "", |
| "issue": "", |
| "pages": "2849--2857", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zili Yi, Hao Zhang, Ping Tan, and Minglun Gong. 2017. Dualgan: Unsupervised dual learning for image-to- image translation. In Proceedings of the IEEE in- ternational conference on computer vision, pages 2849-2857.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Seqgan: Sequence generative adversarial nets with policy gradient", |
| "authors": [ |
| { |
| "first": "Lantao", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Weinan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yong", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Thirty-First AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lantao Yu, Weinan Zhang, Jun Wang, and Yong Yu. 2017. Seqgan: Sequence generative adversarial nets with policy gradient. In Thirty-First AAAI Confer- ence on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Unpaired image-to-image translation using cycle-consistent adversarial networks", |
| "authors": [ |
| { |
| "first": "Jun-Yan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Taesung", |
| "middle": [], |
| "last": "Park", |
| "suffix": "" |
| }, |
| { |
| "first": "Phillip", |
| "middle": [], |
| "last": "Isola", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexei", |
| "middle": [ |
| "A" |
| ], |
| "last": "Efros", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the IEEE international conference on computer vision", |
| "volume": "", |
| "issue": "", |
| "pages": "2223--2232", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. 2017. Unpaired image-to-image translation using cycle-consistent adversarial networks. In Pro- ceedings of the IEEE international conference on computer vision, pages 2223-2232.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "The extractor-generator dual (EGD) framework. It contains three processes namely a pre-train process, a kb2kb process and a txt2txt process." |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "The influence of KB triples count. The xaxis represents the KB triples count while the y-axis represents the scores." |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "uris": null, |
| "num": null, |
| "text": "Error analysis for top 20 mentioned relations." |
| }, |
| "TABREF2": { |
| "num": null, |
| "type_str": "table", |
| "text": "Results for generator (left) and extractor (right), which are evaluated with generation metrics. For the extractor, precision, recall, and F1 scores are also calculated at triple's level. The performances of our EGD method without different components and the supervised method SEG are shown in the bottom.", |
| "content": "<table><tr><td>Ratio</td><td colspan=\"2\">Generator BLEU ROUGE L</td><td colspan=\"2\">Extractor BLEU ROUGE L</td></tr><tr><td>0.10</td><td>0.235</td><td>0.439</td><td>0.335</td><td>0.557</td></tr><tr><td>0.15</td><td>0.281</td><td>0.49</td><td>0.655</td><td>0.708</td></tr><tr><td>0.20</td><td>0.308</td><td>0.506</td><td>0.746</td><td>0.757</td></tr><tr><td>0.25</td><td>0.347</td><td>0.524</td><td>0.71</td><td>0.764</td></tr><tr><td>PDG</td><td>0.322</td><td>0.505</td><td>0.489</td><td>0.618</td></tr></table>", |
| "html": null |
| }, |
| "TABREF3": { |
| "num": null, |
| "type_str": "table", |
| "text": "Compare our PDG framework with semisupervised models at different labeling ratios.", |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF4": { |
| "num": null, |
| "type_str": "table", |
| "text": "Case study. The input KB and text are listed in the first row.", |
| "content": "<table/>", |
| "html": null |
| } |
| } |
| } |
| } |