@inproceedings{sutskever14sequence,
abstract = {Deep Neural Networks (DNNs) are powerful models that have achieved excellent performance on difficult learning tasks. Although DNNs work well whenever large labeled training sets are available, they cannot be used to map sequences to sequences. In this paper, we present a general end-to-end approach to sequence learning that makes minimal assumptions on the sequence structure. Our method uses a multilayered Long Short-Term Memory (LSTM) to map the input sequence to a vector of a fixed dimensionality, and then another deep LSTM to decode the target sequence from the vector. Our main result is that on an English to French translation task from the WMT'14 dataset, the translations produced by the LSTM achieve a BLEU score of 34.8 on the entire test set, where the LSTM's BLEU score was penalized on out-of-vocabulary words. Additionally, the LSTM did not have difficulty on long sentences. For comparison, a phrase-based SMT system achieves a BLEU score of 33.3 on the same dataset. When we used the LSTM to rerank the 1000 hypotheses produced by the aforementioned SMT system, its BLEU score increases to 36.5, which is close to the previous best result on this task. The LSTM also learned sensible phrase and sentence representations that are sensitive to word order and are relatively invariant to the active and the passive voice. Finally, we found that reversing the order of the words in all source sentences (but not target sentences) improved the LSTM's performance markedly, because doing so introduced many short term dependencies between the source and the target sentence which made the optimization problem easier.},
archivePrefix = {arXiv},
arxivId = {1409.3215},
author = {Sutskever, Ilya and Vinyals, Oriol and Le, Quoc V.},
booktitle = {NIPS},
eprint = {1409.3215},
pages = {3104--3112},
title = {{Sequence to Sequence Learning with Neural Networks}},
url = {http://arxiv.org/abs/1409.3215},
year = {2014}
}
@inproceedings{Bahdanau2015,
abstract = {Neural machine translation is a recently proposed approach to machine transla-tion. Unlike the traditional statistical machine translation, the neural machine translation aims at building a single neural network that can be jointly tuned to maximize the translation performance. The models proposed recently for neu-ral machine translation often belong to a family of encoder–decoders and encode a source sentence into a fixed-length vector from which a decoder generates a translation. In this paper, we conjecture that the use of a fixed-length vector is a bottleneck in improving the performance of this basic encoder–decoder architec-ture, and propose to extend this by allowing a model to automatically (soft-)search for parts of a source sentence that are relevant to predicting a target word, without having to form these parts as a hard segment explicitly. With this new approach, we achieve a translation performance comparable to the existing state-of-the-art phrase-based system on the task of English-to-French translation. Furthermore, qualitative analysis reveals that the (soft-)alignments found by the model agree well with our intuition.},
archivePrefix = {arXiv},
arxivId = {1409.0473},
author = {Bahdanau, Dzmitry and Cho, Kyunghyun and Bengio, Yoshua},
booktitle = {ICLR},
eprint = {1409.0473},
pages = {1--15},
title = {{Neural Machine Translation By Jointly Learning To Align and Translate}},
url = {http://arxiv.org/abs/1409.0473},
year = {2015}
}
@inproceedings{Xu2015,
abstract = {Inspired by recent work in machine translation and object detection, we introduce an attention based model that automatically learns to describe the content of images. We describe how we can train this model in a deterministic manner using standard backpropagation techniques and stochastically by maximizing a variational lower bound. We also show through visualization how the model is able to automatically learn to fix its gaze on salient objects while generating the corresponding words in the output sequence. We validate the use of attention with state-of-the-art performance on three benchmark datasets: Flickr8k, Flickr30k and MS COCO.},
archivePrefix = {arXiv},
arxivId = {1502.03044},
author = {Xu, Kelvin and Ba, Jimmy and Kiros, Ryan and Cho, Kyunghyun and Courville, Aaron and Salakhutdinov, Ruslan and Zemel, Richard and Bengio, Yoshua},
eprint = {1502.03044},
booktitle = {ICML},
month = {feb},
title = {{Show, Attend and Tell: Neural Image Caption Generation with Visual Attention}},
url = {http://arxiv.org/abs/1502.03044},
year = {2015}
}
@article{systran,
title={SYSTRAN's Pure Neural Machine Translation System},
author={Josep Crego and Jungi Kim and Jean Senellart},
journal={arXiv preprint arXiv:1610.05540},
year={2016}
}
@InProceedings{Cho2014,
title = {{L}earning {P}hrase {R}epresentations using {RNN} {E}ncoder-{D}ecoder for {S}tatistical {M}achine {T}ranslation},
author = {Kyunghyun Cho and Bart van Merrienboer and Caglar Gulcehre and Dzmitry Bahdanau and Fethi Bougares and Holger Schwenk and Yoshua Bengio},
booktitle = {Proc of EMNLP},
year = {2014}
}
@InProceedings{Luong2015,
title = {{E}ffective {A}pproaches to {A}ttention-based {N}eural {M}achine {T}ranslation},
author = {Minh-Thang Luong and Hieu Pham and Christopher D. Manning},
booktitle = {Proc of EMNLP},
year = {2015}
}
@InProceedings{Luong2015b,
title = {{A}ddressing the {R}are {W}ord {P}roblem in {N}eural {M}achine {T}ranslation},
author = {Minh-Thang Luong and Ilya Sutskever and Quoc Le and Oriol Vinyals and Wojciech Zaremba},
booktitle = {Proc of ACL},
year = {2015}
}
@article{wu2016google,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Wu, Yonghui and Schuster, Mike and Chen, Zhifeng and Le, Quoc V and Norouzi, Mohammad and Macherey, Wolfgang and Krikun, Maxim and Cao, Yuan and Gao, Qin and Macherey, Klaus and others},
journal={arXiv preprint arXiv:1609.08144},
year={2016}
}
@article{johnson2016google,
title={Google's Multilingual Neural Machine Translation System: Enabling Zero-Shot Translation},
author={Johnson, Melvin and Schuster, Mike and Le, Quoc V and Krikun, Maxim and Wu, Yonghui and Chen, Zhifeng and Thorat, Nikhil and Vi{\'e}gas, Fernanda and Wattenberg, Martin and Corrado, Greg and Hughes, Macduff and Dean, Jeffrey},
journal={arXiv preprint arXiv:1611.04558},
year={2016}
}
@inproceedings{dean2012large,
title={Large scale distributed deep networks},
author={Dean, Jeffrey and Corrado, Greg and Monga, Rajat and Chen, Kai and Devin, Matthieu and Mao, Mark and Senior, Andrew and Tucker, Paul and Yang, Ke and Le, Quoc V and others},
booktitle={Advances in neural information processing systems},
pages={1223--1231},
year={2012}
}
@inproceedings{koehn2007moses,
title={Moses: Open source toolkit for statistical machine translation},
author={Koehn, Philipp and Hoang, Hieu and Birch, Alexandra and Callison-Burch, Chris and Federico, Marcello and Bertoldi, Nicola and Cowan, Brooke and Shen, Wade and Moran, Christine and Zens, Richard and others},
booktitle={Proc of ACL},
pages={177--180},
year={2007},
organization={Association for Computational Linguistics}
}
@inproceedings{dyer2010cdec,
title={cdec: A decoder, alignment, and learning framework for finite-state and context-free translation models},
author={Dyer, Chris and Weese, Jonathan and Setiawan, Hendra and Lopez, Adam and Ture, Ferhan and Eidelman, Vladimir and Ganitkevitch, Juri and Blunsom, Phil and Resnik, Philip},
booktitle={Proceedings of the ACL 2010 System Demonstrations},
pages={7--12},
year={2010},
organization={Association for Computational Linguistics}
}
@article{hochreiter1997long,
title={Long short-term memory},
author={Hochreiter, Sepp and Schmidhuber, J{\"u}rgen},
journal={Neural computation},
volume={9},
number={8},
pages={1735--1780},
year={1997},
publisher={MIT Press}
}
@article{chung2014empirical,
title={Empirical evaluation of gated recurrent neural networks on sequence modeling},
author={Chung, Junyoung and Gulcehre, Caglar and Cho, KyungHyun and Bengio, Yoshua},
journal={arXiv preprint arXiv:1412.3555},
year={2014}
}
@article{sennrich2016linguistic,
title={Linguistic Input Features Improve Neural Machine Translation},
author={Sennrich, Rico and Haddow, Barry},
journal={arXiv preprint arXiv:1606.02892},
year={2016}
}
@inproceedings{yang2016hierarchical,
title={Hierarchical attention networks for document classification},
author={Yang, Zichao and Yang, Diyi and Dyer, Chris and He, Xiaodong and Smola, Alex and Hovy, Eduard},
booktitle={Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
year={2016}
}
@article{martins2016softmax,
title={From Softmax to Sparsemax: A Sparse Model of Attention and Multi-Label Classification},
author={Martins, Andr{\'e} FT and Astudillo, Ram{\'o}n Fernandez},
journal={arXiv preprint arXiv:1602.02068},
year={2016}
}
@article{DBLP:journals/corr/LeonardWW15,
author = {Nicholas L{\'{e}}onard and
Sagar Waghmare and
Yang Wang and
Jin{-}Hwa Kim},
title = {rnn : Recurrent Library for Torch},
journal = {CoRR},
volume = {abs/1511.07889},
year = {2015},
url = {http://arxiv.org/abs/1511.07889},
}
@inproceedings{DBLP:conf/conll/BowmanVVDJB16,
author = {Samuel R. Bowman and
Luke Vilnis and
Oriol Vinyals and
Andrew M. Dai and
Rafal J{\'{o}}zefowicz and
Samy Bengio},
title = {Generating Sentences from a Continuous Space},
booktitle = {Proceedings of the 20th {SIGNLL} Conference on Computational Natural
Language Learning, CoNLL 2016, Berlin, Germany, August 11-12, 2016},
pages = {10--21},
year = {2016},
url = {http://aclweb.org/anthology/K/K16/K16-1002.pdf},
}
@inproceedings{DBLP:conf/nips/VinyalsBLKW16,
author = {Oriol Vinyals and
Charles Blundell and
Tim Lillicrap and
Koray Kavukcuoglu and
Daan Wierstra},
title = {Matching Networks for One Shot Learning},
booktitle = {Advances in Neural Information Processing Systems 29: Annual Conference
on Neural Information Processing Systems 2016, December 5-10, 2016,
Barcelona, Spain},
pages = {3630--3638},
year = {2016},
url = {http://papers.nips.cc/paper/6385-matching-networks-for-one-shot-learning},
}
@article{DBLP:journals/corr/WestonCB14,
author = {Jason Weston and
Sumit Chopra and
Antoine Bordes},
title = {Memory Networks},
journal = {CoRR},
volume = {abs/1410.3916},
year = {2014},
url = {http://arxiv.org/abs/1410.3916},
}
@article{DBLP:journals/corr/XuBKCCSZB15,
author = {Kelvin Xu and
Jimmy Ba and
Ryan Kiros and
Kyunghyun Cho and
Aaron C. Courville and
Ruslan Salakhutdinov and
Richard S. Zemel and
Yoshua Bengio},
title = {Show, Attend and Tell: Neural Image Caption Generation with Visual
Attention},
journal = {CoRR},
volume = {abs/1502.03044},
year = {2015},
url = {http://arxiv.org/abs/1502.03044},
}
@article{DBLP:journals/corr/DengKR16,
author = {Yuntian Deng and
Anssi Kanervisto and
Alexander M. Rush},
title = {What You Get Is What You See: {A} Visual Markup Decompiler},
journal = {CoRR},
volume = {abs/1609.04938},
year = {2016},
url = {http://arxiv.org/abs/1609.04938},
}
@article{DBLP:journals/corr/ChanJLV15,
author = {William Chan and
Navdeep Jaitly and
Quoc V. Le and
Oriol Vinyals},
title = {Listen, Attend and Spell},
journal = {CoRR},
volume = {abs/1508.01211},
year = {2015},
url = {http://arxiv.org/abs/1508.01211},
}
@article{DBLP:journals/corr/SennrichHB15,
author = {Rico Sennrich and
Barry Haddow and
Alexandra Birch},
title = {Neural Machine Translation of Rare Words with Subword Units},
journal = {CoRR},
volume = {abs/1508.07909},
year = {2015},
url = {http://arxiv.org/abs/1508.07909},
}
@inproceedings{chopra2016abstractive,
title={Abstractive sentence summarization with attentive recurrent neural networks},
author={Chopra, Sumit and Auli, Michael and Rush, Alexander M},
booktitle={Proceedings of NAACL-HLT16},
pages={93--98},
year={2016}
}
@article{vinyals2015neural,
title={A neural conversational model},
author={Vinyals, Oriol and Le, Quoc},
journal={arXiv preprint arXiv:1506.05869},
year={2015}
}