Entry Goto:2014:DMB from talip.bib

Last update: Sun Oct 15 02:55:04 MDT 2017                Valid HTML 3.2!

Index sections

Top | Symbols | Numbers | Math | A | B | C | D | E | F | G | H | I | J | K | L | M | N | O | P | Q | R | S | T | U | V | W | X | Y | Z

BibTeX entry

@Article{Goto:2014:DMB,
  author =       "Isao Goto and Masao Utiyama and Eiichiro Sumita and
                 Akihiro Tamura and Sadao Kurohashi",
  title =        "Distortion Model Based on Word Sequence Labeling for
                 Statistical Machine Translation",
  journal =      j-TALIP,
  volume =       "13",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2537128",
  ISSN =         "1530-0226 (print), 1558-3430 (electronic)",
  ISSN-L =       "1530-0226",
  bibdate =      "Thu Feb 27 12:18:55 MST 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/talip.bib",
  abstract =     "This article proposes a new distortion model for
                 phrase-based statistical machine translation. In
                 decoding, a distortion model estimates the source word
                 position to be translated next (subsequent position;
                 SP) given the last translated source word position
                 (current position; CP). We propose a distortion model
                 that can simultaneously consider the word at the CP,
                 the word at an SP candidate, the context of the CP and
                 an SP candidate, relative word order among the SP
                 candidates, and the words between the CP and an SP
                 candidate. These considered elements are called rich
                 context. Our model considers rich context by
                 discriminating label sequences that specify spans from
                 the CP to each SP candidate. It enables our model to
                 learn the effect of relative word order among SP
                 candidates as well as to learn the effect of distances
                 from the training data. In contrast to the learning
                 strategy of existing methods, our learning strategy is
                 that the model learns preference relations among SP
                 candidates in each sentence of the training data. This
                 learning strategy enables consideration of all of the
                 rich context simultaneously. In our experiments, our
                 model had higher BLEU and RIBES scores for
                 Japanese--English, Chinese--English, and German--English
                 translation compared to the lexical reordering
                 models.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Asian Language Information
                 Processing",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?&idx=J820",
}

Related entries