Entry Liu:2014:DTL from talip.bib

Last update: Sun Oct 15 02:55:04 MDT 2017

BibTeX entry

@Article{Liu:2014:DTL,
  author =       "Lemao Liu and Tiejun Zhao and Taro Watanabe and
                 Hailong Cao and Conghui Zhu",
  title =        "Discriminative Training for Log-Linear Based {SMT}:
                 Global or Local Methods",
  journal =      j-TALIP,
  volume =       "13",
  number =       "4",
  pages =        "17:1--17:??",
  month =        dec,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2637478",
  ISSN =         "1530-0226 (print), 1558-3430 (electronic)",
  ISSN-L =       "1530-0226",
  bibdate =      "Wed Jan 7 15:23:49 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/talip.bib",
  abstract =     "In statistical machine translation, standard tuning
                 methods such as MERT learn a single weight vector on a
                 given development set. These methods suffer from two
                 problems caused by the diversity and uneven
                 distribution of source sentences. First, their
                 performance depends heavily on the choice of
                 development set, which may lead to unstable performance
                 at test time. Second, sentence-level translation
                 quality is not assured, since tuning is performed at
                 the document level rather than at the sentence level.
                 In contrast to standard global training, in which a
                 single weight vector is learned, we propose novel local
                 training methods to address these two problems. We
                 perform training and testing in one step by locally
                 learning a sentence-wise weight vector for each input
                 sentence. Since each tuning step takes non-negligible
                 time and learning sentence-wise weights for the entire
                 test set requires many passes of tuning, efficiency is
                 a major challenge for local training. We propose an
                 efficient two-phase method that makes local training
                 practical by employing an ultraconservative update. On
                 NIST Chinese-to-English translation tasks with both
                 medium and large scales of training data, our local
                 training methods significantly outperform the standard
                 methods, with maximal improvements of up to 2.0 BLEU
                 points, while their efficiency remains comparable to
                 that of the standard methods.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Asian Language Information
                 Processing",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J820",
}
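
Method sketch

The abstract names two ingredients: a log-linear translation model,
whose score for a hypothesis is a weighted sum of feature values, and a
MIRA-style "ultraconservative" update used to tune sentence-wise
weights efficiently. Below is a minimal Python sketch of both, not the
authors' implementation: the sparse dict-based feature vectors, the
BLEU-derived loss argument, and the clip constant C are illustrative
assumptions.

    from typing import Dict

    Feats = Dict[str, float]

    def loglinear_score(weights: Feats, feats: Feats) -> float:
        # Log-linear model: the score of a hypothesis is the dot
        # product of the weight vector with its feature vector.
        return sum(weights.get(name, 0.0) * value
                   for name, value in feats.items())

    def ultraconservative_update(weights: Feats, oracle: Feats,
                                 rival: Feats, loss: float,
                                 C: float = 0.01) -> Feats:
        # One MIRA-style update: change the weights as little as
        # possible so that the oracle hypothesis outscores the rival
        # by a margin of `loss` (e.g. a sentence-BLEU difference),
        # with the step size clipped to [0, C].
        margin = (loglinear_score(weights, oracle)
                  - loglinear_score(weights, rival))
        diff = {name: oracle.get(name, 0.0) - rival.get(name, 0.0)
                for name in set(oracle) | set(rival)}
        norm_sq = sum(v * v for v in diff.values())
        if norm_sq == 0.0:  # identical feature vectors: nothing to learn
            return dict(weights)
        tau = min(C, max(0.0, (loss - margin) / norm_sq))
        updated = dict(weights)
        for name, v in diff.items():
            updated[name] = updated.get(name, 0.0) + tau * v
        return updated

    # Tiny usage example with made-up feature names and values.
    w = {"lm": 0.5, "tm": 0.3, "length": -0.1}
    oracle = {"lm": 1.2, "tm": 0.9, "length": 1.0}
    rival = {"lm": 1.5, "tm": 0.4, "length": 1.0}
    w = ultraconservative_update(w, oracle, rival, loss=0.35)

In local training, an update of this kind could be applied per input
sentence; since the step size never exceeds C, each update stays cheap
and conservative, which is what keeps the method's efficiency close to
that of standard global tuning.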
