%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.10",
%%%     date            = "29 August 2016",
%%%     time            = "06:42:18 MDT",
%%%     filename        = "teac.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "http://www.math.utah.edu/~beebe",
%%%     checksum        = "59988 4187 23967 219964",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "bibliography; BibTeX; ACM Transactions on
%%%                        Economics and Computation (TEAC)",
%%%     license         = "public domain",
%%%     supported       = "no",
%%%     docstring       = "This is a COMPLETE BibTeX bibliography for
%%%                        the journal ACM Transactions on Economics and
%%%                        Computation (no CODEN, ISSN 2167-8375
%%%                        (print), 2167-8383 (electronic)), for
%%%                        2013--date.
%%%
%%%                        Publication began with volume 1, number 1,
%%%                        in January 2013.  The journal appears
%%%                        quarterly, in January, May, September, and
%%%                        December.
%%%
%%%                        The journal has a World-Wide Web site at:
%%%
%%%                            http://teac.acm.org/
%%%
%%%                        Tables-of-contents of all issues are
%%%                        available at:
%%%
%%%                            http://dl.acm.org/citation.cfm?id=2542174
%%%
%%%                        Qualified subscribers can retrieve the full
%%%                        text of recent articles in PDF form.
%%%
%%%                        At version 1.10, the COMPLETE journal
%%%                        coverage looked like this:
%%%
%%%                             2013 (  21)    2015 (  32)
%%%                             2014 (  17)    2016 (  19)
%%%
%%%                             Article:         89
%%%
%%%                             Total entries:   89
%%%
%%%                        Spelling has been verified with the UNIX
%%%                        spell and GNU ispell programs using the
%%%                        exception dictionary stored in the
%%%                        companion file with extension .sok.
%%%
%%%                        BibTeX citation tags are uniformly chosen
%%%                        as name:year:abbrev, where name is the
%%%                        family name of the first author or editor,
%%%                        year is a 4-digit number, and abbrev is a
%%%                        3-letter condensation of important title
%%%                        words. Citation tags were automatically
%%%                        generated by software developed for the
%%%                        BibNet Project.
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, using ``bibsort -byvolume.''
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.",
%%%  }
%%% ====================================================================
@Preamble{"\input bibnames.sty"}

%%% ====================================================================
%%% Acknowledgement abbreviations:
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|http://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:
@String{j-TEAC                  = "ACM Transactions on Economics and
                                  Computation"}

%%% ====================================================================
%%% Publisher abbreviations:
@String{pub-ACM                 = "ACM Press"}
@String{pub-ACM:adr             = "New York, NY 10036, USA"}

%%% ====================================================================
%%% Bibliography entries:
@Article{Conitzer:2013:ATE,
  author =       "Vincent Conitzer and R. Preston Mcafee",
  title =        "The {ACM Transactions on Economics and Computation}:
                 an introduction",
  journal =      j-TEAC,
  volume =       "1",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2399187.2399188",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:51 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Gradwohl:2013:SRC,
  author =       "Ronen Gradwohl and Noam Livne and Alon Rosen",
  title =        "Sequential rationality in cryptographic protocols",
  journal =      j-TEAC,
  volume =       "1",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2399187.2399189",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:51 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Much of the literature on rational cryptography
                 focuses on analyzing the strategic properties of
                 cryptographic protocols. However, due to the presence
                 of computationally-bounded players and the asymptotic
                 nature of cryptographic security, a definition of
                 sequential rationality for this setting has thus far
                 eluded researchers. We propose a new framework for
                 overcoming these obstacles, and provide the first
                 definitions of computational solution concepts that
                 guarantee sequential rationality. We argue that natural
                 computational variants of subgame perfection are too
                 strong for cryptographic protocols. As an alternative,
                 we introduce a weakening called threat-free Nash
                 equilibrium that is more permissive but still
                 eliminates the undesirable ``empty threats'' of
                 nonsequential solution concepts. To demonstrate the
                 applicability of our framework, we revisit the problem
                 of implementing a mediator for correlated equilibria
                 [Dodis et al. 2000], and propose a variant of their
                 protocol that is sequentially rational for a nontrivial
                 class of correlated equilibria. Our treatment provides
                 a better understanding of the conditions under which
                 mediators in a correlated equilibrium can be replaced
                 by a stable protocol.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Jain:2013:GTA,
  author =       "Shaili Jain and David C. Parkes",
  title =        "A game-theoretic analysis of the {ESP} game",
  journal =      j-TEAC,
  volume =       "1",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2399187.2399190",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:51 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "``Games with a Purpose'' are interactive games that
                 users play because they are fun, with the added benefit
                 that the outcome of play is useful work. The ESP game,
                 developed by von Ahn and Dabbish [2004], is an example
                 of such a game devised to label images on the web.
                 Since labeling images is a hard problem for computer
                 vision algorithms and can be tedious and time-consuming
                 for humans, the ESP game provides humans with incentive
                 to do useful work by being enjoyable to play. We
                 present a simple game-theoretic model of the ESP game
                 and characterize the equilibrium behavior in our model.
                 Our equilibrium analysis supports the fact that users
                 appear to coordinate on low effort words. We provide an
                 alternate model of user preferences, modeling a change
                 that could be induced through a different scoring
                 method, and show that equilibrium behavior in this
                 model coordinates on high-effort words. We also give
                 sufficient conditions for coordinating on high-effort
                 words to be a Bayesian-Nash equilibrium. Our results
                 suggest the possibility of formal incentive design in
                 achieving desirable system-wide outcomes for the
                 purpose of human computation, complementing existing
                 considerations of robustness against cheating and human
                 factors.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Naroditskiy:2013:OPD,
  author =       "Victor Naroditskiy and Maria Polukarov and Nicholas R.
                 Jennings",
  title =        "Optimal payments in dominant-strategy mechanisms for
                 single-parameter domains",
  journal =      j-TEAC,
  volume =       "1",
  number =       "1",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2399187.2399191",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:51 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study dominant-strategy mechanisms in allocation
                 domains where agents have one-dimensional types and
                 quasilinear utilities. Taking an allocation function as
                 an input, we present an algorithmic technique for
                 finding optimal payments in a class of mechanism design
                 problems, including utilitarian and egalitarian
                 allocation of homogeneous items with nondecreasing
                 marginal costs. Our results link optimality of payment
                 functions to a geometric condition involving
                 triangulations of polytopes. When this condition is
                 satisfied, we constructively show the existence of an
                 optimal payment function that is piecewise linear in
                 agent types.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Feldman:2013:ISI,
  author =       "Michal Feldman and Noam Nisan",
  title =        "Introduction to the {Special Issue on Algorithmic Game
                 Theory}",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "5:1--5:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2465769.2465770",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Blume:2013:NFP,
  author =       "Lawrence Blume and David Easley and Jon Kleinberg and
                 Robert Kleinberg and {\'E}va Tardos",
  title =        "Network Formation in the Presence of Contagious Risk",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "6:1--6:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2465769.2465771",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "There are a number of domains where agents must
                 collectively form a network in the face of the
                 following trade-off: each agent receives benefits from
                 the direct links it forms to others, but these links
                 expose it to the risk of being hit by a cascading
                 failure that might spread over multistep paths.
                 Financial contagion, epidemic disease, the exposure of
                 covert organizations to discovery, and electrical power
                 networks are all settings in which such issues have
                 been articulated. Here we formulate the problem in
                 terms of strategic network formation, and provide
                 asymptotically tight bounds on the welfare of both
                 optimal and stable networks. We find that socially
                 optimal networks are, in a precise sense, situated just
                 beyond a phase transition in the behavior of the
                 cascading failures, and that stable graphs lie slightly
                 further beyond this phase transition, at a point where
                 most of the available welfare has been lost. Our
                 analysis enables us to explore such issues as the
                 trade-offs between clustered and anonymous market
                 structures, and it exposes a fundamental sense in which
                 very small amounts of ``over-linking'' in networks with
                 contagious risk can have strong consequences for the
                 welfare of the participants.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Karlin:2013:SEM,
  author =       "Anna R. Karlin and C. Thach Nguyen and Yuval Peres",
  title =        "Selling in Exclusive Markets: Some Observations on
                 Prior-Free Mechanism Design",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "7:1--7:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2465769.2465772",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider prior-free benchmarks in non-matroid
                 settings. In particular, we show that a very desirable
                 benchmark proposed by Hartline and Roughgarden is too
                 strong, in the sense that no truthful mechanism can
                 compete with it even in a very simple non-matroid
                 setting where there are two exclusive markets and the
                 seller can only sell to agents in one of them. On the
                 other hand, we show that there is a mechanism that
                 competes with a symmetrized version of this benchmark.
                 We further investigate the more traditional best fixed
                 price profit benchmark and show that there are
                 mechanisms that compete with it in any downward-closed
                 settings.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Ha:2013:MDC,
  author =       "Bach Q. Ha and Jason D. Hartline",
  title =        "Mechanism Design via Consensus Estimates, Cross
                 Checking, and Profit Extraction",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "8:1--8:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2465769.2465773",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "There is only one technique for prior-free optimal
                 mechanism design that generalizes beyond the
                 structurally benevolent setting of digital goods. This
                 technique uses random sampling to estimate the
                 distribution of agent values and then employs the
                 Bayesian optimal mechanism for this estimated
                 distribution on the remaining players. Though quite
                 general, even for digital goods, this random sampling
                 auction has a complicated analysis and is known to be
                 suboptimal. To overcome these issues we generalize the
                 consensus and profit extraction techniques from
                 Goldberg and Hartline [2003] to structurally rich
                 environments that include, for example, single-minded
                 combinatorial auctions.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Goldberg:2013:CHM,
  author =       "Paul W. Goldberg and Christos H. Papadimitriou and
                 Rahul Savani",
  title =        "The Complexity of the Homotopy Method, Equilibrium
                 Selection, and {Lemke--Howson} Solutions",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "9:1--9:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2465769.2465774",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We show that the widely used homotopy method for
                 solving fixpoint problems, as well as the
                 Harsanyi--Selten equilibrium selection process for
                 games, are PSPACE-complete to implement. Extending our
                 result for the Harsanyi--Selten process, we show that
                 several other homotopy-based algorithms for finding
                 equilibria of games are also PSPACE-complete to
                 implement. A further application of our techniques
                 yields the result that it is PSPACE-complete to compute
                 any of the equilibria that could be found via the
                 classical Lemke--Howson algorithm, a
                 complexity-theoretic strengthening of the result in
                 Savani and von Stengel [2006]. These results show that
                 our techniques can be widely applied and suggest that
                 the PSPACE-completeness of implementing homotopy
                 methods is a general principle.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Koutsoupias:2013:CRO,
  author =       "Elias Koutsoupias and George Pierrakos",
  title =        "On the Competitive Ratio of Online Sampling Auctions",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "10:1--10:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2465769.2465775",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study online profit-maximizing auctions for digital
                 goods with adversarial bid selection and uniformly
                 random arrivals; in this sense, our model lies at the
                 intersection of prior-free mechanism design and
                 secretary problems. Our goal is to design auctions that
                 are constant-competitive with $F^{(2)}$. We give a
                 generic reduction that transforms any offline auction
                 to an online one with only a loss of a factor of 2 in
                 the competitive ratio. We also present some natural
                 auctions, both randomized and deterministic, and study
                 their competitive ratio. Our analysis reveals some
                 interesting connections of one of these auctions with
                 RSOP.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Candogan:2013:NPG,
  author =       "Ozan Candogan and Asuman Ozdaglar and Pablo A.
                 Parrilo",
  title =        "Near-Potential Games: Geometry and Dynamics",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "11:1--11:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2465769.2465776",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Potential games are a special class of games for which
                 many adaptive user dynamics converge to a Nash
                 equilibrium. In this article, we study properties of
                 near-potential games, that is, games that are close in
                 terms of payoffs to potential games, and show that such
                 games admit similar limiting dynamics. We first focus
                 on finite games in strategic form. We introduce a
                 distance notion in the space of games and study the
                 geometry of potential games and sets of games that are
                 equivalent, with respect to various equivalence
                 relations, to potential games. We discuss how, given an
                 arbitrary game, one can find a nearby game in these
                 sets. We then study dynamics in near-potential games by
                 focusing on continuous-time perturbed best response
                 dynamics. We characterize the limiting behavior of this
                 dynamics in terms of the upper contour sets of the
                 potential function of a close potential game and
                 approximate equilibria of the game. Exploiting
                 structural properties of approximate equilibrium sets,
                 we strengthen our result and show that for games that
                 are sufficiently close to a potential game, the
                 sequence of mixed strategies generated by this dynamics
                 converges to a small neighborhood of equilibria whose
                 size is a function of the distance from the set of
                 potential games. In the second part of the article, we
                 study continuous games and show that our approach for
                 characterizing the limiting sets in near-potential
                 games extends to continuous games. In particular, we
                 consider continuous-time best response dynamics and a
                 variant of it (where players update their strategies
                 only if there is at least $ \epsilon $ utility
                 improvement opportunity) in near-potential games where
                 the strategy sets are compact and convex subsets of a
                 Euclidean space. We show that these update rules
                 converge to a neighborhood of equilibria (or the
                 maximizer of the potential function), provided that the
                 potential function of the nearby potential game
                 satisfies some structural properties. Our results
                 generalize the known convergence results for potential
                 games to near-potential games.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Abernethy:2013:EMM,
  author =       "Jacob Abernethy and Yiling Chen and Jennifer Wortman
                 Vaughan",
  title =        "Efficient Market Making via Convex Optimization, and a
                 Connection to Online Learning",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "12:1--12:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2465769.2465777",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We propose a general framework for the design of
                 securities markets over combinatorial or infinite state
                 or outcome spaces. The framework enables the design of
                 computationally efficient markets tailored to an
                 arbitrary, yet relatively small, space of securities
                 with bounded payoff. We prove that any market
                 satisfying a set of intuitive conditions must price
                 securities via a convex cost function, which is
                 constructed via conjugate duality. Rather than deal
                 with an exponentially large or infinite outcome space
                 directly, our framework only requires optimization over
                 a convex hull. By reducing the problem of automated
                 market making to convex optimization, where many
                 efficient algorithms exist, we arrive at a range of new
                 polynomial-time pricing mechanisms for various
                 problems. We demonstrate the advantages of this
                 framework with the design of some particular markets.
                 We also show that by relaxing the convex hull we can
                 gain computational tractability without compromising
                 the market institution's bounded budget. Although our
                 framework was designed with the goal of deriving
                 efficient automated market makers for markets with very
                 large outcome spaces, this framework also provides new
                 insights into the relationship between market design
                 and machine learning, and into the complete market
                 setting. Using our framework, we illustrate the
                 mathematical parallels between cost-function-based
                 markets and online learning and establish a
                 correspondence between cost-function-based markets and
                 market scoring rules for complete markets.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Haghpanah:2013:OAP,
  author =       "Nima Haghpanah and Nicole Immorlica and Vahab Mirrokni
                 and Kamesh Munagala",
  title =        "Optimal Auctions with Positive Network Externalities",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "13:1--13:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2465769.2465778",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the problem of designing auctions in
                 social networks for goods that exhibit single-parameter
                 submodular network externalities in which a bidder's
                 value for an outcome is a fixed private type times a
                 known submodular function of the allocation of his
                 friends. Externalities pose many issues that are hard
                 to address with traditional techniques; our work shows
                 how to resolve these issues in a specific setting of
                 particular interest. We operate in a Bayesian
                 environment and so assume private values are drawn
                 according to known distributions. We prove that the
                 optimal auction is NP-hard to approximate pointwise,
                 and APX-hard on average. Thus we instead design
                 auctions whose revenue approximates that of the optimal
                 auction. Our main result considers step-function
                 externalities in which a bidder's value for an outcome
                 is either zero, or equal to his private type if at
                 least one friend has the good. For these settings, we
                 provide an $e / (e + 1)$-approximation. We also give a
                 0.25-approximation auction for general single-parameter
                 submodular network externalities, and discuss
                 optimizing over a class of simple pricing strategies.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Othman:2013:PLS,
  author =       "Abraham Othman and David M. Pennock and Daniel M.
                 Reeves and Tuomas Sandholm",
  title =        "A Practical Liquidity-Sensitive Automated Market
                 Maker",
  journal =      j-TEAC,
  volume =       "1",
  number =       "3",
  pages =        "14:1--14:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2509413.2509414",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:54 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Automated market makers are algorithmic agents that
                 enable participation and information elicitation in
                 electronic markets. They have been widely and
                 successfully applied in artificial-money settings, like
                 some Internet prediction markets. Automated market
                 makers from the literature suffer from two problems
                 that contribute to their impracticality and impair
                 their use beyond artificial-money settings: first, they
                 are unable to adapt to liquidity, so that trades cause
                 prices to move the same amount in both heavily and
                 lightly traded markets, and second, in typical
                 circumstances, they run at a deficit. In this article,
                 we construct a market maker that is both sensitive to
                 liquidity and can run at a profit. Our market maker has
                 bounded loss for any initial level of liquidity and, as
                 the initial level of liquidity approaches zero,
                 worst-case loss approaches zero. For any level of
                 initial liquidity we can establish a boundary in market
                 state space such that, if the market terminates within
                 that boundary, the market maker books a profit
                 regardless of the realized outcome.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Balcan:2013:PU,
  author =       "Maria-Florina Balcan and Avrim Blum and Yishay
                 Mansour",
  title =        "The Price of Uncertainty",
  journal =      j-TEAC,
  volume =       "1",
  number =       "3",
  pages =        "15:1--15:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2509413.2509415",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:54 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In this work, we study the degree to which small
                 fluctuations in costs in well-studied potential games
                 can impact the result of natural best-response and
                 improved-response dynamics. We call this the Price of
                 Uncertainty and study it in a wide variety of potential
                 games including fair cost-sharing games, set-cover
                 games, routing games, and job-scheduling games. We show
                 that in certain cases, even extremely small
                 fluctuations can have the ability to cause these
                 dynamics to spin out of control and move to states of
                 much higher social cost, whereas in other cases these
                 dynamics are much more stable even to large degrees of
                 fluctuation. We also consider the resilience of these
                 dynamics to a small number of Byzantine players about
                 which no assumptions are made. We show again a contrast
                 between different games. In certain cases (e.g., fair
                 cost-sharing, set-cover, job-scheduling) even a single
                 Byzantine player can cause best-response dynamics to
                 transition from low-cost states to states of
                 substantially higher cost, whereas in others (e.g., the
                 class of $ \beta $-nice games, which includes routing,
                 market-sharing and many others) these dynamics are much
                 more resilient. Overall, our work can be viewed as
                 analyzing the inherent resilience or safety of games to
                 different kinds of imperfections in player behavior,
                 player information, or in modeling assumptions made.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Ben-Yehuda:2013:DAE,
  author =       "Orna Agmon Ben-Yehuda and Muli Ben-Yehuda and Assaf
                 Schuster and Dan Tsafrir",
  title =        "Deconstructing {Amazon EC2} Spot Instance Pricing",
  journal =      j-TEAC,
  volume =       "1",
  number =       "3",
  pages =        "16:1--16:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2509413.2509416",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:54 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Cloud providers possessing large quantities of spare
                 capacity must either incentivize clients to purchase it
                 or suffer losses. Amazon is the first cloud provider to
                 address this challenge, by allowing clients to bid on
                 spare capacity and by granting resources to bidders
                 while their bids exceed a periodically changing spot
                 price. Amazon publicizes the spot price but does not
                 disclose how it is determined. By analyzing the spot
                 price histories of Amazon's EC2 cloud, we reverse
                 engineer how prices are set and construct a model that
                 generates prices consistent with existing price traces.
                 Our findings suggest that usually prices are not
                 market-driven, as sometimes previously assumed. Rather,
                 they are likely to be generated most of the time at
                 random from within a tight price range via a dynamic
                 hidden reserve price mechanism. Our model could help
                 clients make informed bids, cloud providers design
                 profitable systems, and researchers design pricing
                 algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Sarne:2013:CSM,
  author =       "David Sarne",
  title =        "Competitive Shopbots-Mediated Markets",
  journal =      j-TEAC,
  volume =       "1",
  number =       "3",
  pages =        "17:1--17:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2509413.2509417",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:54 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "This article considers markets mediated by autonomous
                 self-interested comparison-shopping agents. As in
                 today's markets, the agents do not charge buyers for
                 their services but rather benefit from payments
                 obtained from sellers upon the execution of a
                 transaction. The agents aim at maximizing their
                 expected benefit, taking into consideration the cost
                 incurred by the search and competition dynamics that
                 arise in the multi-agent setting. This article provides
                 a comprehensive analysis of such models, based on
                 search theory principles. The analysis results in a
                 characterization of the buyers' and agents' search
                 strategies in equilibrium. The main result of this
                 article is that the use of self-interested
                 comparison-shopping agents can result in a beneficial
                 equilibrium, where both buyers and sellers benefit, in
                 comparison to the case where buyers control the
                 comparison-shopping agent, and the comparison-shopping
                 agents necessarily do not lose. This, despite the fact
                 that the service is offered for free to buyers and its
                 cost is essentially covered by sellers. The analysis
                 generalizes to any setting where buyers can use
                 self-interested agents capable of effectively
                 performing the search (e.g., evaluating opportunities)
                 on their behalf.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Procaccia:2013:AMD,
  author =       "Ariel D. Procaccia and Moshe Tennenholtz",
  title =        "Approximate Mechanism Design without Money",
  journal =      j-TEAC,
  volume =       "1",
  number =       "4",
  pages =        "18:1--18:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2542174.2542175",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:56 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "The literature on algorithmic mechanism design is
                 mostly concerned with game-theoretic versions of
                 optimization problems to which standard economic
                 money-based mechanisms cannot be applied efficiently.
                 Recent years have seen the design of various truthful
                 approximation mechanisms that rely on payments. In this
                 article, we advocate the reconsideration of highly
                 structured optimization problems in the context of
                 mechanism design. We explicitly argue for the first
                 time that, in such domains, approximation can be
                 leveraged to obtain truthfulness without resorting to
                 payments. This stands in contrast to previous work
                 where payments are almost ubiquitous and (more often
                 than not) approximation is a necessary evil that is
                 required to circumvent computational complexity. We
                 present a case study in approximate mechanism design
                 without money. In our basic setting, agents are located
                 on the real line and the mechanism must select the
                 location of a public facility; the cost of an agent is
                 its distance to the facility. We establish tight upper
                 and lower bounds for the approximation ratio given by
                 strategy-proof mechanisms without payments, with
                 respect to both deterministic and randomized
                 mechanisms, under two objective functions: the social
                 cost and the maximum cost. We then extend our results
                 in two natural directions: a domain where two
                 facilities must be located and a domain where each
                 agent controls multiple locations.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Yildiz:2013:BOD,
  author =       "Ercan Yildiz and Asuman Ozdaglar and Daron Acemoglu
                 and Amin Saberi and Anna Scaglione",
  title =        "Binary Opinion Dynamics with Stubborn Agents",
  journal =      j-TEAC,
  volume =       "1",
  number =       "4",
  pages =        "19:1--19:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2538508",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:56 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study binary opinion dynamics in a social network
                 with stubborn agents who influence others but do not
                 change their opinions. We focus on a generalization of
                 the classical voter model by introducing nodes
                 (stubborn agents) that have a fixed state. We show that
                 the presence of stubborn agents with opposing opinions
                 precludes convergence to consensus; instead, opinions
                 converge in distribution with disagreement and
                 fluctuations. In addition to the first moment of this
                 distribution typically studied in the literature, we
                 study the behavior of the second moment in terms of
                 network properties and the opinions and locations of
                 stubborn agents. We also study the problem of optimal
                 placement of stubborn agents where the location of a
                 fixed number of stubborn agents is chosen to have the
                 maximum impact on the long-run expected opinions of
                 agents.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Mossel:2013:MCT,
  author =       "Elchanan Mossel and Omer Tamuz",
  title =        "Making Consensus Tractable",
  journal =      j-TEAC,
  volume =       "1",
  number =       "4",
  pages =        "20:1--20:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2542174.2542176",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:56 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study a model of consensus decision making in which
                 a finite group of Bayesian agents has to choose between
                 one of two courses of action. Each member of the group
                 has a private and independent signal at his or her
                 disposal, giving some indication as to which action is
                 optimal. To come to a common decision, the participants
                 perform repeated rounds of voting. In each round, each
                 agent casts a vote in favor of one of the two courses
                 of action, reflecting his or her current belief, and
                 observes the votes of the rest. We provide an efficient
                 algorithm for the calculation the agents have to
                 perform and show that consensus is always reached and
                 that the probability of reaching a wrong decision
                 decays exponentially with the number of agents.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Hoefer:2013:AAC,
  author =       "Martin Hoefer and Alexander Skopalik",
  title =        "Altruism in Atomic Congestion Games",
  journal =      j-TEAC,
  volume =       "1",
  number =       "4",
  pages =        "21:1--21:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2542174.2542177",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:56 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "This article studies the effects of altruism, a
                 phenomenon widely observed in practice, in the model of
                 atomic congestion games. Altruistic behavior is modeled
                 by a linear trade-off between selfish and social
                 objectives. Our model can be embedded in the framework
                 of congestion games with player-specific latency
                 functions. Stable states are the pure Nash equilibria
                 of these games, and we examine their existence and the
                 convergence of sequential best-response dynamics. In
                 general, pure Nash equilibria are often absent, and
                 existence is NP-hard to decide. Perhaps surprisingly,
                 if all delay functions are affine, the games remain
                 potential games, even when agents are arbitrarily
                 altruistic. The construction underlying this result can
                 be extended to a class of general potential games and
                 social cost functions, and we study a number of
                 prominent examples. These results give important
                 insights into the robustness of multi-agent systems
                 with heterogeneous altruistic incentives. Furthermore,
                 they yield a general technique to prove that
                 stabilization is robust, even with partly altruistic
                 agents, which is of independent interest. In addition
                 to these results for uncoordinated dynamics, we
                 consider a scenario with a central altruistic
                 institution that can set incentives for the agents. We
                 provide constructive and hardness results for finding
                 the minimum number of altruists to stabilize an optimal
                 congestion profile and more general mechanisms to
                 incentivize agents to adopt favorable behavior. These
                 results are closely related to Stackelberg routing and
                 answer open questions raised recently in the
                 literature.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Polevoy:2014:SCS,
  author =       "Gleb Polevoy and Rann Smorodinsky and Moshe
                 Tennenholtz",
  title =        "Signaling Competition and Social Welfare",
  journal =      j-TEAC,
  volume =       "2",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2560766",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 21 18:00:43 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider an environment where sellers compete over
                 buyers. All sellers are a-priori identical and
                 strategically signal buyers about the product they
                 sell. In a setting motivated by online advertising in
                 display ad exchanges, where firms use second price
                 auctions, a firm's strategy is a decision about its
                 signaling scheme for a stream of goods (e.g., user
                 impressions), and a buyer's strategy is a selection
                 among the firms. In this setting, a single seller will
                 typically provide partial information, and
                 consequently, a product may be allocated inefficiently.
                 Intuitively, competition among sellers may induce
                 sellers to provide more information in order to attract
                 buyers and thus increase efficiency. Surprisingly, we
                 show that such a competition among firms may yield
                 significant loss in consumers' social welfare with
                 respect to the monopolistic setting. Although we also
                 show that in some cases, the competitive setting yields
                 gain in social welfare, we provide a tight bound on
                 that gain, which is shown to be small with respect to
                 the preceding possible loss. Our model is tightly
                 connected with the literature on bundling in
                 auctions.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Albers:2014:NEN,
  author =       "Susanne Albers and Stefan Eilts and Eyal Even-Dar and
                 Yishay Mansour and Liam Roditty",
  title =        "On {Nash} Equilibria for a Network Creation Game",
  journal =      j-TEAC,
  volume =       "2",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2560767",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 21 18:00:43 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study a basic network creation game proposed by
                 Fabrikant et al. [2003]. In this game, each player
                 (vertex) can create links (edges) to other players at a
                 cost of $\alpha$ per edge. The goal of every player is to
                 minimize the sum consisting of (a) the cost of the
                 links he has created and (b) the sum of the distances
                 to all other players. Fabrikant et al. conjectured that
                 there exists a constant $A$ such that, for any $\alpha
                 > A$, all nontransient Nash equilibria graphs are
                 trees. They showed that if a Nash equilibrium is a
                 tree, the price of anarchy is constant. In this
                 article, we disprove the tree conjecture. More
                 precisely, we show that for any positive integer $n_0$,
                 there exists a graph built by $n \geq n_0$ players
                 which contains cycles and forms a nontransient Nash
                 equilibrium, for any $\alpha$ with $1 < \alpha \leq
                 \sqrt n / 2$. Our construction makes use of some
                 interesting results on finite affine planes. On the
                 other hand, we show that, for $\alpha \geq 12 n \lceil
                 \log n \rceil$, every Nash equilibrium forms a
                 tree. Without relying on the tree conjecture, Fabrikant
                 et al. proved an upper bound on the price of anarchy of
                 $O(\sqrt{\alpha})$, where $\alpha \in [2, n^2]$. We
                 improve this bound. Specifically, we derive a constant
                 upper bound for $\alpha \in O(\sqrt{n})$ and for
                 $\alpha \geq 12 n \lceil \log n \rceil$. For the
                 intermediate values, we derive an improved bound of
                 $O(1 + (\min\{\alpha^2 / n, n^2 / \alpha \})^{1 /
                 3})$. Additionally, we develop characterizations of
                 Nash equilibria and extend our results to a weighted
                 network creation game as well as to scenarios with cost
                 sharing.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Smeulders:2014:GFM,
  author =       "Bart Smeulders and Frits C. R. Spieksma and Laurens
                 Cherchye and Bram {De Rock}",
  title =        "Goodness-of-Fit Measures for Revealed Preference
                 Tests: Complexity Results and Algorithms",
  journal =      j-TEAC,
  volume =       "2",
  number =       "1",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2560793",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 21 18:00:43 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We provide results on the computational complexity of
                 goodness-of-fit measures (i.e., Afriat's efficiency
                 index, Varian's efficiency vector-index, and the
                 Houtman-Maks index) associated with several revealed
                 preference axioms (i.e., WARP, SARP, GARP, and
                 HARP). These results explain the computational
                 difficulties that have been observed in literature when
                 computing these indices. Our NP-hardness results are
                 obtained by reductions from the independent set
                 problem. We also show that this reduction can be used
                 to prove that no approximation algorithm achieving a
                 ratio of $O(n^{1 - \delta})$, $\delta > 0$, exists for
                 Varian's index, nor for Houtman-Maks' index (unless P =
                 NP). Finally, we give an exact polynomial-time
                 algorithm for finding Afriat's efficiency index.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Zhang:2014:RPO,
  author =       "Yu Zhang and Jaeok Park and Mihaela van der Schaar",
  title =        "Rating Protocols in Online Communities",
  journal =      j-TEAC,
  volume =       "2",
  number =       "1",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2560794",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 21 18:00:43 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Sustaining cooperation among self-interested agents is
                 critical for the proliferation of emerging online
                 communities. Providing incentives for cooperation in
                 online communities is particularly challenging because
                 of their unique features: a large population of
                 anonymous agents having asymmetric interests and
                 dynamically joining and leaving the community,
                 operation errors, and agents trying to whitewash when
                 they have a low standing in the community. In this
                 article, we take these features into consideration and
                 propose a framework for designing and analyzing a class
                 of incentive schemes based on rating protocols, which
                 consist of a rating scheme and a recommended strategy.
                 We first define the concept of sustainable rating
                 protocols under which every agent has the incentive to
                 follow the recommended strategy given the deployed
                 rating scheme. We then formulate the problem of
                 designing an optimal rating protocol, which selects the
                 protocol that maximizes the overall social welfare
                 among all sustainable rating protocols. Using the
                 proposed framework, we study the structure of optimal
                 rating protocols and explore the impact of one-sided
                 rating, punishment lengths, and whitewashing on optimal
                 rating protocols. Our results show that optimal rating
                 protocols are capable of sustaining cooperation, with
                 the amount of cooperation varying depending on the
                 community characteristics.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Emek:2014:SSR,
  author =       "Yuval Emek and Michal Feldman and Iftah Gamzu and
                 Renato Paes Leme and Moshe Tennenholtz",
  title =        "Signaling Schemes for Revenue Maximization",
  journal =      j-TEAC,
  volume =       "2",
  number =       "2",
  pages =        "5:1--5:??",
  month =        jun,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2594564",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Jun 9 16:42:02 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Signaling is an important topic in the study of
                 asymmetric information in economic settings. In
                 particular, the transparency of information available
                 to a seller in an auction setting is a question of
                 major interest. We introduce the study of signaling
                 when conducting a second price auction of a
                 probabilistic good whose actual instantiation is known
                 to the auctioneer but not to the bidders. This
                 framework can be used to model impressions selling in
                 display advertising. We establish several results
                 within this framework. First, we study the problem of
                 computing a signaling scheme that maximizes the
                 auctioneer's revenue in a Bayesian setting. We show
                 that this problem is polynomially solvable for some
                 interesting special cases, but computationally hard in
                 general. Second, we establish a tight bound on the
                 minimum number of signals required to implement an
                 optimal signaling scheme. Finally, we show that at
                 least half of the maximum social welfare can be
                 preserved within such a scheme.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chen:2014:EPR,
  author =       "Yiling Chen and Ian A. Kash and Michael Ruberry and
                 Victor Shnayder",
  title =        "Eliciting Predictions and Recommendations for Decision
                 Making",
  journal =      j-TEAC,
  volume =       "2",
  number =       "2",
  pages =        "6:1--6:??",
  month =        jun,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2556271",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Jun 9 16:42:02 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "When making a decision, a decision maker selects one
                 of several possible actions and hopes to achieve a
                 desirable outcome. To make a better decision, the
                 decision maker often asks experts for advice. In this
                 article, we consider two methods of acquiring advice
                 for decision making. We begin with a method where one
                 or more experts predict the effect of each action and
                 the decision maker then selects an action based on the
                 predictions. We characterize strictly proper decision
                 making, where experts have an incentive to accurately
                 reveal their beliefs about the outcome of each action.
                 However, strictly proper decision making requires the
                 decision maker use a completely mixed strategy to
                 choose an action. To address this limitation, we
                 consider a second method where the decision maker asks
                 a single expert to recommend an action. We show that it
                 is possible to elicit the decision maker's most
                 preferred action for a broad class of preferences of
                 the decision maker, including when the decision maker
                 is an expected value maximizer.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Rozen:2014:EPE,
  author =       "Rakefet Rozen and Rann Smorodinsky",
  title =        "Ex-Post Equilibrium and {VCG} Mechanisms",
  journal =      j-TEAC,
  volume =       "2",
  number =       "2",
  pages =        "7:1--7:??",
  month =        jun,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2594565",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Jun 9 16:42:02 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Consider an abstract social choice setting with
                 incomplete information, where the number of
                 alternatives is large. Albeit natural, implementing VCG
                 mechanisms is infeasible due to the prohibitive
                 communication constraints. However, if players restrict
                 attention to a subset of the alternatives, feasibility
                 may be recovered. This article characterizes the class
                 of subsets that induce an ex-post equilibrium in the
                 original game. It turns out that a crucial condition
                 for such subsets to exist is the availability of a
                 type-independent optimal social alternative for each
                 player. We further analyze the welfare implications of
                 these restrictions. This work follows that of Holzman
                 et al. [2004] and Holzman and Monderer [2004] where
                 similar analysis is done for combinatorial auctions.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chen:2014:PAS,
  author =       "Xujin Chen and Benjamin Doerr and Carola Doerr and
                 Xiaodong Hu and Weidong Ma and Rob van Stee",
  title =        "The Price of Anarchy for Selfish Ring Routing is Two",
  journal =      j-TEAC,
  volume =       "2",
  number =       "2",
  pages =        "8:1--8:??",
  month =        jun,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2548545",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Jun 9 16:42:02 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We analyze the network congestion game with atomic
                 players, asymmetric strategies, and the maximum latency
                 among all players as social cost. This important social
                 cost function is much less understood than the average
                 latency. We show that the price of anarchy is at most
                 two, when the network is a ring and the link latencies
                 are linear. Our bound is tight. This is the first sharp
                 bound for the maximum latency objective.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Cary:2014:CPA,
  author =       "Matthew Cary and Aparna Das and Benjamin Edelman and
                 Ioannis Giotis and Kurtis Heimerl and Anna R. Karlin
                 and Scott Duke Kominers and Claire Mathieu and Michael
                 Schwarz",
  title =        "Convergence of Position Auctions under Myopic
                 Best-Response Dynamics",
  journal =      j-TEAC,
  volume =       "2",
  number =       "3",
  pages =        "9:1--9:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2632226",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Oct 17 12:45:12 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the dynamics of multiround position auctions,
                 considering both the case of exogenous click-through
                 rates and the case in which click-through rates are
                 determined by an endogenous consumer search process. In
                 both contexts, we demonstrate that dynamic position
                 auctions converge to their associated static, envy-free
                 equilibria. Furthermore, convergence is efficient, and
                 the entry of low-quality advertisers does not slow
                 convergence. Because our approach predominantly relies
                 on assumptions common in the sponsored search
                 literature, our results suggest that dynamic position
                 auctions converge more generally.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Azar:2014:QCS,
  author =       "Pablo Daniel Azar and Silvio Micali",
  title =        "The Query Complexity of Scoring Rules",
  journal =      j-TEAC,
  volume =       "2",
  number =       "3",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2632228",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Oct 17 12:45:12 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Proper scoring rules are crucial tools to elicit
                 truthful information from experts. A scoring rule maps
                 X, an expert-provided distribution over the set of all
                 possible states of the world, and $ \omega $, a
                 realized state of the world, to a real number
                 representing the expert's reward for his provided
                 information. To compute this reward, a scoring rule
                 queries the distribution X at various states. The
                 number of these queries is thus a natural measure of
                 the complexity of the scoring rule. We prove that any
                 bounded and strictly proper scoring rule that is
                 deterministic must make a number of queries to its
                 input distribution that is a quarter of the number of
                 states of the world. When the state space is very
                 large, this makes the computation of such scoring rules
                 impractical. We also show a new stochastic scoring rule
                 that is bounded, strictly proper, and which makes only
                 two queries to its input distribution. Thus, using
                 randomness allows us to have significant savings when
                 computing scoring rules.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Alaei:2014:RSA,
  author =       "Saeed Alaei and Azarakhsh Malekian and Aravind
                 Srinivasan",
  title =        "On Random Sampling Auctions for Digital Goods",
  journal =      j-TEAC,
  volume =       "2",
  number =       "3",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2517148",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Oct 17 12:45:12 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In the context of auctions for digital goods, an
                 interesting random sampling auction has been proposed
                 by Goldberg et al. [2001]. This auction has been
                 analyzed by Feige et al. [2005], who have shown that it
                 obtains in expectation at least 1/15 fraction of the
                 optimal revenue, which is substantially better than the
                 previously proven constant bounds but still far from
                 the conjectured lower bound of 1/4. In this article, we
                 prove that the aforementioned random sampling auction
                 obtains at least 1/4 fraction of the optimal revenue
                 for a large class of instances where the number of bids
                 above (or equal to) the optimal sale price is at least
                 6. We also show that this auction obtains at least
                 1/4.68 fraction of the optimal revenue for the small
                 class of remaining instances, thus leaving a negligible
                 gap between the lower and upper bound. We employ a mix
                 of probabilistic techniques and dynamic programming to
                 compute these bounds.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Dandekar:2014:PAR,
  author =       "Pranav Dandekar and Nadia Fawaz and Stratis
                 Ioannidis",
  title =        "Privacy Auctions for Recommender Systems",
  journal =      j-TEAC,
  volume =       "2",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2629665",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Oct 17 12:45:12 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study a market for private data in which a data
                 analyst publicly releases a statistic over a database
                 of private information. Individuals that own the data
                 incur a cost for their loss of privacy proportional to
                 the differential privacy guarantee given by the analyst
                 at the time of the release. The analyst incentivizes
                 individuals by compensating them, giving rise to a
                 privacy auction. Motivated by recommender systems, the
                 statistic we consider is a linear predictor function
                 with publicly known weights. The statistic can be
                 viewed as a prediction of the unknown data of a new
                 individual, based on the data of individuals in the
                 database. We formalize the trade-off between privacy
                 and accuracy in this setting, and show that a simple
                 class of estimates achieves an order-optimal trade-off.
                 It thus suffices to focus on auction mechanisms that
                 output such estimates. We use this observation to
                 design a truthful, individually rational,
                 proportional-purchase mechanism under a fixed budget
                 constraint. We show that our mechanism is 5-approximate
                 in terms of accuracy compared to the optimal mechanism,
                 and that no truthful mechanism can achieve a $ 2 -
                 \epsilon $ approximation, for any $\epsilon > 0$.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Balcan:2014:NOC,
  author =       "Maria-Florina Balcan and Sara Krehbiel and Georgios
                 Piliouras and Jinwoo Shin",
  title =        "Near-Optimality in Covering Games by Exposing Global
                 Information",
  journal =      j-TEAC,
  volume =       "2",
  number =       "4",
  pages =        "13:1--13:??",
  month =        oct,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2597890",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Oct 28 16:50:26 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Mechanism design for distributed systems is
                 fundamentally concerned with aligning individual
                 incentives with social welfare to avoid socially
                 inefficient outcomes that can arise from agents acting
                 autonomously. One simple and natural approach is to
                 centrally broadcast nonbinding advice intended to guide
                 the system to a socially near-optimal state while still
                 harnessing the incentives of individual agents. The
                 analytical challenge is proving fast convergence to
                 near-optimal states, and in this article we give the
                 first results showing that carefully constructed advice
                 vectors yield stronger guarantees. We apply this
                 approach to a
                 broad family of potential games modeling vertex cover
                 and set cover optimization problems in a distributed
                 setting. This class of problems is interesting because
                 finding exact solutions to their optimization problems
                 is NP-hard yet highly inefficient equilibria exist, so
                 a solution in which agents simply locally optimize is
                 not satisfactory. We show that with an arbitrary advice
                 vector, a set cover game quickly converges to an
                 equilibrium with cost of the same order as the square
                 of the social cost of the advice vector. More
                 interestingly, we show how to efficiently construct an
                 advice vector with a particular structure with cost
                 $O(\log n)$ times the optimal social cost, and we prove
                 that the system quickly converges to an equilibrium
                 with social cost of this same order.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Bhawalkar:2014:WCG,
  author =       "Kshipra Bhawalkar and Martin Gairing and Tim
                 Roughgarden",
  title =        "Weighted Congestion Games: The Price of Anarchy,
                 Universal Worst-Case Examples, and Tightness",
  journal =      j-TEAC,
  volume =       "2",
  number =       "4",
  pages =        "14:1--14:??",
  month =        oct,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2629666",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Oct 28 16:50:26 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We characterize the Price of Anarchy (POA) in weighted
                 congestion games, as a function of the allowable
                 resource cost functions. Our results provide as
                 thorough an understanding of this quantity as is
                 already known for nonatomic and unweighted congestion
                 games, and take the form of universal (cost
                 function-independent) worst-case examples. One
                 noteworthy by-product of our proofs is the fact that
                 weighted congestion games are ``tight,'' which implies
                 that the worst-case price of anarchy with respect to
                 pure Nash equilibria, mixed Nash equilibria, correlated
                 equilibria, and coarse correlated equilibria are always
                 equal (under mild conditions on the allowable cost
                 functions). Another is the fact that, like nonatomic
                 but unlike atomic (unweighted) congestion games,
                 weighted congestion games with trivial structure
                 already realize the worst-case POA, at least for
                 polynomial cost functions. We also prove a new result
                 about unweighted congestion games: the worst-case price
                 of anarchy in symmetric games is as large as in their
                 more general asymmetric counterparts.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Fotakis:2014:PDM,
  author =       "Dimitris Fotakis and Christos Tzamos",
  title =        "On the Power of Deterministic Mechanisms for Facility
                 Location Games",
  journal =      j-TEAC,
  volume =       "2",
  number =       "4",
  pages =        "15:1--15:??",
  month =        oct,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2665005",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Oct 28 16:50:26 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider $K$-Facility Location games, where $n$
                 strategic agents report their locations in a metric
                 space and a mechanism maps them to $K$ facilities. The
                 agents seek to minimize their connection cost, namely
                 the distance of their true location to the nearest
                 facility, and may misreport their location. We are
                 interested in deterministic mechanisms that are
                 strategyproof, that is, ensure that no agent can
                 benefit from misreporting her location, do not resort
                 to monetary transfers, and achieve a bounded
                 approximation ratio to the total connection cost of the
                 agents (or to the $ L_p $ norm of the connection costs,
                 for some $ p \in [1, \infty) $ or for $ p = \infty $).
                 Our main result is an elegant characterization of
                 deterministic strategyproof mechanisms with a bounded
                 approximation ratio for $2$-Facility Location on the
                 line. In particular, we show that for instances with $
                 n \geq 5 $ agents, any such mechanism either admits a
                 unique dictator or always places the facilities at the
                 leftmost and the rightmost location of the instance. As
                 a corollary, we obtain that the best approximation
                 ratio achievable by deterministic strategyproof
                 mechanisms for the problem of locating $2$ facilities
                 on the line to minimize the total connection cost is
                 precisely $ n - 2$. Another rather surprising
                 consequence is that the Two-Extremes mechanism of
                 Procaccia and Tennenholtz [2009] is the only
                 deterministic anonymous strategyproof mechanism with a
                 bounded approximation ratio for $2$-Facility Location
                 on the line. The proof of the characterization employs
                 several new ideas and technical tools, which provide
                 new insights into the behavior of deterministic
                 strategyproof mechanisms for $K$-Facility Location
                 games and may be of independent interest. Employing one
                 of these tools, we show that for every $ K \geq 3$,
                 there do not exist any deterministic anonymous
                 strategyproof mechanisms with a bounded approximation
                 ratio for $K$-Facility Location on the line, even for
                 simple instances with $ K + 1$ agents. Moreover,
                 building on the characterization for the line, we show
                 that there do not exist any deterministic strategyproof
                 mechanisms with a bounded approximation ratio for
                 $2$-Facility Location and instances with $ n \geq 3$
                 agents located in a star.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Michalak:2014:ICV,
  author =       "Tomasz P. Michalak and Piotr L. Szczepa{\'n}ski and
                 Talal Rahwan and Agata Chrobak and Simina Br{\^a}nzei
                 and Michael Wooldridge and Nicholas R. Jennings",
  title =        "Implementation and Computation of a Value for
                 Generalized Characteristic Function Games",
  journal =      j-TEAC,
  volume =       "2",
  number =       "4",
  pages =        "16:1--16:??",
  month =        oct,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2665007",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Oct 28 16:50:26 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Generalized characteristic function games are a
                 variation of characteristic function games, in which
                 the value of a coalition depends not only on the
                 identities of its members, but also on the order in
                 which the coalition is formed. This class of games is a
                 useful abstraction for a number of realistic settings
                 and economic situations, such as modeling relationships
                 in social networks. To date, two main extensions of the
                 Shapley value have been proposed for generalized
                 characteristic function games: the Nowak--Radzik [1994]
                 value and the S{\'a}nchez--Berganti{\~n}os [1997]
                 value. In this context, the present article studies
                 generalized characteristic function games from the
                 point of view of implementation and computation.
                 Specifically, the article makes two key contributions.
                 First, building upon the mechanism by Dasgupta and Chiu
                 [1998], we present a non-cooperative mechanism that
                 implements both the Nowak--Radzik value and the
                 S{\'a}nchez--Berganti{\~n}os value in Subgame-Perfect
                 Nash Equilibria in expectation. Second, in order to
                 facilitate an efficient computation supporting the
                 implementation mechanism, we propose the Generalized
                 Marginal-Contribution Nets representation for this type
                 of game. This representation extends the results of
                 Ieong and Shoham [2005] and Elkind et al. [2009] for
                 characteristic function games and retains their
                 attractive computational properties.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chen:2014:AIP,
  author =       "Po-An Chen and Bart {De Keijzer} and David Kempe and
                 Guido Sch{\"a}fer",
  title =        "Altruism and Its Impact on the Price of Anarchy",
  journal =      j-TEAC,
  volume =       "2",
  number =       "4",
  pages =        "17:1--17:??",
  month =        oct,
  year =         "2014",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2597893",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Oct 28 16:50:26 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the inefficiency of equilibria for congestion
                 games when players are (partially) altruistic. We model
                 altruistic behavior by assuming that player $i$'s
                 perceived cost is a convex combination of $(1 - \alpha_i)$
                 times his direct cost and $\alpha_i$ times the social
                 cost. Tuning the parameters $\alpha_i$ allows smooth
                 interpolation between purely selfish and purely
                 altruistic behavior. Within this framework, we study
                 primarily altruistic extensions of (atomic and
                 nonatomic) congestion games, but also obtain some
                 results on fair cost-sharing games and valid utility
                 games. We derive (tight) bounds on the price of anarchy
                 of these games for several solution concepts. Thereto,
                 we suitably adapt the smoothness notion introduced by
                 Roughgarden and show that it captures the essential
                 properties to determine the robust price of anarchy of
                 these games. Our bounds show that for atomic congestion
                 games and cost-sharing games, the robust price of
                 anarchy gets worse with increasing altruism, while for
                 valid utility games, it remains constant and is not
                 affected by altruism. However, the increase in the
                 price of anarchy is not a universal phenomenon: For
                 general nonatomic congestion games with uniform
                 altruism, the price of anarchy improves with increasing
                 altruism. For atomic and nonatomic symmetric singleton
                 congestion games, we derive bounds on the pure price of
                 anarchy that improve as the average level of altruism
                 increases. (For atomic games, we only derive such
                 bounds when cost functions are linear.) Since the
                 bounds are also strictly lower than the robust price of
                 anarchy, these games exhibit natural examples in which
                 pure Nash equilibria are more efficient than more
                 permissive notions of equilibrium.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Leyton-Brown:2015:ISI,
  author =       "Kevin Leyton-Brown and Panos Ipeirotis",
  title =        "Introduction to the Special Issue on {EC'12}",
  journal =      j-TEAC,
  volume =       "3",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2742678",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 27 17:58:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Caragiannis:2015:APN,
  author =       "Ioannis Caragiannis and Angelo Fanelli and Nick Gravin
                 and Alexander Skopalik",
  title =        "Approximate Pure {Nash} Equilibria in Weighted
                 Congestion Games: Existence, Efficient Computation, and
                 Structure",
  journal =      j-TEAC,
  volume =       "3",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2614687",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 27 17:58:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider structural and algorithmic questions
                 related to the Nash dynamics of weighted congestion
                 games. In weighted congestion games with linear latency
                 functions, the existence of pure Nash equilibria is
                 guaranteed by a potential function argument.
                 Unfortunately, this proof of existence is inefficient
                 and computing pure Nash equilibria in such games is a
                 PLS-hard problem even when all players have unit
                 weights. The situation gets worse when superlinear
                 (e.g., quadratic) latency functions come into play; in
                 this case, the Nash dynamics of the game may contain
                 cycles and pure Nash equilibria may not even exist.
                 Given these obstacles, we consider approximate pure
                 Nash equilibria as alternative solution concepts. A $
                 \rho $-approximate pure Nash equilibrium is a state of
                 a (weighted congestion) game from which no player has
                 any incentive to deviate in order to improve her cost
                 by a multiplicative factor higher than $ \rho $. Do
                 such equilibria exist for small values of $ \rho $?
                 And if so, can we compute them efficiently? We provide
                 positive answers to both questions for weighted
                 congestion games with polynomial latency functions by
                 exploiting an ``approximation'' of such games by a new
                 class of potential games that we call $ \Psi $-games.
                 This allows us to show that these games have $
                 d!$-approximate pure Nash equilibria, where $d$ is the
                 maximum degree of the latency functions. Our main
                 technical contribution is an efficient algorithm for
                 computing $O(1)$-approximate pure Nash equilibria when
                 $d$ is a constant. For games with linear latency
                 functions, the approximation guarantee is
                 $ (3 + \sqrt 5) / 2 + O(\gamma)$ for arbitrarily small
                 $ \gamma > 0$; for latency functions with maximum degree
                 $ d \geq 2$, it is $ d^{2 d + o(d)}$. The running time
                 is polynomial
                 in the number of bits in the representation of the game
                 and $ 1 / \gamma $. As a byproduct of our techniques,
                 we also show the following interesting structural
                 statement for weighted congestion games with polynomial
                 latency functions of maximum degree $ d \geq 2 $:
                 polynomially-long sequences of best-response moves from
                 any initial state to a $ d^{O(d^2)}$-approximate pure
                 Nash equilibrium exist and can be efficiently
                 identified in such games as long as $d$ is a constant.
                 To the best of our knowledge, these are the first
                 positive algorithmic results for approximate pure Nash
                 equilibria in weighted congestion games. Our techniques
                 significantly extend our recent work on unweighted
                 congestion games through the use of $ \Psi $-games. The
                 concept of approximating nonpotential games by
                 potential ones is interesting in itself and might have
                 further applications.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Parkes:2015:BDR,
  author =       "David C. Parkes and Ariel D. Procaccia and Nisarg
                 Shah",
  title =        "Beyond Dominant Resource Fairness: Extensions,
                 Limitations, and Indivisibilities",
  journal =      j-TEAC,
  volume =       "3",
  number =       "1",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2739040",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 27 17:58:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the problem of allocating multiple resources
                 to agents with heterogeneous demands. Technological
                 advances such as cloud computing and data centers
                 provide a new impetus for investigating this problem
                 under the assumption that agents demand the resources
                 in fixed proportions, known in economics as Leontief
                 preferences. In a recent paper, Ghodsi et al. [2011]
                 introduced the dominant resource fairness (DRF)
                 mechanism, which was shown to possess highly desirable
                 theoretical properties under Leontief preferences. We
                 extend their results in three directions. First, we
                 show that DRF generalizes to more expressive settings,
                 and leverage a new technical framework to formally
                 extend its guarantees. Second, we study the relation
                 between social welfare and properties such as
                 truthfulness; DRF performs poorly in terms of social
                 welfare, but we show that this is an unavoidable
                 shortcoming that is shared by every mechanism that
                 satisfies one of three basic properties. Third, and
                 most importantly, we study a realistic setting that
                 involves indivisibilities. We chart the boundaries of
                 the possible in this setting, contributing a new
                 relaxed notion of fairness and providing both
                 possibility and impossibility results.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Babaioff:2015:DPL,
  author =       "Moshe Babaioff and Shaddin Dughmi and Robert Kleinberg
                 and Aleksandrs Slivkins",
  title =        "Dynamic Pricing with Limited Supply",
  journal =      j-TEAC,
  volume =       "3",
  number =       "1",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2559152",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 27 17:58:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the problem of designing
                 revenue-maximizing online posted-price mechanisms when
                 the seller has limited supply. A seller has k identical
                 items for sale and is facing n potential buyers
                 (``agents'') that are arriving sequentially. Each agent
                 is interested in buying one item. Each agent's value
                 for an item is an independent sample from some fixed
                 (but unknown) distribution with support [0,1]. The
                 seller offers a take-it-or-leave-it price to each
                 arriving agent (possibly different for different
                 agents), and aims to maximize his expected revenue. We
                 focus on mechanisms that do not use any information
                 about the distribution; such mechanisms are called
                 detail-free (or prior-independent). They are desirable
                 because knowing the distribution is unrealistic in many
                 practical scenarios. We study how the revenue of such
                 mechanisms compares to the revenue of the optimal
                 offline mechanism that knows the distribution
                 (``offline benchmark''). We present a detail-free
                 online posted-price mechanism whose revenue is at most
                 $ O((k \log n)^{2 / 3}) $ less than the offline benchmark,
                 for every distribution that is regular. In fact, this
                 guarantee holds without any assumptions if the
                 benchmark is relaxed to fixed-price mechanisms.
                 Further, we prove a matching lower bound. The
                 performance guarantee for the same mechanism can be
                 improved to $ O (\sqrt k \log n) $, with a
                 distribution-dependent constant, if the ratio $ k / n $
                 is sufficiently small. We show that, in the worst case
                 over all demand distributions, this is essentially the
                 best rate that can be obtained with a
                 distribution-specific constant. On a technical level,
                 we exploit the connection to multiarmed bandits (MAB).
                 While dynamic pricing with unlimited supply can easily
                 be seen as an MAB problem, the intuition behind MAB
                 approaches breaks when applied to the setting with
                 limited supply. Our high-level conceptual contribution
                 is that even the limited supply setting can be
                 fruitfully treated as a bandit problem.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Dutting:2015:PRT,
  author =       "Paul D{\"u}tting and Felix Fischer and Pichayut
                 Jirapinyo and John K. Lai and Benjamin Lubin and David
                 C. Parkes",
  title =        "Payment Rules through Discriminant-Based Classifiers",
  journal =      j-TEAC,
  volume =       "3",
  number =       "1",
  pages =        "5:1--5:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2559049",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 27 17:58:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In mechanism design it is typical to impose incentive
                 compatibility and then derive an optimal mechanism
                 subject to this constraint. By replacing the incentive
                 compatibility requirement with the goal of minimizing
                 expected ex post regret, we are able to adapt
                 statistical machine learning techniques to the design
                 of payment rules. This computational approach to
                 mechanism design is applicable to domains with
                 multi-dimensional types and situations where
                 computational efficiency is a concern. Specifically,
                 given an outcome rule and access to a type
                 distribution, we train a support vector machine with a
                 specific structure imposed on the discriminant
                 function, such that it implicitly learns a
                 corresponding payment rule with desirable incentive
                 properties. We extend the framework to adopt succinct
                 $k$-wise dependent valuations, leveraging a connection
                 with maximum a posteriori assignment on Markov networks
                 to enable training to scale up to settings with a large
                 number of items; we evaluate this construction in the
                 case where $ k = 2 $. We present applications to
                 multiparameter combinatorial auctions with approximate
                 winner determination, and the assignment problem with
                 an egalitarian outcome rule. Experimental results
                 demonstrate that the construction produces payment
                 rules with low ex post regret, and that penalizing
                 classification error is effective in preventing
                 failures of ex post individual rationality.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Roughgarden:2015:PAG,
  author =       "Tim Roughgarden",
  title =        "The Price of Anarchy in Games of Incomplete
                 Information",
  journal =      j-TEAC,
  volume =       "3",
  number =       "1",
  pages =        "6:1--6:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2737816",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 27 17:58:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We define smooth games of incomplete information. We
                 prove an ``extension theorem'' for such games: price of
                 anarchy bounds for pure Nash equilibria for all induced
                 full-information games extend automatically, without
                 quantitative degradation, to all mixed-strategy
                 Bayes--Nash equilibria with respect to a product prior
                 distribution over players' preferences. We also note
                 that, for Bayes--Nash equilibria in games with
                 correlated player preferences, there is no general
                 extension theorem for smooth games. We give several
                 applications of our definition and extension theorem.
                 First, we show that many games of incomplete
                 information for which the price of anarchy has been
                 studied are smooth in our sense. Our extension theorem
                 unifies much of the known work on the price of anarchy
                 in games of incomplete information. Second, we use our
                 extension theorem to prove new bounds on the price of
                 anarchy of Bayes--Nash equilibria in routing games with
                 incomplete information.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Goldstein:2015:IET,
  author =       "Daniel G. Goldstein and R. Preston McAfee and
                 Siddharth Suri",
  title =        "Improving the Effectiveness of Time-Based Display
                 Advertising",
  journal =      j-TEAC,
  volume =       "3",
  number =       "2",
  pages =        "7:1--7:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2716323",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Apr 21 11:23:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Display advertisements are typically sold by the
                 impression, where one impression is simply one download
                 of an ad. Previous work has shown that the longer an ad
                 is in view, the more likely a user is to remember it
                 and that there are diminishing returns to increased
                 exposure time [Goldstein et al. 2011]. Since a pricing
                 scheme that is at least partially based on time is more
                 exact than one based solely on impressions, time-based
                 advertising may become an industry standard. We answer
                 an open question concerning time-based pricing schemes:
                 how should time slots for advertisements be divided? We
                 provide evidence that ads can be scheduled in a way
                 that leads to greater total recollection, which
                 advertisers value, and increased revenue, which
                 publishers value. We document two main findings. First,
                 we show that displaying two shorter ads results in more
                 total recollection than displaying one longer ad of
                 twice the duration. Second, we show that this effect
                 disappears as the duration of these ads increases. We
                 conclude with a theoretical prediction regarding the
                 circumstances under which the display advertising
                 industry would benefit if it moved to a partially or
                 fully time-based standard.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Ganzfried:2015:SOE,
  author =       "Sam Ganzfried and Tuomas Sandholm",
  title =        "Safe Opponent Exploitation",
  journal =      j-TEAC,
  volume =       "3",
  number =       "2",
  pages =        "8:1--8:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2716322",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Apr 21 11:23:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the problem of playing a repeated
                 two-player zero-sum game safely: that is, guaranteeing
                 at least the value of the game per period in
                 expectation regardless of the strategy used by the
                 opponent. Playing a stage-game equilibrium strategy at
                 each time step clearly guarantees safety, and prior
                 work has (incorrectly) stated that it is impossible to
                 simultaneously deviate from a stage-game equilibrium
                 (in hope of exploiting a suboptimal opponent) and to
                 guarantee safety. We show that such profitable
                 deviations are indeed possible specifically in games
                 where certain types of ``gift'' strategies exist, which
                 we define formally. We show that the set of strategies
                 constituting such gifts can be strictly larger than the
                 set of iteratively weakly-dominated strategies; this
                 disproves another recent assertion which states that
                 all noniteratively weakly dominated strategies are best
                 responses to each equilibrium strategy of the other
                 player. We present a full characterization of safe
                 strategies, and develop efficient algorithms for
                 exploiting suboptimal opponents while guaranteeing
                 safety. We also provide analogous results for
                 extensive-form games of perfect and imperfect
                 information, and present safe exploitation algorithms
                 and full characterizations of safe strategies for those
                 settings as well. We present experimental results in
                 Kuhn poker, a canonical test problem for game-theoretic
                 algorithms. Our experiments show that (1) aggressive
                 safe exploitation strategies significantly outperform
                 adjusting the exploitation within stage-game
                 equilibrium strategies only and (2) all the safe
                 exploitation strategies significantly outperform a
                 (nonsafe) best response strategy against strong dynamic
                 opponents.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Hoefer:2015:SSA,
  author =       "Martin Hoefer and Thomas Kesselheim",
  title =        "Secondary Spectrum Auctions for Symmetric and
                 Submodular Bidders",
  journal =      j-TEAC,
  volume =       "3",
  number =       "2",
  pages =        "9:1--9:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2739041",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Tue Apr 21 11:23:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study truthful auctions for secondary spectrum
                 usage in wireless networks. In this scenario, $n$
                 communication requests need to be allocated to $k$
                 available channels that are subject to interference and
                 noise. We present the first truthful mechanisms for
                 secondary spectrum auctions with symmetric or
                 submodular valuations. Our approach to model
                 interference uses an edge-weighted conflict graph, and
                 our algorithms provide asymptotically almost optimal
                 approximation bounds for conflict graphs with a small
                 inductive independence number $ \rho \ll n $. This
                 approach covers a large variety of interference models
                 such as, for instance, the protocol model or the
                 recently popular physical model of interference. For
                 unweighted conflict graphs and symmetric valuations we
                 use LP-rounding to obtain $ O(\rho)$-approximate
                 mechanisms; for weighted conflict graphs we get a
                 factor of $ O(\rho \cdot (\log n + \log k))$. For
                 submodular users we combine the convex rounding
                 framework of Dughmi et al. [2011] with randomized
                 metarounding to obtain $ O(\rho)$-approximate
                 mechanisms for matroid-rank-sum valuations; for
                 weighted conflict graphs we can fully drop the
                 dependence on $k$ to get $ O(\rho \cdot \log n)$. We
                 conclude with promising initial results for
                 deterministically truthful mechanisms that allow
                 approximation factors based on $ \rho $.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Wilkens:2015:SCM,
  author =       "Christopher A. Wilkens and Balasubramanian Sivan",
  title =        "Single-Call Mechanisms",
  journal =      j-TEAC,
  volume =       "3",
  number =       "2",
  pages =        "10:1--10:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2741027",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Tue Apr 21 11:23:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Truthfulness is fragile and demanding. It is
                 oftentimes harder to guarantee truthfulness when
                 solving a problem than it is to solve the problem
                 itself. Even worse, truthfulness can be utterly
                 destroyed by small uncertainties in a mechanism's
                 outcome. One obstacle is that truthful payments depend
                 on outcomes other than the one realized, such as the
                 lengths of non-shortest paths in a shortest-path
                 auction. Single-call mechanisms are a powerful tool
                 that circumvents this obstacle: they implicitly charge
                 truthful payments, guaranteeing truthfulness in
                 expectation using only the outcome realized by the
                 mechanism. The cost of such truthfulness is a trade-off
                 between the expected quality of the outcome and the
                 risk of large payments. We study two of the most
                 general domains for truthful mechanisms and largely
                 settle when and to what extent single-call mechanisms
                 are possible. The first single-call construction was
                 discovered by Babaioff et al. [2010] in
                 single-parameter domains. They give a transformation
                 that turns any monotone, single-parameter allocation
                 rule into a truthful-in-expectation single-call
                 mechanism. Our first result is a natural complement to
                 Babaioff et al. [2010]: we give a new transformation
                 that produces a single-call VCG mechanism from any
                 allocation rule for which VCG payments are truthful.
                 Second, in both the single-parameter and VCG settings,
                 we precisely characterize the possible transformations,
                 showing that a wide variety of transformations are
                 possible but that all take a very simple form. Finally,
                 we study the inherent trade-off between the expected
                 quality of the outcome and the risk of large payments.
                 We show that our construction and that of Babaioff et
                 al. [2010] simultaneously optimize a variety of metrics
                 in their respective domains. Our study is motivated by
                 settings where uncertainty in a mechanism renders other
                 known techniques untruthful, and we offer a variety of
                 examples where such uncertainty can arise. In
                 particular, we analyze pay-per-click advertising
                 auctions, where the truthfulness of the standard
                 VCG-based auction is easily broken when the
                 auctioneer's estimated click-through-rates are
                 imprecise.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chakrabarti:2015:TSO,
  author =       "Deepayan Chakrabarti and Erik Vee",
  title =        "Traffic Shaping to Optimize Ad Delivery",
  journal =      j-TEAC,
  volume =       "3",
  number =       "2",
  pages =        "11:1--11:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2739010",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Tue Apr 21 11:23:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Web publishers must balance two objectives: how to
                 keep users engaged by directing them to relevant
                 content, and how to properly monetize this user
                 traffic. The standard approach is to solve each problem
                 in isolation, for example, by displaying content that
                 is tailored to the user's interests so as to maximize
                 clickthrough rates (CTR), and also by building a
                 standalone ad serving system that displays ads
                 depending on the user's characteristics, the article
                 being viewed by the user, and advertiser-specified
                 constraints. However, showing the user only those
                 articles with highest expected CTR precludes the
                 display of some ads; if the publisher had previously
                 guaranteed the delivery of a certain volume of
                 impressions to such ads, then underdelivery penalties
                 might have to be paid. We propose a joint optimization
                 of article selection and ad serving that minimizes
                 underdelivery by shaping some of the incoming traffic
                 to pages where underperforming ads can be displayed,
                 while incurring only minor drops in CTR. In addition to
                 formulating the problem, we design an online
                 optimization algorithm that can find the optimal
                 traffic shaping probabilities for each new user using
                 only a cache of one number per ad contract. Experiments
                 on a large real-world ad-serving Web portal demonstrate
                 significant advantages over the standalone approach: a
                 threefold reduction in underdelivery with only a 10\%
                 drop in CTR, or a 2.6-fold reduction with a 4\% CTR
                 drop, and similar results over a wide range.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Ghosh:2015:MME,
  author =       "Arpita Ghosh and Mohammad Mahdian and R. Preston
                 McAfee and Sergei Vassilvitskii",
  title =        "To Match or Not to Match: Economics of Cookie Matching
                 in Online Advertising",
  journal =      j-TEAC,
  volume =       "3",
  number =       "2",
  pages =        "12:1--12:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2745801",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Tue Apr 21 11:23:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Modern online advertising increasingly relies on the
                 ability to follow the same user across the Internet
                 using technology called cookie matching to increase
                 efficiency in ad allocation. Web publishers today use
                 this technology to share information about the websites
                 a user has visited, making it possible to target
                 advertisements to users based on their prior history.
                 This raises the question: do publishers (who are
                 competitors for advertising money) always have the
                 incentive to share online information? Intuitive
                 arguments as well as anecdotal evidence suggest that
                 sometimes a premium publisher might suffer from
                 information sharing through an effect called
                 information leakage: by sharing user information with
                 the advertiser,
                 advertiser will be able to target the same user
                 elsewhere on cheaper publishers, leading to a dilution
                 of the value of the supply on the premium publishers.
                 The goal of this article is to explore this aspect of
                 online information sharing. We show that, when
                 advertisers are homogeneous in the sense that their
                 relative valuations of users are consistent, publishers
                 always agree about the benefits of cookie matching in
                 equilibrium: either all publishers' revenues benefit,
                 or all suffer, from cookie matching. We also show using
                 a simple model that, when advertisers are not
                 homogeneous, information leakage can indeed occur,
                 with cookie matching helping one publisher's revenues
                 while harming the other.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Kash:2015:EAS,
  author =       "Ian A. Kash and Eric J. Friedman and Joseph Y.
                 Halpern",
  title =        "An Equilibrium Analysis of Scrip Systems",
  journal =      j-TEAC,
  volume =       "3",
  number =       "3",
  pages =        "13:1--13:??",
  month =        jun,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2659006",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Fri Jul 31 19:26:16 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "A game-theoretic model of scrip (artificial currency)
                 systems is analyzed. It is shown that relative entropy
                 can be used to characterize the distribution of agent
                 wealth when all agents use threshold strategies; that
                 is, they volunteer to do work if and only if they have
                 below a threshold amount of money. Monotonicity of
                 agents' best-reply functions is used to show that scrip
                 systems have pure strategy equilibria where all agents
                 use threshold strategies. An algorithm is given that
                 can compute such an equilibrium and the resulting
                 distribution of wealth.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}
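
%%% The threshold-strategy dynamics described in the abstract above can be
%%% illustrated with a small simulation.  The Python sketch below is only
%%% an illustration and is not the algorithm analyzed in the article; the
%%% agent count, money supply, threshold, and matching rule are arbitrary
%%% choices made for the example.

import random

def simulate_scrip(n_agents=100, money_per_agent=2, threshold=5,
                   rounds=20000, seed=0):
    """Toy scrip system: each round a random agent requests a service and
    pays one unit of scrip to a volunteer, where an agent volunteers if
    and only if it holds less scrip than its threshold."""
    rng = random.Random(seed)
    wealth = [money_per_agent] * n_agents
    for _ in range(rounds):
        requester = rng.randrange(n_agents)
        if wealth[requester] == 0:
            continue  # the requester cannot pay, so no service is exchanged
        volunteers = [a for a in range(n_agents)
                      if a != requester and wealth[a] < threshold]
        if not volunteers:
            continue  # nobody is below threshold, so nobody volunteers
        worker = rng.choice(volunteers)
        wealth[requester] -= 1
        wealth[worker] += 1
    return wealth

if __name__ == "__main__":
    dist = simulate_scrip()
    for amount in range(max(dist) + 1):
        print("agents holding", amount, "units:", dist.count(amount))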

@Article{Immorlica:2015:ILR,
  author =       "Nicole Immorlica and Mohammad Mahdian",
  title =        "Incentives in Large Random Two-Sided Markets",
  journal =      j-TEAC,
  volume =       "3",
  number =       "3",
  pages =        "14:1--14:??",
  month =        jun,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2656202",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Fri Jul 31 19:26:16 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Many centralized two-sided markets form a matching
                 between participants by running a stable matching
                 algorithm. It is a well-known fact that no matching
                 mechanism based on a stable matching algorithm can
                 guarantee truthfulness as a dominant strategy for
                 participants. However, we show that in a probabilistic
                 setting where the preference lists on one side of the
                 market are composed of only a constant (independent of
                 the size of the market) number of entries, each drawn
                 from an arbitrary distribution, the number of
                 participants that have more than one stable partner is
                 vanishingly small. This proves (and generalizes) a
                 conjecture of Roth and Peranson [1999]. As a corollary
                 of this result, we show that, with high probability,
                 the truthful strategy is the best response for a random
                 player when the other players are truthful. We also
                 analyze equilibria of the deferred acceptance stable
                 matching game. We show that the game with complete
                 information has an equilibrium in which, in
                 expectation, a $ (1 - o(1)) $ fraction of the
                 strategies are truthful. In the more realistic setting
                 of a game of incomplete information, we show that the
                 set of truthful strategies forms a $ (1 +
                 o(1))$-approximate Bayesian--Nash equilibrium for
                 uniformly random preferences. Our results have
                 implications in many practical settings and are
                 inspired by the work of Roth and Peranson [1999] on the
                 National Residency Matching Program.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}
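
%%% The claim above, that few agents have more than one stable partner when
%%% one side has short random preference lists, can be checked empirically
%%% with textbook deferred acceptance: an agent has more than one stable
%%% partner exactly when its partners in the proposer-optimal and
%%% proposer-pessimal stable matchings differ.  The Python sketch below is
%%% not taken from the article; the market size, list length, and uniform
%%% preference model are arbitrary choices made for the example.

import random

def deferred_acceptance(proposer_prefs, receiver_rank):
    """Proposer-optimal stable matching.  proposer_prefs[p] lists the
    receivers acceptable to p, best first; receiver_rank[r] maps the
    proposers acceptable to r to ranks (lower is better).  Returns a
    proposer -> receiver dict (unmatched proposers are absent)."""
    next_choice = {p: 0 for p in proposer_prefs}
    holds = {}  # receiver -> proposer currently held
    free = [p for p in proposer_prefs if proposer_prefs[p]]
    while free:
        p = free.pop()
        while next_choice[p] < len(proposer_prefs[p]):
            r = proposer_prefs[p][next_choice[p]]
            next_choice[p] += 1
            if p not in receiver_rank[r]:
                continue  # r finds p unacceptable; p proposes further down
            q = holds.get(r)
            if q is None or receiver_rank[r][p] < receiver_rank[r][q]:
                holds[r] = p
                if q is not None:
                    free.append(q)  # the displaced proposer is free again
                break
            # otherwise r rejects p and p keeps proposing
    return {p: r for r, p in holds.items()}

def random_market(n, k, seed=0):
    """Men have short lists of k random women; women rank all men."""
    rng = random.Random(seed)
    men = ["m%d" % i for i in range(n)]
    women = ["w%d" % i for i in range(n)]
    men_prefs = {m: rng.sample(women, k) for m in men}
    women_prefs = {}
    for w in women:
        order = men[:]
        rng.shuffle(order)
        women_prefs[w] = order
    return men_prefs, women_prefs

def agents_with_two_stable_partners(men_prefs, women_prefs):
    men_rank = {m: {w: i for i, w in enumerate(ws)}
                for m, ws in men_prefs.items()}
    women_rank = {w: {m: i for i, m in enumerate(ms)}
                  for w, ms in women_prefs.items()}
    man_opt = deferred_acceptance(men_prefs, women_rank)    # man -> woman
    woman_opt = deferred_acceptance(women_prefs, men_rank)  # woman -> man
    man_pess = {m: w for w, m in woman_opt.items()}
    woman_pess = {w: m for m, w in man_opt.items()}
    count = sum(man_opt.get(m) != man_pess.get(m) for m in men_prefs)
    count += sum(woman_opt.get(w) != woman_pess.get(w) for w in women_prefs)
    return count

if __name__ == "__main__":
    n, k = 500, 5
    men_prefs, women_prefs = random_market(n, k, seed=1)
    print(agents_with_two_stable_partners(men_prefs, women_prefs),
          "of", 2 * n, "agents have more than one stable partner")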

@Article{Cavallo:2015:DAA,
  author =       "Ruggiero Cavallo and R. Preston Mcafee and Sergei
                 Vassilvitskii",
  title =        "Display Advertising Auctions with Arbitrage",
  journal =      j-TEAC,
  volume =       "3",
  number =       "3",
  pages =        "15:1--15:??",
  month =        jun,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2668033",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Fri Jul 31 19:26:16 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Online display advertising exchanges connect Web
                 publishers with advertisers seeking to place ads. In
                 many cases, the advertiser obtains value from an ad
                 impression (a viewing by a user) only if it is clicked,
                 and frequently advertisers prefer to pay contingent on
                 this occurring. But at the same time, many publishers
                 demand payment independent of clicks. Arbitragers with
                 good estimates of click-probabilities can resolve this
                 conflict by absorbing the risk and acting as an
                 intermediary, paying the publisher on allocation and
                 being paid only if a click occurs. This article
                 examines the incentives of advertisers and
                 arbitragers and contributes an efficient mechanism in
                 which truthful bidding by the advertisers and truthful
                 reporting of click predictions by arbitragers are
                 dominant strategies and which, provided a hazard rate
                 condition is satisfied, yields increased revenue to
                 the publisher. We provide
                 empirical evidence based on bid data from Yahoo's Right
                 Media Exchange suggesting that the mechanism would
                 increase revenue in practice.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Bilo:2015:BDN,
  author =       "Davide Bil{\`o} and Luciano Gual{\`a} and Guido
                 Proietti",
  title =        "Bounded-Distance Network Creation Games",
  journal =      j-TEAC,
  volume =       "3",
  number =       "3",
  pages =        "16:1--16:??",
  month =        jun,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2770639",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Fri Jul 31 19:26:16 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "A network creation game simulates a decentralized and
                 noncooperative construction of a communication network.
                 Informally, there are $n$ players sitting on the
                 network nodes, who attempt to establish reciprocal
                 communication by activating (thereby incurring a
                 certain cost) any of their incident links. The goal of
                 each player is to have all the other nodes as close as
                 possible in the resulting network, while buying as few
                 links as possible. According to this intuition, any
                 model of the game must then appropriately address a
                 balance between these two conflicting objectives.
                 Motivated by the fact that a player might have a strong
                 requirement about her centrality in the network, we
                 introduce a new setting in which a player who maintains
                 her (maximum or average) distance to the other nodes
                 within a given bound incurs a cost equal to the number
                 of activated edges; otherwise her cost is unbounded. We
                 study the problem of understanding the structure of
                 pure Nash equilibria of the resulting games, which we
                 call MaxBD and SumBD, respectively. For both games, we
                 show that when distance bounds associated with players
                 are nonuniform, then equilibria can be arbitrarily bad.
                 On the other hand, for MaxBD, we show that when nodes
                 have a uniform bound $ D \geq 3$ on the maximum
                 distance, then the price of anarchy (PoA) is lower and
                 upper bounded by 2 and $ O(n^{1 / \lfloor \log_3 D
                 \rfloor + 1})$, respectively (i.e., PoA is constant as
                 soon as $D$ is $ \Omega (n^\epsilon)$, for any $
                 \epsilon > 0$), while for the interesting case $ D =
                 2$, we are able to prove that the PoA is $ \Omega
                 (\sqrt {n})$ and $ O(\sqrt {n \log n})$. For the
                 uniform SumBD, we obtain similar (asymptotically)
                 results and moreover show that PoA becomes constant as
                 soon as the bound on the average distance is $ 2^{
                 \omega (\sqrt {\log n})}$.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chen:2015:ISI,
  author =       "Yiling Chen and Nicole Immorlica",
  title =        "Introduction to the Special Issue on {WINE'13}",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "17:1--17:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2796538",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Bateni:2015:RMN,
  author =       "Mohammadhossein Bateni and Nima Haghpanah and
                 Balasubramanian Sivan and Morteza Zadimoghaddam",
  title =        "Revenue Maximization with Nonexcludable Goods",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "18:1--18:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2790131",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the design of revenue-maximizing mechanisms
                 for selling nonexcludable public goods. In particular,
                 we study revenue-maximizing mechanisms in Bayesian
                 settings for facility location problems on graphs where
                 no agent can be excluded from using a facility that has
                 been constructed. We show that the pointwise
                 optimization problem involved in implementing the
                 revenue optimal mechanism, namely, optimizing over
                 arbitrary profiles of virtual values, is hard to
                 approximate within a factor of $ \Omega (n^{2 -
                 \epsilon }) $ (assuming P $ \neq $ NP) even in star
                 graphs. Furthermore, we show that optimizing the
                 expected revenue is APX-hard. However, in a relevant
                 special case, the rooted version with identical
                 distributions, we construct polynomial time truthful
                 mechanisms that approximate the optimal expected
                 revenue within a constant factor. We also study the
                 effect of partially mitigating nonexcludability by
                 collecting tolls for using the facilities. We show that
                 such ``posted-price'' mechanisms obtain significantly
                 higher revenue and often approach the optimal revenue
                 obtainable with full excludability.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Minooei:2015:NOR,
  author =       "Hadi Minooei and Chaitanya Swamy",
  title =        "Near-Optimal and Robust Mechanism Design for Covering
                 Problems with Correlated Players",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "19:1--19:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2790133",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the problem of designing
                 incentive-compatible, ex-post individually rational
                 (IR) mechanisms for covering problems in the Bayesian
                 setting, where players' types are drawn from an
                 underlying distribution and may be correlated, and the
                 goal is to minimize the expected total payment made by
                 the mechanism. We formulate a notion of incentive
                 compatibility (IC) that we call support-based IC that
                 is substantially more robust than Bayesian IC, and
                 develop black-box reductions from support-based-IC
                 mechanism design to algorithm design. For
                 single-dimensional settings, this black-box reduction
                 applies even when we only have an LP-relative
                 approximation algorithm for the algorithmic problem.
                 Thus, we obtain near-optimal mechanisms for various
                 covering settings, including single-dimensional
                 covering problems, multi-item procurement auctions, and
                 multidimensional facility location.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Fotakis:2015:TFD,
  author =       "Dimitris Fotakis and Emmanouil Zampetakis",
  title =        "Truthfulness Flooded Domains and the Power of
                 Verification for Mechanism Design",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "20:1--20:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2790086",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In this work, we investigate the reasons that make
                 symmetric partial verification essentially useless in
                 virtually all domains. Departing from previous work, we
                 consider any possible (finite or infinite) domain and
                 general symmetric verification. We identify a natural
                 property, namely, that the correspondence graph of a
                 symmetric verification $M$ is strongly connected by
                 finite paths along which the preferences are consistent
                 with the preferences at the endpoints, and prove that
                 this property is sufficient for the equivalence of
                 truthfulness and $M$-truthfulness. In fact, defining
                 appropriate versions of this property, we obtain this
                 result for deterministic and randomized mechanisms with
                 and without money. Moreover, we show that a slightly
                 relaxed version of this property is also necessary for
                 the equivalence of truthfulness and $M$-truthfulness.
                 Our conditions provide a generic and convenient way of
                 checking whether truthful implementation can take
                 advantage of any symmetric verification scheme in any
                 (finite or infinite) domain. Since the simplest
                 symmetric verification is the local verification,
                 specific cases of our result are closely related, in
                 the case without money, to the research about the
                 equivalence of local truthfulness and global
                 truthfulness. To complete the picture, we consider
                 asymmetric verification and prove that a social choice
                 function $f$ is $M$-truthfully implementable by some
                 asymmetric verification $M$ if and only if $f$ does not
                 admit a cycle of profitable deviations.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Kollias:2015:RPE,
  author =       "Konstantinos Kollias and Tim Roughgarden",
  title =        "Restoring Pure Equilibria to Weighted Congestion
                 Games",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "21:1--21:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2781678",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Congestion games model several interesting
                 applications, including routing and network formation
                 games, and also possess attractive theoretical
                 properties, including the existence of and convergence
                 of natural dynamics to a pure Nash equilibrium.
                 Weighted variants of congestion games that rely on
                 sharing costs proportional to players' weights do not
                 generally have pure-strategy Nash equilibria. We
                 propose a new way of assigning costs to players with
                 weights in congestion games that recovers the important
                 properties of the unweighted model. This method is
                 derived from the Shapley value, and it always induces a
                 game with a (weighted) potential function. For the
                 special cases of weighted network cost-sharing and
                 weighted routing games with Shapley value-based cost
                 shares, we prove tight bounds on the worst-case
                 inefficiency of equilibria. For weighted network
                 cost-sharing games, we precisely calculate the price of
                 stability for any given player weight vector, while for
                 weighted routing games, we precisely calculate the
                 price of anarchy, as a parameter of the set of
                 allowable cost functions.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Babichenko:2015:QCC,
  author =       "Yakov Babichenko and Siddharth Barman",
  title =        "Query Complexity of Correlated Equilibrium",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "22:1--22:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2785668",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study lower bounds on the query complexity of
                 determining correlated equilibrium. In particular, we
                 consider a query model in which an $n$-player game is
                 specified via a black box that returns players'
                 utilities at pure action profiles. In this model, we
                 establish that in order to compute a correlated
                 equilibrium, any deterministic algorithm must query the
                 black box an exponential (in $n$) number of times.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}
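
%%% The query model in the abstract above is easy to make concrete: the
%%% game is hidden behind an oracle that, given a pure action profile,
%%% returns every player's utility at that profile and counts how often it
%%% is called.  The Python sketch below only illustrates the model (and the
%%% exponential number of pure profiles); it does not reproduce the
%%% lower-bound construction, and all names are invented for the example.

import itertools
import random

class PayoffOracle:
    """Black-box access to an n-player, two-action game: a query is a pure
    action profile (a length-n tuple of 0/1) and the answer is the vector
    of all players' utilities at that profile.  Queries are counted."""

    def __init__(self, n, seed=0):
        rng = random.Random(seed)
        self.n = n
        self.queries = 0
        # Hidden payoff table, drawn at random purely for the illustration.
        self._u = {profile: [rng.random() for _ in range(n)]
                   for profile in itertools.product((0, 1), repeat=n)}

    def query(self, profile):
        self.queries += 1
        return self._u[profile]

if __name__ == "__main__":
    n = 12
    oracle = PayoffOracle(n)
    # Reading off the entire game already takes 2^n queries, which is the
    # kind of exponential-in-n cost the lower bound shows deterministic
    # algorithms cannot avoid when computing a correlated equilibrium.
    for profile in itertools.product((0, 1), repeat=n):
        oracle.query(profile)
    print("players:", n, "queries:", oracle.queries, "(= 2^n =", 2 ** n, ")")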

@Article{Aumann:2015:EFD,
  author =       "Yonatan Aumann and Yair Dombb",
  title =        "The Efficiency of Fair Division with Connected
                 Pieces",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "23:1--23:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2781776",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "The cake-cutting setting, in which a single
                 heterogeneous good must be divided between multiple
                 parties with different tastes, is a classic model for
                 studying questions regarding fairness in resource
                 allocation. In this work, we turn our attention to
                 (economic) efficiency considerations in cake cutting,
                 examining the possible trade-offs between meeting the
                 fairness criteria, on the one hand, and maximizing
                 social welfare, on the other. We focus on divisions
                 that give each agent a single (contiguous) piece of the
                 cake and provide tight (or, in some cases, nearly
                 tight) bounds on the possible degradation in
                 utilitarian and egalitarian welfare resulting from
                 meeting the fairness requirements.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Dimitrov:2015:SPM,
  author =       "Stanko Dimitrov and Rahul Sami and Marina A. Epelman",
  title =        "Subsidized Prediction Mechanisms for Risk-Averse
                 Agents",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "24:1--24:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2716327",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In this article, we study the design and
                 characterization of sequential prediction mechanisms in
                 the presence of agents with unknown risk aversion. We
                 formulate a collection of desirable properties for any
                 sequential forecasting mechanism. We present a
                 randomized mechanism that satisfies all of these
                 properties, including a guarantee that it is myopically
                 optimal for each agent to report honestly, regardless
                 of her degree of risk aversion. We observe, however,
                 that the mechanism has an undesirable side effect: each
                 agent's expected reward, normalized against the
                 inherent value of her private information, decreases
                 exponentially with the number of agents. We prove a
                 negative result showing that this is unavoidable: any
                 mechanism that is myopically strategyproof for agents
                 of all risk types, while also satisfying other natural
                 properties of sequential forecasting mechanisms, must
                 sometimes result in a player getting an exponentially
                 small expected normalized reward.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Xiao:2015:SOD,
  author =       "Yuanzhang Xiao and Mihaela {Van Der Schaar}",
  title =        "Socially-Optimal Design of Service Exchange Platforms
                 with Imperfect Monitoring",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "25:1--25:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2785627",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the design of service exchange platforms in
                 which long-lived anonymous users exchange services with
                 each other. The users are randomly and repeatedly
                 matched into pairs of clients and servers, and each
                 server can choose to provide high-quality or
                 low-quality services to the client with whom it is
                 matched. Since the users are anonymous and incur high
                 costs (e.g., exert high effort) in providing
                 high-quality services, it is crucial that the platform
                 incentivizes users to provide high-quality services.
                 Rating mechanisms have been shown to work effectively
                 as incentive schemes in such platforms. A rating
                 mechanism labels each user by a rating, which
                 summarizes the user's past behaviors, recommends a
                 desirable behavior to each server (e.g., provide
                 higher-quality services for clients with higher
                 ratings), and updates each server's rating based on the
                 recommendation and its client's report on the service
                 quality. Based on this recommendation, a low-rating
                 user is less likely to obtain high-quality services,
                 thereby providing users with incentives to obtain high
                 ratings by providing high-quality services. However, if
                 monitoring or reporting is imperfect (clients do not
                 perfectly assess the quality or the reports are lost),
                 a user's rating may not be updated correctly. In the
                 presence of such errors, existing rating mechanisms
                 cannot achieve the social optimum. In this article, we
                 propose the first rating mechanism that does achieve
                 the social optimum, even in the presence of monitoring
                 or reporting errors. On one hand, the socially-optimal
                 rating mechanism is necessarily somewhat complicated,
                 because the optimal recommended behavior depends not
                 only on the current rating distribution, but also
                 (necessarily) on the history of past rating
                 distributions in the platform. On the other hand, we
                 prove that the social optimum can be achieved by
                 ``simple'' rating mechanisms that use binary rating
                 labels and a small set of (three) recommended
                 behaviors. We provide design guidelines of
                 socially-optimal rating mechanisms and a low-complexity
                 online algorithm for the rating mechanism to determine
                 the optimal recommended behavior.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Nath:2015:AMD,
  author =       "Swaprava Nath and Arunava Sen",
  title =        "Affine Maximizers in Domains with Selfish Valuations",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "26:1--26:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2786014",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the domain of selfish and continuous
                 preferences over a ``rich'' allocation space and show
                 that onto, strategyproof and allocation non-bossy
                 social choice functions are affine maximizers. Roberts
                 [1979] proves this result for a finite set of
                 alternatives and an unrestricted valuation space. In
                 this article, we show that in a subdomain of the
                 unrestricted valuations with the additional assumption
                 of allocation non-bossiness, using the richness of the
                 allocations, the strategyproof social choice functions
                 can be shown to be affine maximizers. We provide an
                 example to show that allocation non-bossiness is indeed
                 critical for this result. This work shows that an
                 affine maximizer result needs a certain amount of
                 richness split across valuations and allocations.",
  acknowledgement = ack-nhfb,
  articleno =    "26",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Dutting:2015:EMA,
  author =       "Paul D{\"u}tting and Monika Henzinger and Ingmar
                 Weber",
  title =        "An Expressive Mechanism for Auctions on the {Web}",
  journal =      j-TEAC,
  volume =       "4",
  number =       "1",
  pages =        "1:1--1:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2716312",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Sat Feb 6 08:20:19 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Auctions are widely used on the Web. Applications
                 range from sponsored search to platforms such as eBay.
                 In these and many other applications, the auctions in
                 use are single-/multi-item auctions with unit demand.
                 The main drawback of standard mechanisms for this type
                 of auctions, such as VCG and GSP, is the limited
                 expressiveness that they offer to the bidders. The
                 General Auction Mechanism (GAM) of Aggarwal et al.
                 [2009] takes a first step toward addressing the problem
                 of limited expressiveness by computing a bidder
                 optimal, envy-free outcome for linear utility functions
                 with identical slopes and a single discontinuity per
                 bidder-item pair. We show that in many practical
                 situations this does not suffice to adequately model
                 the preferences of the bidders, and we overcome this
                 problem by presenting the first mechanism for piecewise
                 linear utility functions with nonidentical slopes and
                 multiple discontinuities. Our mechanism runs in
                 polynomial time. Like GAM, it is incentive compatible
                 for inputs that fulfill a certain nondegeneracy
                 assumption, but our requirement is more general than
                 the requirement of GAM. For discontinuous utility
                 functions that are nondegenerate as well as for
                 continuous utility functions the outcome of our
                 mechanism is a competitive equilibrium. We also show
                 how our mechanism can be used to compute approximately
                 bidder optimal, envy-free outcomes for a general class
                 of continuous utility functions via piecewise linear
                 approximation. Finally, we prove hardness results for
                 even more expressive settings.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Colini-Baldeschi:2015:MKS,
  author =       "Riccardo Colini-Baldeschi and Stefano Leonardi and
                 Monika Henzinger and Martin Starnberger",
  title =        "On Multiple Keyword Sponsored Search Auctions with
                 Budgets",
  journal =      j-TEAC,
  volume =       "4",
  number =       "1",
  pages =        "2:1--2:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2818357",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Sat Feb 6 08:20:19 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study multiple keyword sponsored search auctions
                 with budgets. Each keyword has multiple ad slots with a
                 click-through rate. The bidders have additive
                 valuations, which are linear in the click-through
                 rates, and budgets, which are restricting their overall
                 payments. Additionally, the number of slots per keyword
                 assigned to a bidder is bounded. We show the following
                 results: (1) We give the first mechanism for multiple
                 keywords, where click-through rates differ among slots.
                 Our mechanism is incentive compatible in expectation,
                 individually rational in expectation, and Pareto
                 optimal. (2) We study the combinatorial setting, where
                 each bidder is only interested in a subset of the
                 keywords. We give an incentive compatible, individually
                 rational, Pareto-optimal, and deterministic mechanism
                 for identical click-through rates. (3) We give an
                 impossibility result for incentive compatible,
                 individually rational, Pareto-optimal, and
                 deterministic mechanisms for bidders with diminishing
                 marginal valuations.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Aumann:2015:ATT,
  author =       "Yonatan Aumann and Yair Dombb and Avinatan Hassidim",
  title =        "Auctioning Time: Truthful Auctions of Heterogeneous
                 Divisible Goods",
  journal =      j-TEAC,
  volume =       "4",
  number =       "1",
  pages =        "3:1--3:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2833086",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Sat Feb 6 08:20:19 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the problem of auctioning time --- a
                 one-dimensional continuously-divisible heterogeneous
                 good --- among multiple agents. Applications include
                 auctioning time for using a shared device, auctioning
                 TV commercial slots, and more. Different agents may
                 have different valuations for the different possible
                 intervals; the goal is to maximize the aggregate
                 utility. Agents are self-interested and may
                 misrepresent their true valuation functions if this
                 benefits them. Thus, we seek auctions that are
                 truthful. Considering the case that each agent may
                 obtain a single interval, the challenge is twofold, as
                 we need to determine both where to slice the interval,
                 and who gets what slice. We consider two settings:
                 discrete and continuous. In the discrete setting, we
                 are given a sequence of $m$ indivisible elements
                 $ (e_1, \ldots, e_m)$, and the auction must allocate
                 each agent
                 a consecutive subsequence of the elements. In the
                 continuous setting, we are given a continuous,
                 infinitely divisible interval, and the auction must
                 allocate each agent a subinterval. The agents'
                 valuations are nonatomic measures on the interval. We
                 show that, for both settings, the associated
                 computational problem is NP-complete even under very
                 restrictive assumptions. Hence, we provide
                 approximation algorithms. For the discrete case, we
                 provide a truthful auctioning mechanism that
                 approximates the optimal welfare to within a $ \log m$
                 factor. The mechanism works for arbitrary monotone
                 valuations. For the continuous setting, we provide a
                 truthful auctioning mechanism that approximates the
                 optimal welfare to within an $ O(\log n)$ factor
                 (where $n$ is the number of agents). Additionally, we
                 provide a
                 truthful 2-approximation mechanism for the case that
                 all pieces must be of some fixed size.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Dutting:2015:AHI,
  author =       "Paul D{\"u}tting and Monika Henzinger and Martin
                 Starnberger",
  title =        "Auctions for Heterogeneous Items and Budget Limits",
  journal =      j-TEAC,
  volume =       "4",
  number =       "1",
  pages =        "4:1--4:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2818351",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Sat Feb 6 08:20:19 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study individual rational, Pareto-optimal, and
                 incentive compatible mechanisms for auctions with
                 heterogeneous items and budget limits. We consider
                 settings with multiunit demand and additive valuations.
                 For single-dimensional valuations we prove a positive
                 result for randomized mechanisms, and a negative result
                 for deterministic mechanisms. While the positive result
                 allows for private budgets, the negative result is for
                 public budgets. For multidimensional valuations and
                 public budgets we prove an impossibility result that
                 applies to deterministic and randomized mechanisms.
                 Taken together this shows the power of randomization in
                 certain settings with heterogeneous items, but it also
                 shows its limitations.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Alon:2015:HPT,
  author =       "Noga Alon and Robert Bredereck and Jiehua Chen and
                 Stefan Kratsch and Rolf Niedermeier and Gerhard J.
                 Woeginger",
  title =        "How to Put Through Your Agenda in Collective Binary
                 Decisions",
  journal =      j-TEAC,
  volume =       "4",
  number =       "1",
  pages =        "5:1--5:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2837467",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Sat Feb 6 08:20:19 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the following decision-making scenario: a
                 society of voters has to find an agreement on a set of
                 proposals, and every single proposal is to be accepted
                 or rejected. Each voter supports a certain subset of
                 the proposals-the favorite ballot of this voter-and
                 opposes the remaining ones. He accepts a ballot if he
                 supports more than half of the proposals in this
                 ballot. The task is to decide whether there exists a
                 ballot approving a specified number of selected
                 proposals (agenda) such that all voters (or a strict
                 majority of them) accept this ballot. We show that, on
                 the negative side, both problems are NP-complete, and
                 on the positive side, they are fixed-parameter
                 tractable with respect to the total number of proposals
                 or with respect to the total number of voters. We look
                 into further natural parameters and study their
                 influence on the computational complexity of both
                 problems, thereby providing both tractability and
                 intractability results. Furthermore, we provide tight
                 combinatorial bounds on the worst-case size of an
                 accepted ballot in terms of the number of voters.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Fragiadakis:2015:SMM,
  author =       "Daniel Fragiadakis and Atsushi Iwasaki and Peter
                 Troyan and Suguru Ueda and Makoto Yokoo",
  title =        "Strategyproof Matching with Minimum Quotas",
  journal =      j-TEAC,
  volume =       "4",
  number =       "1",
  pages =        "6:1--6:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2841226",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Sat Feb 6 08:20:19 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study matching markets in which institutions may
                 have minimum and maximum quotas. Minimum quotas are
                 important in many settings, such as hospital residency
                 matching, military cadet matching, and school choice,
                 but current mechanisms are unable to accommodate them,
                 leading to the use of ad hoc solutions. We introduce
                 two new classes of strategyproof mechanisms that allow
                 for minimum quotas as an explicit input and show that
                 our mechanisms improve welfare relative to existing
                 approaches. Because minimum quotas cause a theoretical
                 incompatibility between standard fairness and
                 nonwastefulness properties, we introduce new
                 second-best axioms and show that they are satisfied by
                 our mechanisms. Last, we use simulations to quantify
                 (1) the magnitude of the potential efficiency gains
                 from our mechanisms and (2) how far the resulting
                 assignments are from the first-best definitions of
                 fairness and nonwastefulness. Combining both the
                 theoretical and simulation results, we argue that our
                 mechanisms will improve the performance of matching
                 markets with minimum quota constraints in practice.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Babaioff:2016:MDS,
  author =       "Moshe Babaioff and Moran Feldman and Moshe
                 Tennenholtz",
  title =        "Mechanism Design with Strategic Mediators",
  journal =      j-TEAC,
  volume =       "4",
  number =       "2",
  pages =        "7:1--7:??",
  month =        feb,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2841227",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Sat Feb 6 08:20:20 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the problem of designing mechanisms that
                 interact with strategic agents through strategic
                 intermediaries (or mediators), and investigate the cost
                 to society due to the mediators' strategic behavior.
                 Selfish agents with private information are each
                 associated with exactly one strategic mediator, and can
                 interact with the mechanism exclusively through that
                 mediator. Each mediator aims to optimize the combined
                 utility of his agents, while the mechanism aims to
                 optimize the combined utility of all agents. We focus
                 on the problem of facility location on a metric induced
                 by a publicly known tree. With nonstrategic mediators,
                 there is a dominant strategy mechanism that is optimal.
                 We show that when both agents and mediators act
                 strategically, there is no dominant strategy mechanism
                 that achieves any approximation. We, thus, slightly
                 relax the incentive constraints, and define the notion
                 of a two-sided incentive compatible mechanism. We show
                 that the 3-competitive deterministic mechanism
                 suggested by Procaccia and Tennenholtz [2013] and Dekel
                 et al. [2010] for lines extends naturally to trees, and
                 is still 3-competitive as well as two-sided incentive
                 compatible. This is essentially the best possible
                 (follows from Dekel et al. [2010] and Procaccia and
                 Tennenholtz [2013]). We then show that by allowing
                 randomization one can construct a 2-competitive
                 randomized mechanism that is two-sided incentive
                 compatible, and this is also essentially tight. This
                 result also reduces a gap left in the work of Procaccia
                 and Tennenholtz [2013] and Lu et al. [2009] for the
                 problem of designing strategy-proof mechanisms for
                 weighted agents with no mediators on a line. We also
                 investigate a generalization of the preceding setting
                 where there are multiple levels of mediators.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Roughgarden:2016:NCS,
  author =       "Tim Roughgarden and Okke Schrijvers",
  title =        "Network Cost-Sharing without Anonymity",
  journal =      j-TEAC,
  volume =       "4",
  number =       "2",
  pages =        "8:1--8:??",
  month =        feb,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2841228",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Sat Feb 6 08:20:20 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider network cost-sharing games with
                 nonanonymous cost functions, where the cost of each
                 edge is a submodular function of its users, and this
                 cost is shared using the Shapley value. Nonanonymous
                 cost functions model asymmetries between the players,
                 which can arise from different bandwidth requirements,
                 durations of use, services needed, and so on. These
                 games can possess multiple Nash equilibria of wildly
                 varying quality. The goal of this article is to
                 identify well-motivated equilibrium refinements that
                 admit good worst-case approximation bounds. Our primary
                 results are tight bounds on the cost of strong Nash
                 equilibria and potential function minimizers in network
                 cost-sharing games with nonanonymous cost functions,
                 parameterized by the set C of allowable submodular cost
                 functions. These two worst-case bounds coincide for
                 every set C, and equal the summability parameter
                 introduced in Roughgarden and Sundararajan [2009] to
                 characterize efficiency loss in a family of
                 cost-sharing mechanisms. Thus, a single parameter
                 simultaneously governs the worst-case inefficiency of
                 network cost-sharing games (in two incomparable senses)
                 and cost-sharing mechanisms. This parameter is always
                 at most the $k$th Harmonic number $H_k \approx \ln k$,
                 where $k$ is the number of players, and is constant for
                 many function classes of interest.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}
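
%%% For orientation, the Harmonic-number bound quoted in the abstract above
%%% is easy to evaluate; the following is just the standard definition and
%%% asymptotics of $H_k$ (background facts, not claims from the article):
%%%
%%%     H_k = \sum_{i=1}^{k} \frac{1}{i}, \qquad
%%%     H_4 = 1 + \frac{1}{2} + \frac{1}{3} + \frac{1}{4}
%%%         = \frac{25}{12} \approx 2.083, \qquad
%%%     H_k = \ln k + \gamma + o(1), \quad \gamma \approx 0.5772.
%%%
%%% So the worst-case inefficiency of these games grows at most
%%% logarithmically in the number of players and, as the abstract notes,
%%% is constant for many classes C of cost functions of interest.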

@Article{Christodoulou:2016:TBP,
  author =       "George Christodoulou and Annam{\'a}ria Kov{\'a}cs and
                 Alkmini Sgouritsa and Bo Tang",
  title =        "Tight Bounds for the Price of Anarchy of Simultaneous
                 First-Price Auctions",
  journal =      j-TEAC,
  volume =       "4",
  number =       "2",
  pages =        "9:1--9:??",
  month =        feb,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2847520",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Sat Feb 6 08:20:20 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the price of anarchy (PoA) of simultaneous
                 first-price auctions (FPAs) for buyers with submodular
                 and subadditive valuations. The current best upper
                 bounds for the Bayesian price of anarchy (BPoA) of
                 these auctions are $e / (e - 1)$ [Syrgkanis and Tardos
                 2013] and 2 [Feldman et al. 2013], respectively. We
                 provide matching lower bounds for both cases even for
                 the case of full information and for mixed Nash
                 equilibria via an explicit construction. We present an
                 alternative proof of the upper bound of $e / (e - 1)$
                 for FPAs with fractionally subadditive valuations that
                 reveals the worst-case price distribution, which is
                 used as a building block for the matching lower bound
                 construction. We generalize our results to a general
                 class of item bidding auctions that we call
                 bid-dependent auctions (including FPAs and all-pay
                 auctions) where the winner is always the highest bidder
                 and each bidder's payment depends only on his own bid.
                 Finally, we apply our techniques to discriminatory
                 price multiunit auctions. We complement the results of
                 de Keijzer et al. [2013] for the case of subadditive
                 valuations by providing a matching lower bound of 2.
                 For the case of submodular valuations, we provide a
                 lower bound of 1.109. For the same class of valuations,
                 we were able to reproduce the upper bound of
                 $e / (e - 1)$ using our nonsmooth approach.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}
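
%%% The bound $e / (e - 1)$ quoted above has a simple numerical value; the
%%% following is plain arithmetic, not a claim from the article:
%%%
%%%     \frac{e}{e - 1} = \frac{2.71828\ldots}{1.71828\ldots} \approx 1.582,
%%%     \qquad \frac{e - 1}{e} = 1 - \frac{1}{e} \approx 0.632.
%%%
%%% That is, a price of anarchy of $e / (e - 1)$ means equilibrium welfare
%%% is at least a $1 - 1/e$ fraction (about 63%) of the optimum, whereas
%%% the bound of 2 for subadditive valuations guarantees only half of it.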

@Article{Christodoulou:2016:PSP,
  author =       "George Christodoulou and Martin Gairing",
  title =        "Price of Stability in Polynomial Congestion Games",
  journal =      j-TEAC,
  volume =       "4",
  number =       "2",
  pages =        "10:1--10:??",
  month =        feb,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2841229",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Sat Feb 6 08:20:20 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "The price of anarchy (PoA) in congestion games has
                 attracted a lot of research over the past decade. This
                 has resulted in a thorough understanding of this
                 concept. In contrast, the price of stability (PoS),
                 which is an equally interesting concept, is much less
                 understood. In this article, we consider congestion
                 games with polynomial cost functions with nonnegative
                 coefficients and maximum degree d. We give matching
                 bounds for the PoS in such games; that is, our technique
                 provides the exact value for any degree d. For linear
                 congestion games, tight bounds were previously known.
                 Those bounds hold even for the more restricted case of
                 dominant equilibria, which may not exist. We give a
                 separation result showing that this is not possible for
                 congestion games with quadratic cost functions; in other
                 words, the PoA for the subclass of games that admit a
                 dominant strategy equilibrium is strictly smaller than
                 the PoS for the general class.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Hoefer:2016:TSD,
  author =       "Martin Hoefer and Thomas Kesselheim and Berthold
                 V{\"o}cking",
  title =        "Truthfulness and Stochastic Dominance with Monetary
                 Transfers",
  journal =      j-TEAC,
  volume =       "4",
  number =       "2",
  pages =        "11:1--11:??",
  month =        feb,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2847522",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Sat Feb 6 08:20:20 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider truthfulness concepts for auctions with
                 payments based on first- and second-order stochastic
                 dominance. We assume bidders consider wealth in
                 standard quasilinear form as valuation minus payments.
                 Additionally, they are sensitive to risk in the
                 distribution of wealth stemming from randomized
                 mechanisms. First- and second-order stochastic
                 dominance are well known to capture risk sensitivity,
                 and we apply these concepts to capture truth-telling
                 incentives for bidders. As our first main result, we
                 provide a complete characterization of all
                 social-choice functions over binary single-parameter
                 domains that can be implemented by a mechanism that is
                 truthful in first- and second-order stochastic
                 dominance. We show that these are exactly the
                 social-choice functions implementable by
                 truthful-in-expectation mechanisms, and we provide a
                 novel payment rule that guarantees stochastic
                 dominance. As our second main result we extend the
                 celebrated randomized metarounding approach for
                 truthful-in-expectation mechanisms in packing domains.
                 We design mechanisms that are truthful in first-order
                 stochastic dominance by spending only a logarithmic
                 factor in the approximation guarantee.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}
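
%%% As background for the truthfulness concepts above, the two dominance
%%% orders have standard textbook definitions (quoted here as background,
%%% not taken from the article): a random wealth $X$ first-order
%%% stochastically dominates $Y$ if
%%%
%%%     \Pr[X \ge t] \ge \Pr[Y \ge t] \quad \mbox{for all } t,
%%%
%%% and second-order stochastically dominates $Y$ if
%%%
%%%     \int_{-\infty}^{t} \bigl( F_Y(s) - F_X(s) \bigr) \, ds \ge 0
%%%     \quad \mbox{for all } t,
%%%
%%% equivalently $E[u(X)] \ge E[u(Y)]$ for every nondecreasing concave
%%% utility $u$.  Truthfulness in these orders then asks, roughly, that
%%% truthful bidding yield a wealth distribution dominating the one
%%% obtained from any misreport.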

@Article{Mcafee:2016:I,
  author =       "Preston Mcafee and {\'E}va Tardos",
  title =        "Introduction",
  journal =      j-TEAC,
  volume =       "4",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jun,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2916701",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Thu Jun 16 09:24:17 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chen:2016:TMA,
  author =       "Yiling Chen and Stephen Chong and Ian A. Kash and Tal
                 Moran and Salil Vadhan",
  title =        "Truthful Mechanisms for Agents That Value Privacy",
  journal =      j-TEAC,
  volume =       "4",
  number =       "3",
  pages =        "13:1--13:??",
  month =        jun,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2892555",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Thu Jun 16 09:24:17 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Recent work has constructed economic mechanisms that
                 are both truthful and differentially private. In these
                 mechanisms, privacy is treated separately from
                 truthfulness; it is not incorporated in players'
                 utility functions (and doing so has been shown to lead
                 to nontruthfulness in some cases). In this work, we
                 propose a new, general way of modeling privacy in
                 players' utility functions. Specifically, we only
                 assume that if an outcome o has the property that any
                 report of player i would have led to o with
                 approximately the same probability, then o has a small
                 privacy cost to player i. We give three mechanisms that
                 are truthful with respect to our modeling of privacy:
                 for an election between two candidates, for a discrete
                 version of the facility location problem, and for a
                 general social choice problem with discrete utilities
                 (via a VCG-like mechanism). As the number n of players
                 increases, the social welfare achieved by our
                 mechanisms approaches optimal (as a fraction of n).",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}
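
%%% For background, the differential-privacy guarantee referred to in the
%%% abstract above is the standard one (a textbook definition, not a
%%% statement from the article): a randomized mechanism $M$ is
%%% $\epsilon$-differentially private if, for every outcome $o$ and every
%%% pair of inputs $x, x'$ differing only in one player's report,
%%%
%%%     \Pr[M(x) = o] \le e^{\epsilon} \, \Pr[M(x') = o].
%%%
%%% The modeling assumption in the abstract is in the same spirit: when
%%% every report of player $i$ leads to outcome $o$ with approximately the
%%% same probability, the privacy cost of $o$ to player $i$ is small.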

@Article{Devanur:2016:WPO,
  author =       "Nikhil R. Devanur and Zhiyi Huang and Nitish Korula
                 and Vahab S. Mirrokni and Qiqi Yan",
  title =        "Whole-Page Optimization and Submodular Welfare
                 Maximization with Online Bidders",
  journal =      j-TEAC,
  volume =       "4",
  number =       "3",
  pages =        "14:1--14:??",
  month =        jun,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2892563",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Thu Jun 16 09:24:17 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In the context of online ad serving, display ads may
                 appear on different types of web pages, where each page
                 includes several ad slots and therefore multiple ads
                 can be shown on each page. The set of ads that can be
                 assigned to ad slots of the same page needs to satisfy
                 various prespecified constraints including exclusion
                 constraints, diversity constraints, and the like. Upon
                 arrival of a user, the ad serving system needs to
                 allocate a set of ads to the current webpage respecting
                 these per-page allocation constraints. Previous
                 slot-based settings ignore the important concept of a
                 page and may lead to highly suboptimal results in
                 general. In this article, motivated by these
                 applications in display advertising and inspired by the
                 submodular welfare maximization problem with online
                 bidders, we study a general class of page-based ad
                 allocation problems, present the first (tight)
                 constant-factor approximation algorithms for these
                 problems, and confirm the performance of our algorithms
                 experimentally on real-world datasets. A key technical
                 ingredient of our results is a novel primal-dual
                 analysis for handling free disposal, which updates dual
                 variables using a ``level function'' instead of a
                 single level and unifies with previous analyses of
                 related problems. This new analysis method allows us to
                 handle arbitrarily complicated allocation constraints
                 for each page. Our main result is an algorithm that
                 achieves a $1 - 1 / e - o(1)$-competitive
                 ratio. Moreover, our experiments on real-world datasets
                 show significant improvements of our page-based
                 algorithms compared to the slot-based
                 algorithms. Finally, we observe that our problem is
                 closely related to the submodular welfare maximization
                 (SWM) problem. In particular, we introduce a variant of
                 the SWM problem with online bidders and show how to
                 solve this problem using our algorithm for whole-page
                 optimization.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}
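
%%% Numerically, the competitive ratio quoted above tends to
%%%
%%%     1 - \frac{1}{e} \approx 0.632
%%%
%%% as the $o(1)$ term vanishes with growing input size (plain arithmetic;
%%% the asymptotics are as stated in the abstract), so on large instances
%%% the page-based algorithm is guaranteed roughly 63% of the offline
%%% optimum in the worst case.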

@Article{Caragiannis:2016:WDN,
  author =       "Ioannis Caragiannis and Ariel D. Procaccia and Nisarg
                 Shah",
  title =        "When Do Noisy Votes Reveal the Truth?",
  journal =      j-TEAC,
  volume =       "4",
  number =       "3",
  pages =        "15:1--15:??",
  month =        jun,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2892565",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Thu Jun 16 09:24:17 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "A well-studied approach to the design of voting rules
                 views them as maximum likelihood estimators; given
                 votes that are seen as noisy estimates of a true
                 ranking of the alternatives, the rule must reconstruct
                 the most likely true ranking. We argue that this is too
                 stringent a requirement and instead ask: how many votes
                 does a voting rule need to reconstruct the true
                 ranking? We define the family of pairwise-majority
                 consistent rules and show that for all rules in this
                 family, the number of samples required from Mallows's
                 noise model is logarithmic in the number of
                 alternatives, and that no rule can do asymptotically
                 better (while some rules like plurality do much worse).
                 Taking a more normative point of view, we consider
                 voting rules that surely return the true ranking as the
                 number of samples tends to infinity (we call this
                 property accuracy in the limit); this allows us to
                 move to a higher level of abstraction. We study
                 families of noise models that are parameterized by
                 distance functions and find voting rules that are
                 accurate in the limit for all noise models in such
                 general families. We characterize the distance
                 functions that induce noise models for which
                 pairwise-majority consistent rules are accurate in the
                 limit and provide a similar result for another novel
                 family of position-dominance consistent rules. These
                 characterizations capture three well-known distance
                 functions.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Easley:2016:IGG,
  author =       "David Easley and Arpita Ghosh",
  title =        "Incentives, Gamification, and Game Theory: an Economic
                 Approach to Badge Design",
  journal =      j-TEAC,
  volume =       "4",
  number =       "3",
  pages =        "16:1--16:??",
  month =        jun,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2910575",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Thu Jun 16 09:24:17 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Gamification is growing increasingly prevalent as a
                 means to incentivize user engagement of social media
                 sites that rely on user contributions. Badges, or
                 equivalent rewards, such as top-contributor lists that
                 are used to recognize a user's contributions on a site,
                 clearly appear to be valued by users who actively
                 pursue and compete for them. However, different sites
                 use different badge designs, varying how, and for what,
                 badges are awarded. Some sites, such as StackOverflow,
                 award badges for meeting fixed levels of
                 contribution. Other sites, such as Amazon and Y!
                 Answers, reward users for being among some top set of
                 contributors on the site, corresponding to a
                 competitive standard of performance. Given that users
                 value badges, and that contributing to a site requires
                 effort, how badges are designed will affect the
                 incentives, and therefore the participation and
                 effort, elicited from strategic users on a site. We take
                 a game-theoretic approach to badge design, analyzing
                 the incentives created by widely used badge designs in
                 a model in which winning a badge is valued, effort is
                 costly, and potential contributors to the site
                 endogenously decide whether or not to participate, and
                 how much total effort to put into their contributions
                 to the site. We analyze equilibrium existence, as well
                 as equilibrium participation and effort, in an absolute
                 standards mechanism $M_\alpha$ in which badges are
                 awarded for meeting some absolute level of (observed)
                 effort, and a relative standards mechanism $M_\rho$
                 corresponding to competitive standards, as in a
                 top-$\rho$ contributor badge. We find that equilibria
                 always exist in both mechanisms, even when the value
                 from winning a badge depends endogenously on the number
                 of other winners. However, $M_\alpha$ has
                 zero-participation equilibria for standards that are
                 too high, whereas all equilibria in $M_\rho$ elicit
                 nonzero participation for all possible $\rho$, provided
                 that $\rho$ is specified as a fixed number rather than
                 as a fraction of actual contributors (note that the two
                 are not equivalent in a setting with endogenous
                 participation). Finally, we ask whether or not a site
                 should explicitly announce the number of users winning
                 a badge. The answer to this question is determined by
                 the curvature of the value of winning the badge as a
                 function of the number of other winners.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Roberts:2016:RTS,
  author =       "Ben Roberts and Dinan Gunawardena and Ian A. Kash and
                 Peter Key",
  title =        "Ranking and Tradeoffs in Sponsored Search Auctions",
  journal =      j-TEAC,
  volume =       "4",
  number =       "3",
  pages =        "17:1--17:??",
  month =        jun,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2910576",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Thu Jun 16 09:24:17 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In a sponsored search auction, decisions about how to
                 rank ads impose tradeoffs between objectives, such as
                 revenue and welfare. In this article, we examine how
                 these tradeoffs should be made. We begin by arguing
                 that the most natural solution concept to evaluate
                 these tradeoffs is the lowest symmetric Nash
                 equilibrium (SNE). As part of this argument, we
                 generalise the well-known connection between the lowest
                 SNE and the VCG outcome. We then propose a new ranking
                 algorithm, loosely based on the revenue-optimal
                 auction, that uses a reserve price to order the ads
                 (not just to filter them) and give conditions under
                 which it raises more revenue than simply applying that
                 reserve price. Finally, we conduct extensive
                 simulations examining the tradeoffs enabled by
                 different ranking algorithms and show that our proposed
                 algorithm enables superior operating points by a
                 variety of metrics.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Roughgarden:2016:ORM,
  author =       "Tim Roughgarden and Inbal Talgam-Cohen",
  title =        "Optimal and Robust Mechanism Design with
                 Interdependent Values",
  journal =      j-TEAC,
  volume =       "4",
  number =       "3",
  pages =        "18:1--18:??",
  month =        jun,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2910577",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Thu Jun 16 09:24:17 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study interdependent value settings and extend
                 several results from the well-studied independent
                 private values model to these settings. For
                 revenue-optimal mechanism design, we give conditions
                 under which Myerson's virtual value-based mechanism
                 remains optimal with interdependent values. One of
                 these conditions is robustness of the truthfulness and
                 individual rationality guarantees, in the sense that
                 they are required to hold ex-post. We then consider an
                 even more robust class of mechanisms called ``prior
                 independent'' (``detail free''), and show that, by
                 simply using one of the bidders to set a reserve price,
                 it is possible to extract near-optimal revenue in an
                 interdependent values setting. This shows that a
                 considerable level of robustness is achievable for
                 interdependent values in single-parameter
                 environments.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}
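
%%% For reference, the virtual value behind ``Myerson's virtual value-based
%%% mechanism'' mentioned above is, in the standard single-parameter model
%%% with value distribution $F_i$ and density $f_i$ (a textbook formula,
%%% quoted only as background):
%%%
%%%     \phi_i(v_i) = v_i - \frac{1 - F_i(v_i)}{f_i(v_i)},
%%%
%%% and the revenue-optimal auction allocates so as to maximize the total
%%% virtual value of the winners (with the usual regularity caveats).  The
%%% article's conditions describe when this recipe, with ex-post incentive
%%% and rationality guarantees, remains optimal once values are
%%% interdependent.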

@Article{Conitzer:2016:ISI,
  author =       "Vincent Conitzer and David Easley",
  title =        "Introduction to the Special Issue on {EC'14}",
  journal =      j-TEAC,
  volume =       "4",
  number =       "4",
  pages =        "19:1--19:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2953046",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Mon Aug 29 06:37:22 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Othman:2016:CFT,
  author =       "Abraham Othman and Christos Papadimitriou and Aviad
                 Rubinstein",
  title =        "The Complexity of Fairness Through Equilibrium",
  journal =      j-TEAC,
  volume =       "4",
  number =       "4",
  pages =        "20:1--20:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2956583",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Mon Aug 29 06:37:22 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Competitive equilibrium from equal incomes (CEEI) is a
                 well-known fair allocation mechanism with desirable
                 fairness and efficiency properties; however, with
                 indivisible resources, a CEEI may not exist [Foley
                 1967; Varian 1974; Thomson and Varian 1985]. It was
                 shown in Budish [2011] that in the case of indivisible
                 resources, there is always an allocation, called
                 A-CEEI, that is approximately fair, approximately
                 truthful, and approximately efficient for some
                 favorable approximation parameters. A heuristic search
                 that attempts to find this approximation is used in
                 practice to assign business school students to courses.
                 In this article, we show that finding the A-CEEI
                 allocation guaranteed to exist by Budish's theorem is
                 PPAD-complete. We further show that finding an
                 approximate equilibrium with better approximation
                 guarantees is even harder: NP-complete.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Hassidim:2016:LCM,
  author =       "Avinatan Hassidim and Yishay Mansour and Shai Vardi",
  title =        "Local Computation Mechanism Design",
  journal =      j-TEAC,
  volume =       "4",
  number =       "4",
  pages =        "21:1--21:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2956584",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Mon Aug 29 06:37:22 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We introduce the notion of local computation mechanism
                 design: designing game-theoretic mechanisms that run in
                 polylogarithmic time and space. Local computation
                 mechanisms reply to each query in polylogarithmic time
                 and space, and the replies to different queries are
                 consistent with the same global feasible solution. When
                 the mechanism employs payments, the computation of the
                 payments is also done in polylogarithmic time and
                 space. Furthermore, the mechanism needs to maintain
                 incentive compatibility with respect to the allocation
                 and payments. We present local computation mechanisms
                 for two classical game-theoretical problems: stable
                 matching and job scheduling. For stable matching, some
                 of our techniques may have implications to the global
                 (non-LCA (Local Computation Algorithm)) setting.
                 Specifically, we show that when the men's preference
                 lists are bounded, we can achieve an arbitrarily good
                 approximation to the stable matching within a fixed
                 number of iterations of the Gale--Shapley algorithm.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Ghosh:2016:OCD,
  author =       "Arpita Ghosh and Robert Kleinberg",
  title =        "Optimal Contest Design for Simple Agents",
  journal =      j-TEAC,
  volume =       "4",
  number =       "4",
  pages =        "22:1--22:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2930955",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Mon Aug 29 06:37:22 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Incentives are more likely to elicit desired outcomes
                 when they are designed based on accurate models of
                 agents' strategic behavior. A growing literature,
                 however, suggests that people do not quite behave like
                 standard economic agents in a variety of environments,
                 both online and offline. What consequences might such
                 differences have for the optimal design of mechanisms
                 in these environments? In this article, we explore this
                 question in the context of optimal contest design for
                 simple agents: agents who strategically reason about
                 whether or not to participate in a system, but not
                 about the input they provide to it. Specifically,
                 consider a contest where n potential contestants with
                 types $ (q^i, c^i) $ each choose between participating
                 and producing a submission of quality $ q^i $ at cost
                 $ c^i $, versus not participating at all to maximize
                 their utilities. How should a principal distribute a
                 total prize V among the n ranks to maximize some
                 increasing function of the qualities of elicited
                 submissions in a contest with such simple agents? We
                 first solve the optimal contest design problem for
                 settings where agents have homogeneous participation
                 costs $ c^i = c$. Here, the contest that maximizes
                 every increasing function of the elicited contributions
                 is always a simple contest, awarding equal prizes of $
                 V / j^*$ each to the top $ j^*= V / c - \Theta (\sqrt {
                 V / (c \ln (V / c))})$ contestants. This is in contrast
                 to the optimal contest structure in comparable models
                 with strategic effort choices, where the optimal
                 contest is either a winner-take-all contest or awards
                 possibly unequal prizes, depending on the curvature of
                 agents' effort cost functions. We next address the
                 general case with heterogeneous costs where agents'
                 types $ (q^i, c^i)$ are inherently two dimensional,
                 significantly complicating equilibrium analysis. With
                 heterogeneous costs, the optimal contest depends on the
                 objective being maximized: our main result here is that
                 the winner-take-all contest is a 3-approximation of the
                 optimal contest when the principal's objective is to
                 maximize the quality of the best elicited contribution.
                 The proof of this result hinges around a
                 ``subequilibrium'' lemma establishing a stochastic
                 dominance relation between the distribution of
                 qualities elicited in an equilibrium and a
                 subequilibrium --- a strategy profile that is a best
                 response for all agents who choose to participate in
                 that strategy profile; this relation between equilibria
                 and subequilibria may be of more general interest.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Fudenberg:2016:RRR,
  author =       "Drew Fudenberg and Alexander Peysakhovich",
  title =        "Recency, Records, and Recaps: Learning and
                 Nonequilibrium Behavior in a Simple Decision Problem",
  journal =      j-TEAC,
  volume =       "4",
  number =       "4",
  pages =        "23:1--23:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2956581",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Mon Aug 29 06:37:22 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Nash equilibrium takes optimization as a primitive,
                 but suboptimal behavior can persist in simple
                 stochastic decision problems. This has motivated the
                 development of other equilibrium concepts such as
                 cursed equilibrium and behavioral equilibrium. We
                 experimentally study a simple adverse selection (or
                 ``lemons'') problem and find that learning models that
                 heavily discount past information (i.e., display
                 recency bias) explain patterns of behavior better than
                 Nash, cursed, or behavioral equilibrium. Providing
                 counterfactual information or a record of past outcomes
                 does little to aid convergence to optimal strategies,
                 but providing sample averages (``recaps'') gets
                 individuals most of the way to optimality. Thus,
                 recency effects are not solely due to limited memory
                 but stem from some other form of cognitive constraints.
                 Our results show the importance of going beyond static
                 optimization and incorporating features of human
                 learning into economic models used in both
                 understanding phenomena and designing market
                 institutions.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Goldberg:2016:BQC,
  author =       "Paul W. Goldberg and Aaron Roth",
  title =        "Bounds for the Query Complexity of Approximate
                 Equilibria",
  journal =      j-TEAC,
  volume =       "4",
  number =       "4",
  pages =        "24:1--24:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2956582",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Mon Aug 29 06:37:22 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We analyze the number of payoff queries needed to
                 compute approximate equilibria of multi-player games.
                 We find that query complexity is an effective tool for
                 distinguishing the computational difficulty of
                 alternative solution concepts, and we develop new
                 techniques for upper- and lower-bounding the query
                 complexity. For binary-choice games, we show
                 logarithmic upper and lower bounds on the query
                 complexity of approximate correlated equilibrium. For
                 well-supported approximate correlated equilibrium (a
                 restriction where a player's behavior must always be
                 approximately optimal, in the worst case over draws
                 from the distribution) we show a linear lower bound,
                 thus separating the query complexity of well-supported
                 approximate correlated equilibrium from the standard
                 notion of approximate correlated equilibrium. Finally,
                 we give a query-efficient reduction from the problem of
                 computing an approximate well-supported Nash
                 equilibrium to the problem of verifying a well-supported
                 Nash equilibrium, where the additional query
                 overhead is proportional to the description length of
                 the game. This gives a polynomial-query algorithm for
                 computing well-supported approximate Nash equilibria
                 (and hence correlated equilibria) in concisely
                 represented games. We identify a class of games (which
                 includes congestion games) in which the reduction can
                 be made not only query efficient, but also
                 computationally efficient.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Fearnley:2016:FAN,
  author =       "John Fearnley and Rahul Savani",
  title =        "Finding Approximate {Nash} Equilibria of Bimatrix
                 Games via Payoff Queries",
  journal =      j-TEAC,
  volume =       "4",
  number =       "4",
  pages =        "25:1--25:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "http://dx.doi.org/10.1145/2956579",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "1539-9087",
  bibdate =      "Mon Aug 29 06:37:22 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the deterministic and randomized query
                 complexity of finding approximate equilibria in a $ k
                 \times k $ bimatrix game. We show that the
                 deterministic query complexity of finding an
                 $ \epsilon $-Nash equilibrium when $ \epsilon < 1 / 2 $ is
                 $ \Omega (k^2) $, even in zero-one constant-sum games. In
                 combination with previous results [Fearnley et al.
                 2013], this provides a complete characterization of the
                 deterministic query complexity of approximate Nash
                 equilibria. We also study randomized querying
                 algorithms. We give a randomized algorithm for finding
                 a $ ((3 - \sqrt {5}) / 2 + \epsilon)$-Nash equilibrium
                 using $ O(k \log k / \epsilon^2)$ payoff queries, which
                 shows that the $ 1 / 2$ barrier for deterministic
                 algorithms can be broken by randomization. For
                 well-supported Nash equilibria (WSNE), we first give a
                 randomized algorithm for finding an $ \epsilon $-WSNE
                 of a zero-sum bimatrix game using $ O (k \log k /
                 \epsilon^4)$ payoff queries, and we then use this to
                 obtain a randomized algorithm for finding a $ (2 / 3 +
                 \epsilon)$-WSNE in a general bimatrix game using $ O(k
                 \log k / \epsilon^4)$ payoff queries. Finally, we
                 initiate the study of lower bounds against randomized
                 algorithms in the context of bimatrix games, by showing
                 that randomized algorithms require $ \Omega (k^2)$
                 payoff queries in order to find an $ \epsilon $-Nash
                 equilibrium with $ \epsilon < 1 / (4 k)$, even in
                 zero-one constant-sum games. In particular, this rules
                 out query-efficient randomized algorithms for finding
                 exact Nash equilibria.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}
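
%%% The randomized approximation guarantee $ ((3 - \sqrt {5}) / 2 +
%%% \epsilon)$ quoted above evaluates, for small $\epsilon$, to roughly
%%%
%%%     \frac{3 - \sqrt{5}}{2} = \frac{3 - 2.2360\ldots}{2} \approx 0.382
%%%
%%% (plain arithmetic), which lies strictly below the $1/2$ barrier for
%%% deterministic algorithms mentioned in the abstract (any better
%%% deterministic approximation needs $\Omega(k^2)$ payoff queries); this
%%% is the sense in which randomization breaks that barrier.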