%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.21",
%%%     date            = "03 October 2020",
%%%     time            = "07:44:49 MDT",
%%%     filename        = "teac.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "http://www.math.utah.edu/~beebe",
%%%     checksum        = "21226 7598 43222 394552",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "bibliography; BibTeX; ACM Transactions on
%%%                        Economics and Computation (TEAC)",
%%%     license         = "public domain",
%%%     supported       = "no",
%%%     docstring       = "This is a COMPLETE BibTeX bibliography for
%%%                        the journal ACM Transactions on Economics and
%%%                        Computation (no CODEN, ISSN 2167-8375
%%%                        (print), 2167-8383 (electronic)), for
%%%                        2013--date.
%%%
%%%                        Publication began with volume 1, number 1,
%%%                        in January 2013.  The journal appears
%%%                        quarterly, in January, May, September, and
%%%                        December.
%%%
%%%                        The journal has a World-Wide Web site at:
%%%
%%%                            http://teac.acm.org/
%%%
%%%                        Tables-of-contents of all issues are
%%%                        available at:
%%%
%%%                            http://dl.acm.org/citation.cfm?id=2542174
%%%
%%%                        Qualified subscribers can retrieve the full
%%%                        text of recent articles in PDF form.
%%%
%%%                        At version 1.21, the COMPLETE journal
%%%                        coverage looked like this:
%%%
%%%                             2013 (  21)    2016 (  27)    2019 (  17)
%%%                             2014 (  17)    2017 (  17)    2020 (  20)
%%%                             2015 (  32)    2018 (  21)
%%%
%%%                             Article:        172
%%%
%%%                             Total entries:  172
%%%
%%%                        Spelling has been verified with the UNIX
%%%                        spell and GNU ispell programs using the
%%%                        exception dictionary stored in the
%%%                        companion file with extension .sok.
%%%
%%%                        BibTeX citation tags are uniformly chosen
%%%                        as name:year:abbrev, where name is the
%%%                        family name of the first author or editor,
%%%                        year is a 4-digit number, and abbrev is a
%%%                        3-letter condensation of important title
%%%                        words. Citation tags were automatically
%%%                        generated by software developed for the
%%%                        BibNet Project.
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, using ``bibsort -byvolume.''
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.",
%%%  }
%%% ====================================================================
@Preamble{"\input bibnames.sty"}

%%% ====================================================================
%%% Acknowledgement abbreviations:
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|http://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:
@String{j-TEAC                  = "ACM Transactions on Economics and
                                  Computation"}

%%% ====================================================================
%%% Publisher abbreviations:
@String{pub-ACM                 = "ACM Press"}
@String{pub-ACM:adr             = "New York, NY 10036, USA"}

%%% ====================================================================
%%% Bibliography entries:
@Article{Conitzer:2013:ATE,
  author =       "Vincent Conitzer and R. Preston McAfee",
  title =        "The {ACM Transactions on Economics and Computation}:
                 an introduction",
  journal =      j-TEAC,
  volume =       "1",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2399187.2399188",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:51 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Gradwohl:2013:SRC,
  author =       "Ronen Gradwohl and Noam Livne and Alon Rosen",
  title =        "Sequential rationality in cryptographic protocols",
  journal =      j-TEAC,
  volume =       "1",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2399187.2399189",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:51 MDT 2014",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Much of the literature on rational cryptography
                 focuses on analyzing the strategic properties of
                 cryptographic protocols. However, due to the presence
                 of computationally-bounded players and the asymptotic
                 nature of cryptographic security, a definition of
                 sequential rationality for this setting has thus far
                 eluded researchers. We propose a new framework for
                 overcoming these obstacles, and provide the first
                 definitions of computational solution concepts that
                 guarantee sequential rationality. We argue that natural
                 computational variants of subgame perfection are too
                 strong for cryptographic protocols. As an alternative,
                 we introduce a weakening called threat-free Nash
                 equilibrium that is more permissive but still
                 eliminates the undesirable ``empty threats'' of
                 nonsequential solution concepts. To demonstrate the
                 applicability of our framework, we revisit the problem
                 of implementing a mediator for correlated equilibria
                 [Dodis et al. 2000], and propose a variant of their
                 protocol that is sequentially rational for a nontrivial
                 class of correlated equilibria. Our treatment provides
                 a better understanding of the conditions under which
                 mediators in a correlated equilibrium can be replaced
                 by a stable protocol.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Jain:2013:GTA,
  author =       "Shaili Jain and David C. Parkes",
  title =        "A game-theoretic analysis of the {ESP} game",
  journal =      j-TEAC,
  volume =       "1",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2399187.2399190",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:51 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "``Games with a Purpose'' are interactive games that
                 users play because they are fun, with the added benefit
                 that the outcome of play is useful work. The ESP game,
                 developed by von Ahn and Dabbish [2004], is an example
                 of such a game devised to label images on the web.
                 Since labeling images is a hard problem for computer
                 vision algorithms and can be tedious and time-consuming
                 for humans, the ESP game provides humans with incentive
                 to do useful work by being enjoyable to play. We
                 present a simple game-theoretic model of the ESP game
                 and characterize the equilibrium behavior in our model.
                 Our equilibrium analysis supports the fact that users
                 appear to coordinate on low effort words. We provide an
                 alternate model of user preferences, modeling a change
                 that could be induced through a different scoring
                 method, and show that equilibrium behavior in this
                 model coordinates on high-effort words. We also give
                 sufficient conditions for coordinating on high-effort
                 words to be a Bayesian-Nash equilibrium. Our results
                 suggest the possibility of formal incentive design in
                 achieving desirable system-wide outcomes for the
                 purpose of human computation, complementing existing
                 considerations of robustness against cheating and human
                 factors.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Naroditskiy:2013:OPD,
  author =       "Victor Naroditskiy and Maria Polukarov and Nicholas R.
                 Jennings",
  title =        "Optimal payments in dominant-strategy mechanisms for
                 single-parameter domains",
  journal =      j-TEAC,
  volume =       "1",
  number =       "1",
  pages =        "4:1--4:??",
  month =        jan,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2399187.2399191",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:51 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study dominant-strategy mechanisms in allocation
                 domains where agents have one-dimensional types and
                 quasilinear utilities. Taking an allocation function as
                 an input, we present an algorithmic technique for
                 finding optimal payments in a class of mechanism design
                 problems, including utilitarian and egalitarian
                 allocation of homogeneous items with nondecreasing
                 marginal costs. Our results link optimality of payment
                 functions to a geometric condition involving
                 triangulations of polytopes. When this condition is
                 satisfied, we constructively show the existence of an
                 optimal payment function that is piecewise linear in
                 agent types.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Feldman:2013:ISI,
  author =       "Michal Feldman and Noam Nisan",
  title =        "Introduction to the {Special Issue on Algorithmic Game
                 Theory}",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "5:1--5:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2465769.2465770",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Blume:2013:NFP,
  author =       "Lawrence Blume and David Easley and Jon Kleinberg and
                 Robert Kleinberg and {\'E}va Tardos",
  title =        "Network Formation in the Presence of Contagious Risk",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "6:1--6:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2465769.2465771",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "There are a number of domains where agents must
                 collectively form a network in the face of the
                 following trade-off: each agent receives benefits from
                 the direct links it forms to others, but these links
                 expose it to the risk of being hit by a cascading
                 failure that might spread over multistep paths.
                 Financial contagion, epidemic disease, the exposure of
                 covert organizations to discovery, and electrical power
                 networks are all settings in which such issues have
                 been articulated. Here we formulate the problem in
                 terms of strategic network formation, and provide
                 asymptotically tight bounds on the welfare of both
                 optimal and stable networks. We find that socially
                 optimal networks are, in a precise sense, situated just
                 beyond a phase transition in the behavior of the
                 cascading failures, and that stable graphs lie slightly
                 further beyond this phase transition, at a point where
                 most of the available welfare has been lost. Our
                 analysis enables us to explore such issues as the
                 trade-offs between clustered and anonymous market
                 structures, and it exposes a fundamental sense in which
                 very small amounts of ``over-linking'' in networks with
                 contagious risk can have strong consequences for the
                 welfare of the participants.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Karlin:2013:SEM,
  author =       "Anna R. Karlin and C. Thach Nguyen and Yuval Peres",
  title =        "Selling in Exclusive Markets: Some Observations on
                 Prior-Free Mechanism Design",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "7:1--7:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2465769.2465772",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider prior-free benchmarks in non-matroid
                 settings. In particular, we show that a very desirable
                 benchmark proposed by Hartline and Roughgarden is too
                 strong, in the sense that no truthful mechanism can
                 compete with it even in a very simple non-matroid
                 setting where there are two exclusive markets and the
                 seller can only sell to agents in one of them. On the
                 other hand, we show that there is a mechanism that
                 competes with a symmetrized version of this benchmark.
                 We further investigate the more traditional best fixed
                 price profit benchmark and show that there are
                 mechanisms that compete with it in any downward-closed
                 settings.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Ha:2013:MDC,
  author =       "Bach Q. Ha and Jason D. Hartline",
  title =        "Mechanism Design via Consensus Estimates, Cross
                 Checking, and Profit Extraction",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "8:1--8:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2465769.2465773",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "There is only one technique for prior-free optimal
                 mechanism design that generalizes beyond the
                 structurally benevolent setting of digital goods. This
                 technique uses random sampling to estimate the
                 distribution of agent values and then employs the
                 Bayesian optimal mechanism for this estimated
                 distribution on the remaining players. Though quite
                 general, even for digital goods, this random sampling
                 auction has a complicated analysis and is known to be
                 suboptimal. To overcome these issues we generalize the
                 consensus and profit extraction techniques from
                 Goldberg and Hartline [2003] to structurally rich
                 environments that include, for example, single-minded
                 combinatorial auctions.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Goldberg:2013:CHM,
  author =       "Paul W. Goldberg and Christos H. Papadimitriou and
                 Rahul Savani",
  title =        "The Complexity of the Homotopy Method, Equilibrium
                 Selection, and {Lemke--Howson} Solutions",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "9:1--9:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2465769.2465774",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We show that the widely used homotopy method for
                 solving fixpoint problems, as well as the
                 Harsanyi--Selten equilibrium selection process for
                 games, are PSPACE-complete to implement. Extending our
                 result for the Harsanyi--Selten process, we show that
                 several other homotopy-based algorithms for finding
                 equilibria of games are also PSPACE-complete to
                 implement. A further application of our techniques
                 yields the result that it is PSPACE-complete to compute
                 any of the equilibria that could be found via the
                 classical Lemke--Howson algorithm, a
                 complexity-theoretic strengthening of the result in
                 Savani and von Stengel [2006]. These results show that
                 our techniques can be widely applied and suggest that
                 the PSPACE-completeness of implementing homotopy
                 methods is a general principle.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Koutsoupias:2013:CRO,
  author =       "Elias Koutsoupias and George Pierrakos",
  title =        "On the Competitive Ratio of Online Sampling Auctions",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "10:1--10:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2465769.2465775",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study online profit-maximizing auctions for digital
                 goods with adversarial bid selection and uniformly
                 random arrivals; in this sense, our model lies at the
                 intersection of prior-free mechanism design and
                 secretary problems. Our goal is to design auctions that
                 are constant competitive with F$^{(2)}$. We give a
                 generic reduction that transforms any offline auction
                 to an online one with only a loss of a factor of 2 in
                 the competitive ratio. We also present some natural
                 auctions, both randomized and deterministic, and study
                 their competitive ratio. Our analysis reveals some
                 interesting connections of one of these auctions with
                 RSOP.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Candogan:2013:NPG,
  author =       "Ozan Candogan and Asuman Ozdaglar and Pablo A.
                 Parrilo",
  title =        "Near-Potential Games: Geometry and Dynamics",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "11:1--11:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2465769.2465776",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Potential games are a special class of games for which
                 many adaptive user dynamics converge to a Nash
                 equilibrium. In this article, we study properties of
                 near-potential games, that is, games that are close in
                 terms of payoffs to potential games, and show that such
                 games admit similar limiting dynamics. We first focus
                 on finite games in strategic form. We introduce a
                 distance notion in the space of games and study the
                 geometry of potential games and sets of games that are
                 equivalent, with respect to various equivalence
                 relations, to potential games. We discuss how, given an
                 arbitrary game, one can find a nearby game in these
                 sets. We then study dynamics in near-potential games by
                 focusing on continuous-time perturbed best response
                 dynamics. We characterize the limiting behavior of this
                 dynamics in terms of the upper contour sets of the
                 potential function of a close potential game and
                 approximate equilibria of the game. Exploiting
                 structural properties of approximate equilibrium sets,
                 we strengthen our result and show that for games that
                 are sufficiently close to a potential game, the
                 sequence of mixed strategies generated by this dynamics
                 converges to a small neighborhood of equilibria whose
                 size is a function of the distance from the set of
                 potential games. In the second part of the article, we
                 study continuous games and show that our approach for
                 characterizing the limiting sets in near-potential
                 games extends to continuous games. In particular, we
                 consider continuous-time best response dynamics and a
                 variant of it (where players update their strategies
                 only if there is at least $ \epsilon $ utility
                 improvement opportunity) in near-potential games where
                 the strategy sets are compact and convex subsets of a
                 Euclidean space. We show that these update rules
                 converge to a neighborhood of equilibria (or the
                 maximizer of the potential function), provided that the
                 potential function of the nearby potential game
                 satisfies some structural properties. Our results
                 generalize the known convergence results for potential
                 games to near-potential games.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Abernethy:2013:EMM,
  author =       "Jacob Abernethy and Yiling Chen and Jennifer Wortman
                 Vaughan",
  title =        "Efficient Market Making via Convex Optimization, and a
                 Connection to Online Learning",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "12:1--12:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2465769.2465777",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We propose a general framework for the design of
                 securities markets over combinatorial or infinite state
                 or outcome spaces. The framework enables the design of
                 computationally efficient markets tailored to an
                 arbitrary, yet relatively small, space of securities
                 with bounded payoff. We prove that any market
                 satisfying a set of intuitive conditions must price
                 securities via a convex cost function, which is
                 constructed via conjugate duality. Rather than deal
                 with an exponentially large or infinite outcome space
                 directly, our framework only requires optimization over
                 a convex hull. By reducing the problem of automated
                 market making to convex optimization, where many
                 efficient algorithms exist, we arrive at a range of new
                 polynomial-time pricing mechanisms for various
                 problems. We demonstrate the advantages of this
                 framework with the design of some particular markets.
                 We also show that by relaxing the convex hull we can
                 gain computational tractability without compromising
                 the market institution's bounded budget. Although our
                 framework was designed with the goal of deriving
                 efficient automated market makers for markets with very
                 large outcome spaces, this framework also provides new
                 insights into the relationship between market design
                 and machine learning, and into the complete market
                 setting. Using our framework, we illustrate the
                 mathematical parallels between cost-function-based
                 markets and online learning and establish a
                 correspondence between cost-function-based markets and
                 market scoring rules for complete markets.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Haghpanah:2013:OAP,
  author =       "Nima Haghpanah and Nicole Immorlica and Vahab Mirrokni
                 and Kamesh Munagala",
  title =        "Optimal Auctions with Positive Network Externalities",
  journal =      j-TEAC,
  volume =       "1",
  number =       "2",
  pages =        "13:1--13:??",
  month =        may,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2465769.2465778",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:52 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the problem of designing auctions in
                 social networks for goods that exhibit single-parameter
                 submodular network externalities in which a bidder's
                 value for an outcome is a fixed private type times a
                 known submodular function of the allocation of his
                 friends. Externalities pose many issues that are hard
                 to address with traditional techniques; our work shows
                 how to resolve these issues in a specific setting of
                 particular interest. We operate in a Bayesian
                 environment and so assume private values are drawn
                 according to known distributions. We prove that the
                 optimal auction is NP-hard to approximate pointwise,
                 and APX-hard on average. Thus we instead design
                 auctions whose revenue approximates that of the optimal
                 auction. Our main result considers step-function
                 externalities in which a bidder's value for an outcome
                 is either zero, or equal to his private type if at
                 least one friend has the good. For these settings, we
                 provide an $ e / (e + 1) $-approximation. We also give a
                 0.25-approximation auction for general single-parameter
                 submodular network externalities, and discuss
                 optimizing over a class of simple pricing strategies.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

%%% TEAC v1(3), September 2013, article 14: a liquidity-sensitive
%%% automated market maker with bounded loss that can book a profit.
@Article{Othman:2013:PLS,
  author =       "Abraham Othman and David M. Pennock and Daniel M.
                 Reeves and Tuomas Sandholm",
  title =        "A Practical Liquidity-Sensitive Automated Market
                 Maker",
  journal =      j-TEAC,
  volume =       "1",
  number =       "3",
  pages =        "14:1--14:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2509413.2509414",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:54 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Automated market makers are algorithmic agents that
                 enable participation and information elicitation in
                 electronic markets. They have been widely and
                 successfully applied in artificial-money settings, like
                 some Internet prediction markets. Automated market
                 makers from the literature suffer from two problems
                 that contribute to their impracticality and impair
                 their use beyond artificial-money settings: first, they
                 are unable to adapt to liquidity, so that trades cause
                 prices to move the same amount in both heavily and
                 lightly traded markets, and second, in typical
                 circumstances, they run at a deficit. In this article,
                 we construct a market maker that is both sensitive to
                 liquidity and can run at a profit. Our market maker has
                 bounded loss for any initial level of liquidity and, as
                 the initial level of liquidity approaches zero,
                 worst-case loss approaches zero. For any level of
                 initial liquidity we can establish a boundary in market
                 state space such that, if the market terminates within
                 that boundary, the market maker books a profit
                 regardless of the realized outcome.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

%%% TEAC v1(3), September 2013, article 15: sensitivity of
%%% best-response dynamics in potential games to cost fluctuations
%%% and Byzantine players ("Price of Uncertainty").
@Article{Balcan:2013:PU,
  author =       "Maria-Florina Balcan and Avrim Blum and Yishay
                 Mansour",
  title =        "The Price of Uncertainty",
  journal =      j-TEAC,
  volume =       "1",
  number =       "3",
  pages =        "15:1--15:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2509413.2509415",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:54 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In this work, we study the degree to which small
                 fluctuations in costs in well-studied potential games
                 can impact the result of natural best-response and
                 improved-response dynamics. We call this the Price of
                 Uncertainty and study it in a wide variety of potential
                 games including fair cost-sharing games, set-cover
                 games, routing games, and job-scheduling games. We show
                 that in certain cases, even extremely small
                 fluctuations can have the ability to cause these
                 dynamics to spin out of control and move to states of
                 much higher social cost, whereas in other cases these
                 dynamics are much more stable even to large degrees of
                 fluctuation. We also consider the resilience of these
                 dynamics to a small number of Byzantine players about
                 which no assumptions are made. We show again a contrast
                 between different games. In certain cases (e.g., fair
                 cost-sharing, set-cover, job-scheduling) even a single
                 Byzantine player can cause best-response dynamics to
                 transition from low-cost states to states of
                 substantially higher cost, whereas in others (e.g., the
                 class of $ \beta $-nice games, which includes routing,
                 market-sharing and many others) these dynamics are much
                 more resilient. Overall, our work can be viewed as
                 analyzing the inherent resilience or safety of games to
                 different kinds of imperfections in player behavior,
                 player information, or in modeling assumptions made.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

%%% TEAC v1(3), September 2013, article 16: reverse engineering of
%%% Amazon EC2 spot-instance pricing.
%%% NOTE(review): braced {Agmon Ben-Yehuda} so BibTeX treats the
%%% compound family name as one surname (file convention, cf. `Bram
%%% {De Rock}`); presumed from the author's published name form ---
%%% confirm against the ACM/DBLP author record.
@Article{Ben-Yehuda:2013:DAE,
  author =       "Orna {Agmon Ben-Yehuda} and Muli Ben-Yehuda and Assaf
                 Schuster and Dan Tsafrir",
  title =        "Deconstructing {Amazon EC2} Spot Instance Pricing",
  journal =      j-TEAC,
  volume =       "1",
  number =       "3",
  pages =        "16:1--16:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2509413.2509416",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:54 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Cloud providers possessing large quantities of spare
                 capacity must either incentivize clients to purchase it
                 or suffer losses. Amazon is the first cloud provider to
                 address this challenge, by allowing clients to bid on
                 spare capacity and by granting resources to bidders
                 while their bids exceed a periodically changing spot
                 price. Amazon publicizes the spot price but does not
                 disclose how it is determined. By analyzing the spot
                 price histories of Amazon's EC2 cloud, we reverse
                 engineer how prices are set and construct a model that
                 generates prices consistent with existing price traces.
                 Our findings suggest that usually prices are not
                 market-driven, as sometimes previously assumed. Rather,
                 they are likely to be generated most of the time at
                 random from within a tight price range via a dynamic
                 hidden reserve price mechanism. Our model could help
                 clients make informed bids, cloud providers design
                 profitable systems, and researchers design pricing
                 algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

%%% TEAC v1(3), September 2013, article 17: equilibrium analysis of
%%% markets mediated by self-interested comparison-shopping agents.
@Article{Sarne:2013:CSM,
  author =       "David Sarne",
  title =        "Competitive Shopbots-Mediated Markets",
  journal =      j-TEAC,
  volume =       "1",
  number =       "3",
  pages =        "17:1--17:??",
  month =        sep,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2509413.2509417",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:54 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "This article considers markets mediated by autonomous
                 self-interested comparison-shopping agents. As in
                 today's markets, the agents do not charge buyers for
                 their services but rather benefit from payments
                 obtained from sellers upon the execution of a
                 transaction. The agents aim at maximizing their
                 expected benefit, taking into consideration the cost
                 incurred by the search and competition dynamics that
                 arise in the multi-agent setting. This article provides
                 a comprehensive analysis of such models, based on
                 search theory principles. The analysis results in a
                 characterization of the buyers' and agents' search
                 strategies in equilibrium. The main result of this
                 article is that the use of self-interested
                 comparison-shopping agents can result in a beneficial
                 equilibrium, where both buyers and sellers benefit, in
                 comparison to the case where buyers control the
                 comparison-shopping agent, and the comparison-shopping
                 agents necessarily do not lose. This, despite the fact
                 that the service is offered for free to buyers and its
                 cost is essentially covered by sellers. The analysis
                 generalizes to any setting where buyers can use
                 self-interested agents capable of effectively
                 performing the search (e.g., evaluating opportunities)
                 on their behalf.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

%%% TEAC v1(4), December 2013, article 18: approximation as a route to
%%% truthfulness without payments (facility-location case study).
@Article{Procaccia:2013:AMD,
  author =       "Ariel D. Procaccia and Moshe Tennenholtz",
  title =        "Approximate Mechanism Design without Money",
  journal =      j-TEAC,
  volume =       "1",
  number =       "4",
  pages =        "18:1--18:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542174.2542175",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:56 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "The literature on algorithmic mechanism design is
                 mostly concerned with game-theoretic versions of
                 optimization problems to which standard economic
                 money-based mechanisms cannot be applied efficiently.
                 Recent years have seen the design of various truthful
                 approximation mechanisms that rely on payments. In this
                 article, we advocate the reconsideration of highly
                 structured optimization problems in the context of
                 mechanism design. We explicitly argue for the first
                 time that, in such domains, approximation can be
                 leveraged to obtain truthfulness without resorting to
                 payments. This stands in contrast to previous work
                 where payments are almost ubiquitous and (more often
                 than not) approximation is a necessary evil that is
                 required to circumvent computational complexity. We
                 present a case study in approximate mechanism design
                 without money. In our basic setting, agents are located
                 on the real line and the mechanism must select the
                 location of a public facility; the cost of an agent is
                 its distance to the facility. We establish tight upper
                 and lower bounds for the approximation ratio given by
                 strategy-proof mechanisms without payments, with
                 respect to both deterministic and randomized
                 mechanisms, under two objective functions: the social
                 cost and the maximum cost. We then extend our results
                 in two natural directions: a domain where two
                 facilities must be located and a domain where each
                 agent controls multiple locations.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

%%% TEAC v1(4), December 2013, article 19: voter-model opinion
%%% dynamics with fixed-opinion (stubborn) agents.
%%% NOTE(review): author names are ASCII transliterations (file header
%%% declares codetable ISO/ASCII); accented forms not introduced here.
@Article{Yildiz:2013:BOD,
  author =       "Ercan Yildiz and Asuman Ozdaglar and Daron Acemoglu
                 and Amin Saberi and Anna Scaglione",
  title =        "Binary Opinion Dynamics with Stubborn Agents",
  journal =      j-TEAC,
  volume =       "1",
  number =       "4",
  pages =        "19:1--19:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2538508",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:56 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study binary opinion dynamics in a social network
                 with stubborn agents who influence others but do not
                 change their opinions. We focus on a generalization of
                 the classical voter model by introducing nodes
                 (stubborn agents) that have a fixed state. We show that
                 the presence of stubborn agents with opposing opinions
                 precludes convergence to consensus; instead, opinions
                 converge in distribution with disagreement and
                 fluctuations. In addition to the first moment of this
                 distribution typically studied in the literature, we
                 study the behavior of the second moment in terms of
                 network properties and the opinions and locations of
                 stubborn agents. We also study the problem of optimal
                 placement of stubborn agents where the location of a
                 fixed number of stubborn agents is chosen to have the
                 maximum impact on the long-run expected opinions of
                 agents.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

%%% TEAC v1(4), December 2013, article 20: efficient algorithm for
%%% Bayesian consensus via repeated rounds of voting.
@Article{Mossel:2013:MCT,
  author =       "Elchanan Mossel and Omer Tamuz",
  title =        "Making Consensus Tractable",
  journal =      j-TEAC,
  volume =       "1",
  number =       "4",
  pages =        "20:1--20:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542174.2542176",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:56 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study a model of consensus decision making in which
                 a finite group of Bayesian agents has to choose between
                 one of two courses of action. Each member of the group
                 has a private and independent signal at his or her
                 disposal, giving some indication as to which action is
                 optimal. To come to a common decision, the participants
                 perform repeated rounds of voting. In each round, each
                 agent casts a vote in favor of one of the two courses
                 of action, reflecting his or her current belief, and
                 observes the votes of the rest. We provide an efficient
                 algorithm for the calculation the agents have to
                 perform and show that consensus is always reached and
                 that the probability of reaching a wrong decision
                 decays exponentially with the number of agents.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

%%% TEAC v1(4), December 2013, article 21: existence and convergence of
%%% pure Nash equilibria in congestion games with altruistic players.
@Article{Hoefer:2013:AAC,
  author =       "Martin Hoefer and Alexander Skopalik",
  title =        "Altruism in Atomic Congestion Games",
  journal =      j-TEAC,
  volume =       "1",
  number =       "4",
  pages =        "21:1--21:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2542174.2542177",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 14 06:10:56 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "This article studies the effects of altruism, a
                 phenomenon widely observed in practice, in the model of
                 atomic congestion games. Altruistic behavior is modeled
                 by a linear trade-off between selfish and social
                 objectives. Our model can be embedded in the framework
                 of congestion games with player-specific latency
                 functions. Stable states are the pure Nash equilibria
                 of these games, and we examine their existence and the
                 convergence of sequential best-response dynamics. In
                 general, pure Nash equilibria are often absent, and
                 existence is NP-hard to decide. Perhaps surprisingly,
                 if all delay functions are affine, the games remain
                 potential games, even when agents are arbitrarily
                 altruistic. The construction underlying this result can
                 be extended to a class of general potential games and
                 social cost functions, and we study a number of
                 prominent examples. These results give important
                 insights into the robustness of multi-agent systems
                 with heterogeneous altruistic incentives. Furthermore,
                 they yield a general technique to prove that
                 stabilization is robust, even with partly altruistic
                 agents, which is of independent interest. In addition
                 to these results for uncoordinated dynamics, we
                 consider a scenario with a central altruistic
                 institution that can set incentives for the agents. We
                 provide constructive and hardness results for finding
                 the minimum number of altruists to stabilize an optimal
                 congestion profile and more general mechanisms to
                 incentivize agents to adopt favorable behavior. These
                 results are closely related to Stackelberg routing and
                 answer open questions raised recently in the
                 literature.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

%%% TEAC v2(1), March 2014, article 1: welfare effects of signaling
%%% competition among sellers in second-price ad-exchange auctions.
@Article{Polevoy:2014:SCS,
  author =       "Gleb Polevoy and Rann Smorodinsky and Moshe
                 Tennenholtz",
  title =        "Signaling Competition and Social Welfare",
  journal =      j-TEAC,
  volume =       "2",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2560766",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 21 18:00:43 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider an environment where sellers compete over
                 buyers. All sellers are a-priori identical and
                 strategically signal buyers about the product they
                 sell. In a setting motivated by online advertising in
                 display ad exchanges, where firms use second price
                 auctions, a firm's strategy is a decision about its
                 signaling scheme for a stream of goods (e.g., user
                 impressions), and a buyer's strategy is a selection
                 among the firms. In this setting, a single seller will
                 typically provide partial information, and
                 consequently, a product may be allocated inefficiently.
                 Intuitively, competition among sellers may induce
                 sellers to provide more information in order to attract
                 buyers and thus increase efficiency. Surprisingly, we
                 show that such a competition among firms may yield
                 significant loss in consumers' social welfare with
                 respect to the monopolistic setting. Although we also
                 show that in some cases, the competitive setting yields
                 gain in social welfare, we provide a tight bound on
                 that gain, which is shown to be small with respect to
                 the preceding possible loss. Our model is tightly
                 connected with the literature on bundling in
                 auctions.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

%%% TEAC v2(1), March 2014, article 2: disproof of the tree conjecture
%%% for the Fabrikant et al. network creation game, with improved
%%% price-of-anarchy bounds.
%%% Fixed in this revision: bare \alpha (line "cost of ... per edge")
%%% now wrapped in inline math, and "log" inside math replaced by the
%%% \log operator, so the abstract typesets correctly under LaTeX.
@Article{Albers:2014:NEN,
  author =       "Susanne Albers and Stefan Eilts and Eyal Even-Dar and
                 Yishay Mansour and Liam Roditty",
  title =        "On {Nash} Equilibria for a Network Creation Game",
  journal =      j-TEAC,
  volume =       "2",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2560767",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 21 18:00:43 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study a basic network creation game proposed by
                 Fabrikant et al. [2003]. In this game, each player
                 (vertex) can create links (edges) to other players at a
                 cost of $\alpha$ per edge. The goal of every player is
                 to minimize the sum consisting of (a) the cost of the
                 links he has created and (b) the sum of the distances
                 to all other players. Fabrikant et al. conjectured that
                 there exists a constant $A$ such that, for any $\alpha
                 > A$, all nontransient Nash equilibria graphs are
                 trees. They showed that if a Nash equilibrium is a
                 tree, the price of anarchy is constant. In this
                 article, we disprove the tree conjecture. More
                 precisely, we show that for any positive integer $n_0$,
                 there exists a graph built by $n \geq n_0$ players
                 which contains cycles and forms a nontransient Nash
                 equilibrium, for any $\alpha$ with $1 < \alpha \leq
                 \sqrt n / 2$. Our construction makes use of some
                 interesting results on finite affine planes. On the
                 other hand, we show that, for $\alpha \geq 12 n \lceil
                 \log n \rceil$, every Nash equilibrium forms a
                 tree. Without relying on the tree conjecture, Fabrikant
                 et al. proved an upper bound on the price of anarchy of
                 $O(\sqrt{\alpha})$, where $\alpha \in [2, n^2]$. We
                 improve this bound. Specifically, we derive a constant
                 upper bound for $\alpha \in O(\sqrt{n})$ and for
                 $\alpha \geq 12 n \lceil \log n \rceil$. For the
                 intermediate values, we derive an improved bound of
                 $O(1 + (\min\{\alpha^2 / n, n^2 / \alpha \})^{1 /
                 3})$. Additionally, we develop characterizations of
                 Nash equilibria and extend our results to a weighted
                 network creation game as well as to scenarios with cost
                 sharing.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

%%% TEAC v2(1), March 2014, article 3: complexity of goodness-of-fit
%%% measures for revealed preference axioms.
%%% Fixed in this revision: garbled math "$\delta;> 0$" (stray
%%% semicolon) corrected to "$\delta > 0$" in the abstract.
@Article{Smeulders:2014:GFM,
  author =       "Bart Smeulders and Frits C. R. Spieksma and Laurens
                 Cherchye and Bram {De Rock}",
  title =        "Goodness-of-Fit Measures for Revealed Preference
                 Tests: Complexity Results and Algorithms",
  journal =      j-TEAC,
  volume =       "2",
  number =       "1",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2560793",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 21 18:00:43 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We provide results on the computational complexity of
                 goodness-of-fit measures (i.e., Afriat's efficiency
                 index, Varian's efficiency vector-index, and the
                 Houtman-Maks index) associated with several revealed
                 preference axioms (i.e., WARP, SARP, GARP, and
                 HARP). These results explain the computational
                 difficulties that have been observed in literature when
                 computing these indices. Our NP-hardness results are
                 obtained by reductions from the independent set
                 problem. We also show that this reduction can be used
                 to prove that no approximation algorithm achieving a
                 ratio of $O(n^{1 - \delta})$, $\delta > 0$ exists for
                 Varian's index, nor for Houtman-Maks' index (unless P =
                 NP). Finally, we give an exact polynomial-time
                 algorithm for finding Afriat's efficiency index.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

%%% TEAC v2(1), March 2014, article 4: design of optimal rating
%%% protocols sustaining cooperation in online communities.
@Article{Zhang:2014:RPO,
  author =       "Yu Zhang and Jaeok Park and Mihaela van der Schaar",
  title =        "Rating Protocols in Online Communities",
  journal =      j-TEAC,
  volume =       "2",
  number =       "1",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2560794",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 21 18:00:43 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Sustaining cooperation among self-interested agents is
                 critical for the proliferation of emerging online
                 communities. Providing incentives for cooperation in
                 online communities is particularly challenging because
                 of their unique features: a large population of
                 anonymous agents having asymmetric interests and
                 dynamically joining and leaving the community,
                 operation errors, and agents trying to whitewash when
                 they have a low standing in the community. In this
                 article, we take these features into consideration and
                 propose a framework for designing and analyzing a class
                 of incentive schemes based on rating protocols, which
                 consist of a rating scheme and a recommended strategy.
                 We first define the concept of sustainable rating
                 protocols under which every agent has the incentive to
                 follow the recommended strategy given the deployed
                 rating scheme. We then formulate the problem of
                 designing an optimal rating protocol, which selects the
                 protocol that maximizes the overall social welfare
                 among all sustainable rating protocols. Using the
                 proposed framework, we study the structure of optimal
                 rating protocols and explore the impact of one-sided
                 rating, punishment lengths, and whitewashing on optimal
                 rating protocols. Our results show that optimal rating
                 protocols are capable of sustaining cooperation, with
                 the amount of cooperation varying depending on the
                 community characteristics.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

%%% TEAC v2(2), June 2014, article 5: signaling schemes for revenue
%%% maximization in second-price auctions of probabilistic goods.
%%% Fixed in this revision: fused author name "Renato PaesLeme"
%%% corrected to the compound surname "Renato {Paes Leme}" (braced per
%%% this file's convention for multi-word family names).
@Article{Emek:2014:SSR,
  author =       "Yuval Emek and Michal Feldman and Iftah Gamzu and
                 Renato {Paes Leme} and Moshe Tennenholtz",
  title =        "Signaling Schemes for Revenue Maximization",
  journal =      j-TEAC,
  volume =       "2",
  number =       "2",
  pages =        "5:1--5:??",
  month =        jun,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2594564",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Jun 9 16:42:02 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Signaling is an important topic in the study of
                 asymmetric information in economic settings. In
                 particular, the transparency of information available
                 to a seller in an auction setting is a question of
                 major interest. We introduce the study of signaling
                 when conducting a second price auction of a
                 probabilistic good whose actual instantiation is known
                 to the auctioneer but not to the bidders. This
                 framework can be used to model impressions selling in
                 display advertising. We establish several results
                 within this framework. First, we study the problem of
                 computing a signaling scheme that maximizes the
                 auctioneer's revenue in a Bayesian setting. We show
                 that this problem is polynomially solvable for some
                 interesting special cases, but computationally hard in
                 general. Second, we establish a tight bound on the
                 minimum number of signals required to implement an
                 optimal signaling scheme. Finally, we show that at
                 least half of the maximum social welfare can be
                 preserved within such a scheme.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chen:2014:EPR,
  author =       "Yiling Chen and Ian A. Kash and Michael Ruberry and
                 Victor Shnayder",
  title =        "Eliciting Predictions and Recommendations for Decision
                 Making",
  journal =      j-TEAC,
  volume =       "2",
  number =       "2",
  pages =        "6:1--6:??",
  month =        jun,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2556271",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Jun 9 16:42:02 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "When making a decision, a decision maker selects one
                 of several possible actions and hopes to achieve a
                 desirable outcome. To make a better decision, the
                 decision maker often asks experts for advice. In this
                 article, we consider two methods of acquiring advice
                 for decision making. We begin with a method where one
                 or more experts predict the effect of each action and
                 the decision maker then selects an action based on the
                 predictions. We characterize strictly proper decision
                 making, where experts have an incentive to accurately
                 reveal their beliefs about the outcome of each action.
                 However, strictly proper decision making requires the
                 decision maker use a completely mixed strategy to
                 choose an action. To address this limitation, we
                 consider a second method where the decision maker asks
                 a single expert to recommend an action. We show that it
                 is possible to elicit the decision maker's most
                 preferred action for a broad class of preferences of
                 the decision maker, including when the decision maker
                 is an expected value maximizer.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Rozen:2014:EPE,
  author =       "Rakefet Rozen and Rann Smorodinsky",
  title =        "Ex-Post Equilibrium and {VCG} Mechanisms",
  journal =      j-TEAC,
  volume =       "2",
  number =       "2",
  pages =        "7:1--7:??",
  month =        jun,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2594565",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Jun 9 16:42:02 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Consider an abstract social choice setting with
                 incomplete information, where the number of
                 alternatives is large. Albeit natural, implementing VCG
                 mechanisms is infeasible due to the prohibitive
                 communication constraints. However, if players restrict
                 attention to a subset of the alternatives, feasibility
                 may be recovered. This article characterizes the class
                 of subsets that induce an ex-post equilibrium in the
                 original game. It turns out that a crucial condition
                 for such subsets to exist is the availability of a
                 type-independent optimal social alternative for each
                 player. We further analyze the welfare implications of
                 these restrictions. This work follows that of Holzman
                 et al. [2004] and Holzman and Monderer [2004] where
                 similar analysis is done for combinatorial auctions.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chen:2014:PAS,
  author =       "Xujin Chen and Benjamin Doerr and Carola Doerr and
                 Xiaodong Hu and Weidong Ma and Rob van Stee",
  title =        "The Price of Anarchy for Selfish Ring Routing is Two",
  journal =      j-TEAC,
  volume =       "2",
  number =       "2",
  pages =        "8:1--8:??",
  month =        jun,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2548545",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Jun 9 16:42:02 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We analyze the network congestion game with atomic
                 players, asymmetric strategies, and the maximum latency
                 among all players as social cost. This important social
                 cost function is much less understood than the average
                 latency. We show that the price of anarchy is at most
                 two, when the network is a ring and the link latencies
                 are linear. Our bound is tight. This is the first sharp
                 bound for the maximum latency objective.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Cary:2014:CPA,
  author =       "Matthew Cary and Aparna Das and Benjamin Edelman and
                 Ioannis Giotis and Kurtis Heimerl and Anna R. Karlin
                 and Scott Duke Kominers and Claire Mathieu and Michael
                 Schwarz",
  title =        "Convergence of Position Auctions under Myopic
                 Best-Response Dynamics",
  journal =      j-TEAC,
  volume =       "2",
  number =       "3",
  pages =        "9:1--9:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2632226",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Oct 17 12:45:12 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the dynamics of multiround position auctions,
                 considering both the case of exogenous click-through
                 rates and the case in which click-through rates are
                 determined by an endogenous consumer search process. In
                 both contexts, we demonstrate that dynamic position
                 auctions converge to their associated static, envy-free
                 equilibria. Furthermore, convergence is efficient, and
                 the entry of low-quality advertisers does not slow
                 convergence. Because our approach predominantly relies
                 on assumptions common in the sponsored search
                 literature, our results suggest that dynamic position
                 auctions converge more generally.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Azar:2014:QCS,
  author =       "Pablo Daniel Azar and Silvio Micali",
  title =        "The Query Complexity of Scoring Rules",
  journal =      j-TEAC,
  volume =       "2",
  number =       "3",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2632228",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Oct 17 12:45:12 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Proper scoring rules are crucial tools to elicit
                 truthful information from experts. A scoring rule maps
                  $X$, an expert-provided distribution over the set of all
                 possible states of the world, and $ \omega $, a
                 realized state of the world, to a real number
                 representing the expert's reward for his provided
                 information. To compute this reward, a scoring rule
                  queries the distribution $X$ at various states. The
                 number of these queries is thus a natural measure of
                 the complexity of the scoring rule. We prove that any
                 bounded and strictly proper scoring rule that is
                 deterministic must make a number of queries to its
                 input distribution that is a quarter of the number of
                 states of the world. When the state space is very
                 large, this makes the computation of such scoring rules
                 impractical. We also show a new stochastic scoring rule
                 that is bounded, strictly proper, and which makes only
                 two queries to its input distribution. Thus, using
                 randomness allows us to have significant savings when
                 computing scoring rules.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Alaei:2014:RSA,
  author =       "Saeed Alaei and Azarakhsh Malekian and Aravind
                 Srinivasan",
  title =        "On Random Sampling Auctions for Digital Goods",
  journal =      j-TEAC,
  volume =       "2",
  number =       "3",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2517148",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Oct 17 12:45:12 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In the context of auctions for digital goods, an
                 interesting random sampling auction has been proposed
                 by Goldberg et al. [2001]. This auction has been
                 analyzed by Feige et al. [2005], who have shown that it
                 obtains in expectation at least 1/15 fraction of the
                 optimal revenue, which is substantially better than the
                 previously proven constant bounds but still far from
                 the conjectured lower bound of 1/4. In this article, we
                 prove that the aforementioned random sampling auction
                 obtains at least 1/4 fraction of the optimal revenue
                 for a large class of instances where the number of bids
                 above (or equal to) the optimal sale price is at least
                 6. We also show that this auction obtains at least
                 1/4.68 fraction of the optimal revenue for the small
                 class of remaining instances, thus leaving a negligible
                 gap between the lower and upper bound. We employ a mix
                 of probabilistic techniques and dynamic programming to
                 compute these bounds.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Dandekar:2014:PAR,
  author =       "Pranav Dandekar and Nadia Fawaz and Stratis
                 Ioannidis",
  title =        "Privacy Auctions for Recommender Systems",
  journal =      j-TEAC,
  volume =       "2",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jul,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2629665",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Oct 17 12:45:12 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study a market for private data in which a data
                 analyst publicly releases a statistic over a database
                 of private information. Individuals that own the data
                 incur a cost for their loss of privacy proportional to
                 the differential privacy guarantee given by the analyst
                 at the time of the release. The analyst incentivizes
                 individuals by compensating them, giving rise to a
                 privacy auction. Motivated by recommender systems, the
                 statistic we consider is a linear predictor function
                 with publicly known weights. The statistic can be
                 viewed as a prediction of the unknown data of a new
                 individual, based on the data of individuals in the
                 database. We formalize the trade-off between privacy
                 and accuracy in this setting, and show that a simple
                 class of estimates achieves an order-optimal trade-off.
                 It thus suffices to focus on auction mechanisms that
                 output such estimates. We use this observation to
                 design a truthful, individually rational,
                 proportional-purchase mechanism under a fixed budget
                 constraint. We show that our mechanism is 5-approximate
                 in terms of accuracy compared to the optimal mechanism,
                 and that no truthful mechanism can achieve a $ 2 -
                 \epsilon $ approximation, for any $\epsilon > 0$.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Balcan:2014:NOC,
  author =       "Maria-Florina Balcan and Sara Krehbiel and Georgios
                 Piliouras and Jinwoo Shin",
  title =        "Near-Optimality in Covering Games by Exposing Global
                 Information",
  journal =      j-TEAC,
  volume =       "2",
  number =       "4",
  pages =        "13:1--13:??",
  month =        oct,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2597890",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Oct 28 16:50:26 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Mechanism design for distributed systems is
                 fundamentally concerned with aligning individual
                 incentives with social welfare to avoid socially
                 inefficient outcomes that can arise from agents acting
                 autonomously. One simple and natural approach is to
                 centrally broadcast nonbinding advice intended to guide
                 the system to a socially near-optimal state while still
                 harnessing the incentives of individual agents. The
                 analytical challenge is proving fast convergence to
                 near optimal states, and in this article we give the
                 first results that carefully constructed advice vectors
                 yield stronger guarantees. We apply this approach to a
                 broad family of potential games modeling vertex cover
                 and set cover optimization problems in a distributed
                 setting. This class of problems is interesting because
                 finding exact solutions to their optimization problems
                 is NP-hard yet highly inefficient equilibria exist, so
                 a solution in which agents simply locally optimize is
                 not satisfactory. We show that with an arbitrary advice
                 vector, a set cover game quickly converges to an
                 equilibrium with cost of the same order as the square
                 of the social cost of the advice vector. More
                 interestingly, we show how to efficiently construct an
                  advice vector with a particular structure with cost $
                  O(\log n)$ times the optimal social cost, and we prove
                 that the system quickly converges to an equilibrium
                 with social cost of this same order.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Bhawalkar:2014:WCG,
  author =       "Kshipra Bhawalkar and Martin Gairing and Tim
                 Roughgarden",
  title =        "Weighted Congestion Games: The Price of Anarchy,
                 Universal Worst-Case Examples, and Tightness",
  journal =      j-TEAC,
  volume =       "2",
  number =       "4",
  pages =        "14:1--14:??",
  month =        oct,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2629666",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Oct 28 16:50:26 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We characterize the Price of Anarchy (POA) in weighted
                 congestion games, as a function of the allowable
                 resource cost functions. Our results provide as
                 thorough an understanding of this quantity as is
                 already known for nonatomic and unweighted congestion
                 games, and take the form of universal (cost
                 function-independent) worst-case examples. One
                 noteworthy by-product of our proofs is the fact that
                 weighted congestion games are ``tight,'' which implies
                 that the worst-case price of anarchy with respect to
                 pure Nash equilibria, mixed Nash equilibria, correlated
                 equilibria, and coarse correlated equilibria are always
                 equal (under mild conditions on the allowable cost
                 functions). Another is the fact that, like nonatomic
                 but unlike atomic (unweighted) congestion games,
                 weighted congestion games with trivial structure
                 already realize the worst-case POA, at least for
                 polynomial cost functions. We also prove a new result
                 about unweighted congestion games: the worst-case price
                 of anarchy in symmetric games is as large as in their
                 more general asymmetric counterparts.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Fotakis:2014:PDM,
  author =       "Dimitris Fotakis and Christos Tzamos",
  title =        "On the Power of Deterministic Mechanisms for Facility
                 Location Games",
  journal =      j-TEAC,
  volume =       "2",
  number =       "4",
  pages =        "15:1--15:??",
  month =        oct,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2665005",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Oct 28 16:50:26 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider $K$-Facility Location games, where $n$
                 strategic agents report their locations in a metric
                 space and a mechanism maps them to $K$ facilities. The
                 agents seek to minimize their connection cost, namely
                 the distance of their true location to the nearest
                 facility, and may misreport their location. We are
                 interested in deterministic mechanisms that are
                 strategyproof, that is, ensure that no agent can
                 benefit from misreporting her location, do not resort
                 to monetary transfers, and achieve a bounded
                 approximation ratio to the total connection cost of the
                 agents (or to the $ L_p $ norm of the connection costs,
                 for some $ p \in [1, \infty) $ or for $ p = \infty) $.
                 Our main result is an elegant characterization of
                 deterministic strategyproof mechanisms with a bounded
                 approximation ratio for $2$-Facility Location on the
                 line. In particular, we show that for instances with $
                 n \geq 5 $ agents, any such mechanism either admits a
                 unique dictator or always places the facilities at the
                 leftmost and the rightmost location of the instance. As
                 a corollary, we obtain that the best approximation
                 ratio achievable by deterministic strategyproof
                 mechanisms for the problem of locating $2$ facilities
                 on the line to minimize the total connection cost is
                 precisely $ n - 2$. Another rather surprising
                 consequence is that the Two-Extremes mechanism of
                 Procaccia and Tennenholtz [2009] is the only
                 deterministic anonymous strategyproof mechanism with a
                 bounded approximation ratio for $2$-Facility Location
                 on the line. The proof of the characterization employs
                 several new ideas and technical tools, which provide
                 new insights into the behavior of deterministic
                 strategyproof mechanisms for $K$-Facility Location
                 games and may be of independent interest. Employing one
                 of these tools, we show that for every $ K \geq 3$,
                 there do not exist any deterministic anonymous
                 strategyproof mechanisms with a bounded approximation
                 ratio for $K$-Facility Location on the line, even for
                 simple instances with $ K + 1$ agents. Moreover,
                 building on the characterization for the line, we show
                  that there do not exist any deterministic
                  strategyproof mechanisms with a bounded approximation
                  ratio for
                 $2$-Facility Location and instances with $ n \geq 3$
                 agents located in a star.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Michalak:2014:ICV,
  author =       "Tomasz P. Michalak and Piotr L. Szczepa{\'n}ski and
                 Talal Rahwan and Agata Chrobak and Simina Br{\^a}nzei
                 and Michael Wooldridge and Nicholas R. Jennings",
  title =        "Implementation and Computation of a Value for
                 Generalized Characteristic Function Games",
  journal =      j-TEAC,
  volume =       "2",
  number =       "4",
  pages =        "16:1--16:??",
  month =        oct,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2665007",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Oct 28 16:50:26 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Generalized characteristic function games are a
                 variation of characteristic function games, in which
                 the value of a coalition depends not only on the
                 identities of its members, but also on the order in
                 which the coalition is formed. This class of games is a
                 useful abstraction for a number of realistic settings
                 and economic situations, such as modeling relationships
                 in social networks. To date, two main extensions of the
                 Shapley value have been proposed for generalized
                 characteristic function games: the Nowak--Radzik [1994]
                 value and the S{\'a}nchez--Berganti{\~n}os [1997]
                 value. In this context, the present article studies
                 generalized characteristic function games from the
                 point of view of implementation and computation.
                 Specifically, the article makes two key contributions.
                 First, building upon the mechanism by Dasgupta and Chiu
                 [1998], we present a non-cooperative mechanism that
                 implements both the Nowak--Radzik value and the
                  S{\'a}nchez--Berganti{\~n}os value in Subgame-Perfect
                 Nash Equilibria in expectations. Second, in order to
                 facilitate an efficient computation supporting the
                 implementation mechanism, we propose the Generalized
                 Marginal-Contribution Nets representation for this type
                 of game. This representation extends the results of
                 Ieong and Shoham [2005] and Elkind et al. [2009] for
                 characteristic function games and retains their
                 attractive computational properties.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chen:2014:AIP,
  author =       "Po-An Chen and Bart {De Keijzer} and David Kempe and
                 Guido Sch{\"a}fer",
  title =        "Altruism and Its Impact on the Price of Anarchy",
  journal =      j-TEAC,
  volume =       "2",
  number =       "4",
  pages =        "17:1--17:??",
  month =        oct,
  year =         "2014",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2597893",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Oct 28 16:50:26 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the inefficiency of equilibria for congestion
                 games when players are (partially) altruistic. We model
                 altruistic behavior by assuming that player $i$'s
                  perceived cost is a convex combination of $(1 -
                  \alpha_i)$ times his direct cost and $\alpha_i$ times
                  the social cost. Tuning the parameters $\alpha_i$
                  allows smooth
                 interpolation between purely selfish and purely
                 altruistic behavior. Within this framework, we study
                 primarily altruistic extensions of (atomic and
                 nonatomic) congestion games, but also obtain some
                 results on fair cost-sharing games and valid utility
                 games. We derive (tight) bounds on the price of anarchy
                 of these games for several solution concepts. Thereto,
                 we suitably adapt the smoothness notion introduced by
                 Roughgarden and show that it captures the essential
                 properties to determine the robust price of anarchy of
                 these games. Our bounds show that for atomic congestion
                 games and cost-sharing games, the robust price of
                 anarchy gets worse with increasing altruism, while for
                 valid utility games, it remains constant and is not
                 affected by altruism. However, the increase in the
                 price of anarchy is not a universal phenomenon: For
                 general nonatomic congestion games with uniform
                 altruism, the price of anarchy improves with increasing
                 altruism. For atomic and nonatomic symmetric singleton
                 congestion games, we derive bounds on the pure price of
                 anarchy that improve as the average level of altruism
                 increases. (For atomic games, we only derive such
                 bounds when cost functions are linear.) Since the
                 bounds are also strictly lower than the robust price of
                 anarchy, these games exhibit natural examples in which
                 pure Nash equilibria are more efficient than more
                 permissive notions of equilibrium.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Leyton-Brown:2015:ISI,
  author =       "Kevin Leyton-Brown and Panos Ipeirotis",
  title =        "Introduction to the Special Issue on {EC'12}",
  journal =      j-TEAC,
  volume =       "3",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2742678",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 27 17:58:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Caragiannis:2015:APN,
  author =       "Ioannis Caragiannis and Angelo Fanelli and Nick Gravin
                 and Alexander Skopalik",
  title =        "Approximate Pure {Nash} Equilibria in Weighted
                 Congestion Games: Existence, Efficient Computation, and
                 Structure",
  journal =      j-TEAC,
  volume =       "3",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2614687",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 27 17:58:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider structural and algorithmic questions
                 related to the Nash dynamics of weighted congestion
                 games. In weighted congestion games with linear latency
                 functions, the existence of pure Nash equilibria is
                 guaranteed by a potential function argument.
                 Unfortunately, this proof of existence is inefficient
                 and computing pure Nash equilibria in such games is a
                 PLS-hard problem even when all players have unit
                 weights. The situation gets worse when superlinear
                 (e.g., quadratic) latency functions come into play; in
                 this case, the Nash dynamics of the game may contain
                 cycles and pure Nash equilibria may not even exist.
                 Given these obstacles, we consider approximate pure
                 Nash equilibria as alternative solution concepts. A $
                 \rho $-approximate pure Nash equilibrium is a state of
                 a (weighted congestion) game from which no player has
                 any incentive to deviate in order to improve her cost
                 by a multiplicative factor higher than $ \rho $. Do
                 such equilibria exist for small values of $ \rho $ ?
                 And if so, can we compute them efficiently? We provide
                 positive answers to both questions for weighted
                 congestion games with polynomial latency functions by
                 exploiting an ``approximation'' of such games by a new
                 class of potential games that we call $ \Psi $-games.
                 This allows us to show that these games have $
                 d!$-approximate pure Nash equilibria, where $d$ is the
                 maximum degree of the latency functions. Our main
                 technical contribution is an efficient algorithm for
                  computing $ O(1)$-approximate pure Nash equilibria when
                 $d$ is a constant. For games with linear latency
                  functions, the approximation guarantee is $ (3 + \sqrt
                  5) / 2 + O(\gamma)$ for arbitrarily small $ \gamma > 0$;
                 for latency functions with maximum degree $ d \geq 2$,
                  it is $ d^{2 d + o(d)}$. The running time is polynomial
                 in the number of bits in the representation of the game
                 and $ 1 / \gamma $. As a byproduct of our techniques,
                 we also show the following interesting structural
                 statement for weighted congestion games with polynomial
                 latency functions of maximum degree $ d \geq 2 $:
                 polynomially-long sequences of best-response moves from
                  any initial state to a $ d^{O(d^2)}$-approximate pure
                 Nash equilibrium exist and can be efficiently
                 identified in such games as long as $d$ is a constant.
                 To the best of our knowledge, these are the first
                 positive algorithmic results for approximate pure Nash
                 equilibria in weighted congestion games. Our techniques
                 significantly extend our recent work on unweighted
                 congestion games through the use of $ \Psi $-games. The
                 concept of approximating nonpotential games by
                 potential ones is interesting in itself and might have
                 further applications.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Parkes:2015:BDR,
  author =       "David C. Parkes and Ariel D. Procaccia and Nisarg
                 Shah",
  title =        "Beyond Dominant Resource Fairness: Extensions,
                 Limitations, and Indivisibilities",
  journal =      j-TEAC,
  volume =       "3",
  number =       "1",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2739040",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 27 17:58:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the problem of allocating multiple resources
                 to agents with heterogeneous demands. Technological
                 advances such as cloud computing and data centers
                 provide a new impetus for investigating this problem
                 under the assumption that agents demand the resources
                 in fixed proportions, known in economics as Leontief
                 preferences. In a recent paper, Ghodsi et al. [2011]
                 introduced the dominant resource fairness (DRF)
                 mechanism, which was shown to possess highly desirable
                 theoretical properties under Leontief preferences. We
                 extend their results in three directions. First, we
                 show that DRF generalizes to more expressive settings,
                 and leverage a new technical framework to formally
                 extend its guarantees. Second, we study the relation
                 between social welfare and properties such as
                 truthfulness; DRF performs poorly in terms of social
                 welfare, but we show that this is an unavoidable
                 shortcoming that is shared by every mechanism that
                 satisfies one of three basic properties. Third, and
                 most importantly, we study a realistic setting that
                 involves indivisibilities. We chart the boundaries of
                 the possible in this setting, contributing a new
                 relaxed notion of fairness and providing both
                 possibility and impossibility results.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Babaioff:2015:DPL,
  author =       "Moshe Babaioff and Shaddin Dughmi and Robert Kleinberg
                 and Aleksandrs Slivkins",
  title =        "Dynamic Pricing with Limited Supply",
  journal =      j-TEAC,
  volume =       "3",
  number =       "1",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2559152",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 27 17:58:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the problem of designing
                 revenue-maximizing online posted-price mechanisms when
                 the seller has limited supply. A seller has k identical
                 items for sale and is facing n potential buyers
                 (``agents'') that are arriving sequentially. Each agent
                 is interested in buying one item. Each agent's value
                 for an item is an independent sample from some fixed
                 (but unknown) distribution with support [0,1]. The
                 seller offers a take-it-or-leave-it price to each
                 arriving agent (possibly different for different
                 agents), and aims to maximize his expected revenue. We
                 focus on mechanisms that do not use any information
                 about the distribution; such mechanisms are called
                 detail-free (or prior-independent). They are desirable
                 because knowing the distribution is unrealistic in many
                 practical scenarios. We study how the revenue of such
                 mechanisms compares to the revenue of the optimal
                 offline mechanism that knows the distribution
                 (``offline benchmark''). We present a detail-free
                 online posted-price mechanism whose revenue is at most
                  $ O((k \log n)^{2 / 3}) $ less than the offline benchmark,
                 for every distribution that is regular. In fact, this
                 guarantee holds without any assumptions if the
                 benchmark is relaxed to fixed-price mechanisms.
                 Further, we prove a matching lower bound. The
                 performance guarantee for the same mechanism can be
                 improved to $ O (\sqrt k \log n) $, with a
                 distribution-dependent constant, if the ratio $ k / n $
                 is sufficiently small. We show that, in the worst case
                 over all demand distributions, this is essentially the
                 best rate that can be obtained with a
                 distribution-specific constant. On a technical level,
                 we exploit the connection to multiarmed bandits (MAB).
                 While dynamic pricing with unlimited supply can easily
                 be seen as an MAB problem, the intuition behind MAB
                 approaches breaks when applied to the setting with
                 limited supply. Our high-level conceptual contribution
                 is that even the limited supply setting can be
                 fruitfully treated as a bandit problem.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Dutting:2015:PRT,
  author =       "Paul D{\"u}tting and Felix Fischer and Pichayut
                 Jirapinyo and John K. Lai and Benjamin Lubin and David
                 C. Parkes",
  title =        "Payment Rules through Discriminant-Based Classifiers",
  journal =      j-TEAC,
  volume =       "3",
  number =       "1",
  pages =        "5:1--5:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2559049",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 27 17:58:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In mechanism design it is typical to impose incentive
                 compatibility and then derive an optimal mechanism
                 subject to this constraint. By replacing the incentive
                 compatibility requirement with the goal of minimizing
                 expected ex post regret, we are able to adapt
                 statistical machine learning techniques to the design
                 of payment rules. This computational approach to
                 mechanism design is applicable to domains with
                 multi-dimensional types and situations where
                 computational efficiency is a concern. Specifically,
                 given an outcome rule and access to a type
                 distribution, we train a support vector machine with a
                 specific structure imposed on the discriminant
                 function, such that it implicitly learns a
                 corresponding payment rule with desirable incentive
                 properties. We extend the framework to adopt succinct
                 $k$-wise dependent valuations, leveraging a connection
                 with maximum a posteriori assignment on Markov networks
                 to enable training to scale up to settings with a large
                 number of items; we evaluate this construction in the
                 case where $ k = 2 $. We present applications to
                 multiparameter combinatorial auctions with approximate
                 winner determination, and the assignment problem with
                 an egalitarian outcome rule. Experimental results
                 demonstrate that the construction produces payment
                 rules with low ex post regret, and that penalizing
                 classification error is effective in preventing
                 failures of ex post individual rationality.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Roughgarden:2015:PAG,
  author =       "Tim Roughgarden",
  title =        "The Price of Anarchy in Games of Incomplete
                 Information",
  journal =      j-TEAC,
  volume =       "3",
  number =       "1",
  pages =        "6:1--6:??",
  month =        mar,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2737816",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Mar 27 17:58:56 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We define smooth games of incomplete information. We
                 prove an ``extension theorem'' for such games: price of
                 anarchy bounds for pure Nash equilibria for all induced
                 full-information games extend automatically, without
                 quantitative degradation, to all mixed-strategy
                 Bayes--Nash equilibria with respect to a product prior
                 distribution over players' preferences. We also note
                 that, for Bayes--Nash equilibria in games with
                 correlated player preferences, there is no general
                 extension theorem for smooth games. We give several
                 applications of our definition and extension theorem.
                 First, we show that many games of incomplete
                 information for which the price of anarchy has been
                 studied are smooth in our sense. Our extension theorem
                 unifies much of the known work on the price of anarchy
                 in games of incomplete information. Second, we use our
                 extension theorem to prove new bounds on the price of
                 anarchy of Bayes--Nash equilibria in routing games with
                 incomplete information.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Goldstein:2015:IET,
  author =       "Daniel G. Goldstein and R. Preston McAfee and
                 Siddharth Suri",
  title =        "Improving the Effectiveness of Time-Based Display
                 Advertising",
  journal =      j-TEAC,
  volume =       "3",
  number =       "2",
  pages =        "7:1--7:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2716323",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Apr 21 11:23:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Display advertisements are typically sold by the
                 impression, where one impression is simply one download
                 of an ad. Previous work has shown that the longer an ad
                 is in view, the more likely a user is to remember it
                 and that there are diminishing returns to increased
                 exposure time [Goldstein et al. 2011]. Since a pricing
                 scheme that is at least partially based on time is more
                 exact than one based solely on impressions, time-based
                 advertising may become an industry standard. We answer
                 an open question concerning time-based pricing schemes:
                 how should time slots for advertisements be divided? We
                 provide evidence that ads can be scheduled in a way
                 that leads to greater total recollection, which
                 advertisers value, and increased revenue, which
                 publishers value. We document two main findings. First,
                 we show that displaying two shorter ads results in more
                 total recollection than displaying one longer ad of
                 twice the duration. Second, we show that this effect
                 disappears as the duration of these ads increases. We
                 conclude with a theoretical prediction regarding the
                 circumstances under which the display advertising
                 industry would benefit if it moved to a partially or
                 fully time-based standard.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Ganzfried:2015:SOE,
  author =       "Sam Ganzfried and Tuomas Sandholm",
  title =        "Safe Opponent Exploitation",
  journal =      j-TEAC,
  volume =       "3",
  number =       "2",
  pages =        "8:1--8:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2716322",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Apr 21 11:23:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the problem of playing a repeated
                  two-player zero-sum game safely: that is, guaranteeing
                 at least the value of the game per period in
                 expectation regardless of the strategy used by the
                 opponent. Playing a stage-game equilibrium strategy at
                 each time step clearly guarantees safety, and prior
                 work has (incorrectly) stated that it is impossible to
                 simultaneously deviate from a stage-game equilibrium
                 (in hope of exploiting a suboptimal opponent) and to
                 guarantee safety. We show that such profitable
                 deviations are indeed possible specifically in games
                 where certain types of ``gift'' strategies exist, which
                 we define formally. We show that the set of strategies
                 constituting such gifts can be strictly larger than the
                 set of iteratively weakly-dominated strategies; this
                 disproves another recent assertion which states that
                 all noniteratively weakly dominated strategies are best
                 responses to each equilibrium strategy of the other
                 player. We present a full characterization of safe
                 strategies, and develop efficient algorithms for
                 exploiting suboptimal opponents while guaranteeing
                 safety. We also provide analogous results for
                 extensive-form games of perfect and imperfect
                 information, and present safe exploitation algorithms
                 and full characterizations of safe strategies for those
                 settings as well. We present experimental results in
                 Kuhn poker, a canonical test problem for game-theoretic
                 algorithms. Our experiments show that (1) aggressive
                 safe exploitation strategies significantly outperform
                 adjusting the exploitation within stage-game
                 equilibrium strategies only and (2) all the safe
                 exploitation strategies significantly outperform a
                 (nonsafe) best response strategy against strong dynamic
                 opponents.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Hoefer:2015:SSA,
  author =       "Martin Hoefer and Thomas Kesselheim",
  title =        "Secondary Spectrum Auctions for Symmetric and
                 Submodular Bidders",
  journal =      j-TEAC,
  volume =       "3",
  number =       "2",
  pages =        "9:1--9:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2739041",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Apr 21 11:23:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study truthful auctions for secondary spectrum
                 usage in wireless networks. In this scenario, $n$
                 communication requests need to be allocated to $k$
                 available channels that are subject to interference and
                 noise. We present the first truthful mechanisms for
                 secondary spectrum auctions with symmetric or
                 submodular valuations. Our approach to model
                 interference uses an edge-weighted conflict graph, and
                 our algorithms provide asymptotically almost optimal
                 approximation bounds for conflict graphs with a small
                 inductive independence number $ \rho \ll n $. This
                 approach covers a large variety of interference models
                 such as, for instance, the protocol model or the
                 recently popular physical model of interference. For
                 unweighted conflict graphs and symmetric valuations we
                 use LP-rounding to obtain $ O(\rho)$-approximate
                 mechanisms; for weighted conflict graphs we get a
                 factor of $ O(\rho \cdot (\log n + \log k))$. For
                 submodular users we combine the convex rounding
                 framework of Dughmi et al. [2011] with randomized
                 metarounding to obtain $ O(\rho)$-approximate
                 mechanisms for matroid-rank-sum valuations; for
                 weighted conflict graphs we can fully drop the
                 dependence on $k$ to get $ O(\rho \cdot \log n)$. We
                 conclude with promising initial results for
                 deterministically truthful mechanisms that allow
                 approximation factors based on $ \rho $.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Wilkens:2015:SCM,
  author =       "Christopher A. Wilkens and Balasubramanian Sivan",
  title =        "Single-Call Mechanisms",
  journal =      j-TEAC,
  volume =       "3",
  number =       "2",
  pages =        "10:1--10:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2741027",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Apr 21 11:23:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Truthfulness is fragile and demanding. It is
                 oftentimes harder to guarantee truthfulness when
                 solving a problem than it is to solve the problem
                 itself. Even worse, truthfulness can be utterly
                 destroyed by small uncertainties in a mechanism's
                 outcome. One obstacle is that truthful payments depend
                 on outcomes other than the one realized, such as the
                 lengths of non-shortest-paths in a shortest-path
                 auction. Single-call mechanisms are a powerful tool
                 that circumvents this obstacle: they implicitly charge
                 truthful payments, guaranteeing truthfulness in
                 expectation using only the outcome realized by the
                 mechanism. The cost of such truthfulness is a trade-off
                 between the expected quality of the outcome and the
                 risk of large payments. We study two of the most
                 general domains for truthful mechanisms and largely
                 settle when and to what extent single-call mechanisms
                 are possible. The first single-call construction was
                 discovered by Babaioff et al. [2010] in
                 single-parameter domains. They give a transformation
                 that turns any monotone, single-parameter allocation
                 rule into a truthful-in-expectation single-call
                 mechanism. Our first result is a natural complement to
                 Babaioff et al. [2010]: we give a new transformation
                 that produces a single-call VCG mechanism from any
                 allocation rule for which VCG payments are truthful.
                 Second, in both the single-parameter and VCG settings,
                 we precisely characterize the possible transformations,
                 showing that a wide variety of transformations are
                 possible but that all take a very simple form. Finally,
                 we study the inherent trade-off between the expected
                 quality of the outcome and the risk of large payments.
                 We show that our construction and that of Babaioff et
                 al. [2010] simultaneously optimize a variety of metrics
                 in their respective domains. Our study is motivated by
                 settings where uncertainty in a mechanism renders other
                 known techniques untruthful, and we offer a variety of
                 examples where such uncertainty can arise. In
                 particular, we analyze pay-per-click advertising
                 auctions, where the truthfulness of the standard
                 VCG-based auction is easily broken when the
                 auctioneer's estimated click-through-rates are
                 imprecise.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chakrabarti:2015:TSO,
  author =       "Deepayan Chakrabarti and Erik Vee",
  title =        "Traffic Shaping to Optimize Ad Delivery",
  journal =      j-TEAC,
  volume =       "3",
  number =       "2",
  pages =        "11:1--11:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2739010",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Apr 21 11:23:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Web publishers must balance two objectives: how to
                 keep users engaged by directing them to relevant
                 content, and how to properly monetize this user
                 traffic. The standard approach is to solve each problem
                 in isolation, for example, by displaying content that
                 is tailored to the user's interests so as to maximize
                 clickthrough rates (CTR), and also by building a
                 standalone ad serving system that displays ads
                 depending on the user's characteristics, the article
                 being viewed by the user, and advertiser-specified
                 constraints. However, showing the user only those
                 articles with highest expected CTR precludes the
                 display of some ads; if the publisher had previously
                 guaranteed the delivery of a certain volume of
                 impressions to such ads, then underdelivery penalties
                 might have to be paid. We propose a joint optimization
                 of article selection and ad serving that minimizes
                 underdelivery by shaping some of the incoming traffic
                 to pages where underperforming ads can be displayed,
                 while incurring only minor drops in CTR. In addition to
                 formulating the problem, we design an online
                 optimization algorithm that can find the optimal
                 traffic shaping probabilities for each new user using
                 only a cache of one number per ad contract. Experiments
                 on a large real-world ad-serving Web portal demonstrate
                 significant advantages over the standalone approach: a
                 threefold reduction in underdelivery with only 10\%
                 drop in CTR, or a 2.6-fold reduction with a 4\% CTR
                 drop, and similar results over a wide range.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Ghosh:2015:MME,
  author =       "Arpita Ghosh and Mohammad Mahdian and R. Preston
                 McAfee and Sergei Vassilvitskii",
  title =        "To Match or Not to Match: Economics of Cookie Matching
                 in Online Advertising",
  journal =      j-TEAC,
  volume =       "3",
  number =       "2",
  pages =        "12:1--12:??",
  month =        apr,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2745801",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue Apr 21 11:23:36 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Modern online advertising increasingly relies on the
                 ability to follow the same user across the Internet
                 using technology called cookie matching to increase
                 efficiency in ad allocation. Web publishers today use
                 this technology to share information about the websites
                 a user has visited, making it possible to target
                 advertisements to users based on their prior history.
                 This begs the question: do publishers (who are
                 competitors for advertising money) always have the
                 incentive to share online information? Intuitive
                 arguments as well as anecdotal evidence suggest that
                  sometimes a premium publisher might suffer from information
                 sharing through an effect called information leakage:
                 by sharing user information with the advertiser, the
                 advertiser will be able to target the same user
                 elsewhere on cheaper publishers, leading to a dilution
                 of the value of the supply on the premium publishers.
                 The goal of this article is to explore this aspect of
                 online information sharing. We show that, when
                 advertisers are homogeneous in the sense that their
                 relative valuations of users are consistent, publishers
                 always agree about the benefits of cookie matching in
                 equilibrium: either all publishers' revenues benefit,
                 or all suffer, from cookie matching. We also show using
                 a simple model that, when advertisers are not
                 homogeneous, the information leakage indeed can occur,
                 with cookie matching helping one publisher's revenues
                 while harming the other.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Kash:2015:EAS,
  author =       "Ian A. Kash and Eric J. Friedman and Joseph Y.
                 Halpern",
  title =        "An Equilibrium Analysis of Scrip Systems",
  journal =      j-TEAC,
  volume =       "3",
  number =       "3",
  pages =        "13:1--13:??",
  month =        jun,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2659006",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Jul 31 19:26:16 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "A game-theoretic model of scrip (artificial currency)
                 systems is analyzed. It is shown that relative entropy
                 can be used to characterize the distribution of agent
                  wealth when all agents use threshold strategies---that
                 is, they volunteer to do work if and only if they have
                 below a threshold amount of money. Monotonicity of
                 agents' best-reply functions is used to show that scrip
                 systems have pure strategy equilibria where all agents
                 use threshold strategies. An algorithm is given that
                 can compute such an equilibrium and the resulting
                 distribution of wealth.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Immorlica:2015:ILR,
  author =       "Nicole Immorlica and Mohammad Mahdian",
  title =        "Incentives in Large Random Two-Sided Markets",
  journal =      j-TEAC,
  volume =       "3",
  number =       "3",
  pages =        "14:1--14:??",
  month =        jun,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2656202",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Jul 31 19:26:16 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Many centralized two-sided markets form a matching
                 between participants by running a stable matching
                 algorithm. It is a well-known fact that no matching
                 mechanism based on a stable matching algorithm can
                 guarantee truthfulness as a dominant strategy for
                 participants. However, we show that in a probabilistic
                 setting where the preference lists on one side of the
                 market are composed of only a constant (independent of
                 the size of the market) number of entries, each drawn
                 from an arbitrary distribution, the number of
                 participants that have more than one stable partner is
                 vanishingly small. This proves (and generalizes) a
                 conjecture of Roth and Peranson [1999]. As a corollary
                 of this result, we show that, with high probability,
                 the truthful strategy is the best response for a random
                 player when the other players are truthful. We also
                 analyze equilibria of the deferred acceptance stable
                 matching game. We show that the game with complete
                 information has an equilibrium in which, in
                 expectation, a $ (1 - o(1)) $ fraction of the
                 strategies are truthful. In the more realistic setting
                 of a game of incomplete information, we will show that
                 the set of truthful strategies form a $ (1 +
                 o(1))$-approximate Bayesian--Nash equilibrium for
                 uniformly random preferences. Our results have
                 implications in many practical settings and are
                 inspired by the work of Roth and Peranson [1999] on the
                 National Residency Matching Program.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Cavallo:2015:DAA,
  author =       "Ruggiero Cavallo and R. Preston McAfee and Sergei
                 Vassilvitskii",
  title =        "Display Advertising Auctions with Arbitrage",
  journal =      j-TEAC,
  volume =       "3",
  number =       "3",
  pages =        "15:1--15:??",
  month =        jun,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2668033",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Jul 31 19:26:16 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Online display advertising exchanges connect Web
                 publishers with advertisers seeking to place ads. In
                 many cases, the advertiser obtains value from an ad
                 impression (a viewing by a user) only if it is clicked,
                 and frequently advertisers prefer to pay contingent on
                 this occurring. But at the same time, many publishers
                 demand payment independent of clicks. Arbitragers with
                 good estimates of click-probabilities can resolve this
                 conflict by absorbing the risk and acting as an
                 intermediary, paying the publisher on allocation and
                 being paid only if a click occurs. This article
                 examines the incentives of advertisers and arbitragers
                 and contributes an efficient mechanism with truthful
                 bidding by the advertisers and truthful reporting of
                 click predictions by arbitragers as dominant strategies
                 while, given that a hazard rate condition is satisfied,
                 yielding increased revenue to the publisher. We provide
                 empirical evidence based on bid data from Yahoo's Right
                 Media Exchange suggesting that the mechanism would
                 increase revenue in practice.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Bilo:2015:BDN,
  author =       "Davide Bil{\`o} and Luciano Gual{\`a} and Guido
                 Proietti",
  title =        "Bounded-Distance Network Creation Games",
  journal =      j-TEAC,
  volume =       "3",
  number =       "3",
  pages =        "16:1--16:??",
  month =        jun,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2770639",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Jul 31 19:26:16 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "A network creation game simulates a decentralized and
                 noncooperative construction of a communication network.
                 Informally, there are $n$ players sitting on the
                 network nodes, which attempt to establish a reciprocal
                 communication by activating, thereby incurring a
                 certain cost, any of their incident links. The goal of
                 each player is to have all the other nodes as close as
                 possible in the resulting network, while buying as few
                 links as possible. According to this intuition, any
                 model of the game must then appropriately address a
                 balance between these two conflicting objectives.
                 Motivated by the fact that a player might have a strong
                 requirement about her centrality in the network, we
                 introduce a new setting in which a player who maintains
                 her (maximum or average) distance to the other nodes
                 within a given bound incurs a cost equal to the number
                 of activated edges; otherwise her cost is unbounded. We
                 study the problem of understanding the structure of
                 pure Nash equilibria of the resulting games, which we
                 call MaxBD and SumBD, respectively. For both games, we
                 show that when distance bounds associated with players
                 are nonuniform, then equilibria can be arbitrarily bad.
                 On the other hand, for MaxBD, we show that when nodes
                 have a uniform bound $ D \geq 3$ on the maximum
                 distance, then the price of anarchy (PoA) is lower and
                  upper bounded by 2 and $ O(n^{1 / \lfloor \log_3 D
                 \rfloor + 1})$, respectively (i.e., PoA is constant as
                 soon as $D$ is $ \Omega (n^\epsilon)$, for any $
                 \epsilon > 0$), while for the interesting case $ D =
                 2$, we are able to prove that the PoA is $ \Omega
                 (\sqrt {n})$ and $ O(\sqrt {n \log n})$. For the
                 uniform SumBD, we obtain similar (asymptotically)
                 results and moreover show that PoA becomes constant as
                 soon as the bound on the average distance is $ 2^{
                 \omega (\sqrt {\log n})}$.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chen:2015:ISI,
  author =       "Yiling Chen and Nicole Immorlica",
  title =        "Introduction to the Special Issue on {WINE'13}",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "17:1--17:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2796538",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Bateni:2015:RMN,
  author =       "Mohammadhossein Bateni and Nima Haghpanah and
                 Balasubramanian Sivan and Morteza Zadimoghaddam",
  title =        "Revenue Maximization with Nonexcludable Goods",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "18:1--18:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2790131",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the design of revenue-maximizing mechanisms
                 for selling nonexcludable public goods. In particular,
                 we study revenue-maximizing mechanisms in Bayesian
                 settings for facility location problems on graphs where
                 no agent can be excluded from using a facility that has
                 been constructed. We show that the pointwise
                 optimization problem involved in implementing the
                 revenue optimal mechanism, namely, optimizing over
                 arbitrary profiles of virtual values, is hard to
                 approximate within a factor of $ \Omega (n^{2 -
                 \epsilon }) $ (assuming P $ \neq $ NP) even in star
                 graphs. Furthermore, we show that optimizing the
                 expected revenue is APX-hard. However, in a relevant
                 special case, rooted version with identical
                 distributions, we construct polynomial time truthful
                 mechanisms that approximate the optimal expected
                 revenue within a constant factor. We also study the
                 effect of partially mitigating nonexcludability by
                 collecting tolls for using the facilities. We show that
                 such ``posted-price'' mechanisms obtain significantly
                 higher revenue and often approach the optimal revenue
                 obtainable with full excludability.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Minooei:2015:NOR,
  author =       "Hadi Minooei and Chaitanya Swamy",
  title =        "Near-Optimal and Robust Mechanism Design for Covering
                 Problems with Correlated Players",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "19:1--19:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2790133",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the problem of designing
                 incentive-compatible, ex-post individually rational
                 (IR) mechanisms for covering problems in the Bayesian
                 setting, where players' types are drawn from an
                 underlying distribution and may be correlated, and the
                 goal is to minimize the expected total payment made by
                 the mechanism. We formulate a notion of incentive
                 compatibility (IC) that we call support-based IC that
                 is substantially more robust than Bayesian IC, and
                 develop black-box reductions from support-based-IC
                 mechanism design to algorithm design. For
                 single-dimensional settings, this black-box reduction
                 applies even when we only have an LP-relative
                 approximation algorithm for the algorithmic problem.
                 Thus, we obtain near-optimal mechanisms for various
                 covering settings, including single-dimensional
                 covering problems, multi-item procurement auctions, and
                 multidimensional facility location.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Fotakis:2015:TFD,
  author =       "Dimitris Fotakis and Emmanouil Zampetakis",
  title =        "Truthfulness Flooded Domains and the Power of
                 Verification for Mechanism Design",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "20:1--20:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2790086",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In this work, we investigate the reasons that make
                 symmetric partial verification essentially useless in
                 virtually all domains. Departing from previous work, we
                 consider any possible (finite or infinite) domain and
                 general symmetric verification. We identify a natural
                 property, namely, that the correspondence graph of a
                 symmetric verification $M$ is strongly connected by
                 finite paths along which the preferences are consistent
                 with the preferences at the endpoints, and prove that
                 this property is sufficient for the equivalence of
                 truthfulness and $M$-truthfulness. In fact, defining
                 appropriate versions of this property, we obtain this
                 result for deterministic and randomized mechanisms with
                 and without money. Moreover, we show that a slightly
                 relaxed version of this property is also necessary for
                 the equivalence of truthfulness and $M$-truthfulness.
                 Our conditions provide a generic and convenient way of
                 checking whether truthful implementation can take
                 advantage of any symmetric verification scheme in any
                 (finite or infinite) domain. Since the simplest
                 symmetric verification is the local verification,
                 specific cases of our result are closely related, in
                 the case without money, to the research about the
                 equivalence of local truthfulness and global
                 truthfulness. To complete the picture, we consider
                 asymmetric verification and prove that a social choice
                 function is $M$-truthfully implementable by some
                 asymmetric verification $M$ if and only if $f$ does not
                 admit a cycle of profitable deviations.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Kollias:2015:RPE,
  author =       "Konstantinos Kollias and Tim Roughgarden",
  title =        "Restoring Pure Equilibria to Weighted Congestion
                 Games",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "21:1--21:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2781678",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Congestion games model several interesting
                 applications, including routing and network formation
                 games, and also possess attractive theoretical
                 properties, including the existence of and convergence
                 of natural dynamics to a pure Nash equilibrium.
                 Weighted variants of congestion games that rely on
                 sharing costs proportional to players' weights do not
                 generally have pure-strategy Nash equilibria. We
                 propose a new way of assigning costs to players with
                 weights in congestion games that recovers the important
                 properties of the unweighted model. This method is
                 derived from the Shapley value, and it always induces a
                 game with a (weighted) potential function. For the
                 special cases of weighted network cost-sharing and
                 weighted routing games with Shapley value-based cost
                 shares, we prove tight bounds on the worst-case
                 inefficiency of equilibria. For weighted network
                 cost-sharing games, we precisely calculate the price of
                 stability for any given player weight vector, while for
                 weighted routing games, we precisely calculate the
                 price of anarchy, as a parameter of the set of
                 allowable cost functions.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Babichenko:2015:QCC,
  author =       "Yakov Babichenko and Siddharth Barman",
  title =        "Query Complexity of Correlated Equilibrium",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "22:1--22:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2785668",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study lower bounds on the query complexity of
                 determining correlated equilibrium. In particular, we
                 consider a query model in which an $n$-player game is
                 specified via a black box that returns players'
                 utilities at pure action profiles. In this model, we
                 establish that in order to compute a correlated
                 equilibrium, any deterministic algorithm must query the
                 black box an exponential (in $n$) number of times.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Aumann:2015:EFD,
  author =       "Yonatan Aumann and Yair Dombb",
  title =        "The Efficiency of Fair Division with Connected
                 Pieces",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "23:1--23:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2781776",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "The cake-cutting setting, in which a single
                 heterogeneous good must be divided between multiple
                 parties with different tastes, is a classic model for
                 studying questions regarding fairness in resource
                 allocation. In this work, we turn our attention to
                 (economic) efficiency considerations in cake cutting,
                 examining the possible trade-offs between meeting the
                 fairness criteria, on the one hand, and maximizing
                 social welfare, on the other. We focus on divisions
                 that give each agent a single (contiguous) piece of the
                 cake and provide tight bounds (or, in some cases,
                 nearly tight) on the possible degradation in
                 utilitarian and egalitarian welfare resulting from
                 meeting the fairness requirements.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Dimitrov:2015:SPM,
  author =       "Stanko Dimitrov and Rahul Sami and Marina A. Epelman",
  title =        "Subsidized Prediction Mechanisms for Risk-Averse
                 Agents",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "24:1--24:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2716327",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In this article, we study the design and
                 characterization of sequential prediction mechanisms in
                 the presence of agents with unknown risk aversion. We
                 formulate a collection of desirable properties for any
                 sequential forecasting mechanism. We present a
                 randomized mechanism that satisfies all of these
                 properties, including a guarantee that it is myopically
                 optimal for each agent to report honestly, regardless
                 of her degree of risk aversion. We observe, however,
                 that the mechanism has an undesirable side effect: each
                 agent's expected reward, normalized against the
                 inherent value of her private information, decreases
                 exponentially with the number of agents. We prove a
                 negative result showing that this is unavoidable: any
                 mechanism that is myopically strategyproof for agents
                 of all risk types, while also satisfying other natural
                 properties of sequential forecasting mechanisms, must
                 sometimes result in a player getting an exponentially
                 small expected normalized reward.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Xiao:2015:SOD,
  author =       "Yuanzhang Xiao and Mihaela {Van Der Schaar}",
  title =        "Socially-Optimal Design of Service Exchange Platforms
                 with Imperfect Monitoring",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "25:1--25:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2785627",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the design of service exchange platforms in
                 which long-lived anonymous users exchange services with
                 each other. The users are randomly and repeatedly
                 matched into pairs of clients and servers, and each
                 server can choose to provide high-quality or
                 low-quality services to the client with whom it is
                 matched. Since the users are anonymous and incur high
                 costs (e.g., exert high effort) in providing
                 high-quality services, it is crucial that the platform
                 incentivizes users to provide high-quality services.
                 Rating mechanisms have been shown to work effectively
                 as incentive schemes in such platforms. A rating
                 mechanism labels each user by a rating, which
                 summarizes the user's past behaviors, recommends a
                 desirable behavior to each server (e.g., provide
                 higher-quality services for clients with higher
                 ratings), and updates each server's rating based on the
                 recommendation and its client's report on the service
                 quality. Based on this recommendation, a low-rating
                 user is less likely to obtain high-quality services,
                 thereby providing users with incentives to obtain high
                 ratings by providing high-quality services. However, if
                  monitoring or reporting is imperfect --- clients do
                  not perfectly assess the quality or the reports are
                  lost --- a user's rating may not be updated
                  correctly. In the
                 presence of such errors, existing rating mechanisms
                 cannot achieve the social optimum. In this article, we
                 propose the first rating mechanism that does achieve
                 the social optimum, even in the presence of monitoring
                 or reporting errors. On one hand, the socially-optimal
                 rating mechanism needs to be complicated enough,
                 because the optimal recommended behavior depends not
                 only on the current rating distribution, but also
                 (necessarily) on the history of past rating
                 distributions in the platform. On the other hand, we
                 prove that the social optimum can be achieved by
                 ``simple'' rating mechanisms that use binary rating
                 labels and a small set of (three) recommended
                 behaviors. We provide design guidelines of
                 socially-optimal rating mechanisms and a low-complexity
                 online algorithm for the rating mechanism to determine
                 the optimal recommended behavior.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Nath:2015:AMD,
  author =       "Swaprava Nath and Arunava Sen",
  title =        "Affine Maximizers in Domains with Selfish Valuations",
  journal =      j-TEAC,
  volume =       "3",
  number =       "4",
  pages =        "26:1--26:??",
  month =        jul,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2786014",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Jul 31 19:26:18 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the domain of selfish and continuous
                 preferences over a ``rich'' allocation space and show
                 that onto, strategyproof and allocation non-bossy
                 social choice functions are affine maximizers. Roberts
                 [1979] proves this result for a finite set of
                 alternatives and an unrestricted valuation space. In
                 this article, we show that in a subdomain of the
                 unrestricted valuations with the additional assumption
                 of allocation non-bossiness, using the richness of the
                 allocations, the strategyproof social choice functions
                 can be shown to be affine maximizers. We provide an
                 example to show that allocation non-bossiness is indeed
                 critical for this result. This work shows that an
                 affine maximizer result needs a certain amount of
                 richness split across valuations and allocations.",
  acknowledgement = ack-nhfb,
  articleno =    "26",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Dutting:2015:EMA,
  author =       "Paul D{\"u}tting and Monika Henzinger and Ingmar
                 Weber",
  title =        "An Expressive Mechanism for Auctions on the {Web}",
  journal =      j-TEAC,
  volume =       "4",
  number =       "1",
  pages =        "1:1--1:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2716312",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Feb 6 08:20:19 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Auctions are widely used on the Web. Applications
                 range from sponsored search to platforms such as eBay.
                 In these and in many other applications the auctions in
                 use are single-/multi-item auctions with unit demand.
                 The main drawback of standard mechanisms for this type
                 of auctions, such as VCG and GSP, is the limited
                 expressiveness that they offer to the bidders. The
                 General Auction Mechanism (GAM) of Aggarwal et al.
                 [2009] takes a first step toward addressing the problem
                 of limited expressiveness by computing a bidder
                 optimal, envy-free outcome for linear utility functions
                 with identical slopes and a single discontinuity per
                 bidder-item pair. We show that in many practical
                 situations this does not suffice to adequately model
                 the preferences of the bidders, and we overcome this
                 problem by presenting the first mechanism for piecewise
                 linear utility functions with nonidentical slopes and
                 multiple discontinuities. Our mechanism runs in
                 polynomial time. Like GAM it is incentive compatible
                 for inputs that fulfill a certain nondegeneracy
                 assumption, but our requirement is more general than
                 the requirement of GAM. For discontinuous utility
                 functions that are nondegenerate as well as for
                 continuous utility functions the outcome of our
                 mechanism is a competitive equilibrium. We also show
                 how our mechanism can be used to compute approximately
                 bidder optimal, envy-free outcomes for a general class
                 of continuous utility functions via piecewise linear
                 approximation. Finally, we prove hardness results for
                 even more expressive settings.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Colini-Baldeschi:2015:MKS,
  author =       "Riccardo Colini-Baldeschi and Stefano Leonardi and
                 Monika Henzinger and Martin Starnberger",
  title =        "On Multiple Keyword Sponsored Search Auctions with
                 Budgets",
  journal =      j-TEAC,
  volume =       "4",
  number =       "1",
  pages =        "2:1--2:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2818357",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Feb 6 08:20:19 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study multiple keyword sponsored search auctions
                 with budgets. Each keyword has multiple ad slots with a
                 click-through rate. The bidders have additive
                 valuations, which are linear in the click-through
                 rates, and budgets, which are restricting their overall
                 payments. Additionally, the number of slots per keyword
                 assigned to a bidder is bounded. We show the following
                 results: (1) We give the first mechanism for multiple
                 keywords, where click-through rates differ among slots.
                 Our mechanism is incentive compatible in expectation,
                 individually rational in expectation, and Pareto
                 optimal. (2) We study the combinatorial setting, where
                 each bidder is only interested in a subset of the
                 keywords. We give an incentive compatible, individually
                 rational, Pareto-optimal, and deterministic mechanism
                 for identical click-through rates. (3) We give an
                 impossibility result for incentive compatible,
                 individually rational, Pareto-optimal, and
                 deterministic mechanisms for bidders with diminishing
                 marginal valuations.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Aumann:2015:ATT,
  author =       "Yonatan Aumann and Yair Dombb and Avinatan Hassidim",
  title =        "Auctioning Time: Truthful Auctions of Heterogeneous
                 Divisible Goods",
  journal =      j-TEAC,
  volume =       "4",
  number =       "1",
  pages =        "3:1--3:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2833086",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Feb 6 08:20:19 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the problem of auctioning time --- a
                 one-dimensional continuously-divisible heterogeneous
                 good --- among multiple agents. Applications include
                 auctioning time for using a shared device, auctioning
                 TV commercial slots, and more. Different agents may
                 have different valuations for the different possible
                 intervals; the goal is to maximize the aggregate
                 utility. Agents are self-interested and may
                 misrepresent their true valuation functions if this
                 benefits them. Thus, we seek auctions that are
                 truthful. Considering the case that each agent may
                 obtain a single interval, the challenge is twofold, as
                 we need to determine both where to slice the interval,
                 and who gets what slice. We consider two settings:
                 discrete and continuous. In the discrete setting, we
                 are given a sequence of $m$ indivisible elements $(e_1,
                 \ldots, e_m)$, and the auction must allocate each
                 agent a consecutive subsequence of the elements. In the
                 continuous setting, we are given a continuous,
                 infinitely divisible interval, and the auction must
                 allocate each agent a subinterval. The agents'
                 valuations are nonatomic measures on the interval. We
                 show that, for both settings, the associated
                 computational problem is NP-complete even under very
                 restrictive assumptions. Hence, we provide
                 approximation algorithms. For the discrete case, we
                 provide a truthful auctioning mechanism that
                 approximates the optimal welfare to within a $\log m$
                 factor. The mechanism works for arbitrary monotone
                 valuations. For the continuous setting, we provide a
                 truthful auctioning mechanism that approximates the
                 optimal welfare to within an $O(\log n)$ factor (where $n$
                 is the number of agents). Additionally, we provide a
                 truthful 2-approximation mechanism for the case that
                 all pieces must be of some fixed size.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Dutting:2015:AHI,
  author =       "Paul D{\"u}tting and Monika Henzinger and Martin
                 Starnberger",
  title =        "Auctions for Heterogeneous Items and Budget Limits",
  journal =      j-TEAC,
  volume =       "4",
  number =       "1",
  pages =        "4:1--4:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2818351",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Feb 6 08:20:19 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study individual rational, Pareto-optimal, and
                 incentive compatible mechanisms for auctions with
                 heterogeneous items and budget limits. We consider
                 settings with multiunit demand and additive valuations.
                 For single-dimensional valuations we prove a positive
                 result for randomized mechanisms, and a negative result
                 for deterministic mechanisms. While the positive result
                 allows for private budgets, the negative result is for
                 public budgets. For multidimensional valuations and
                 public budgets we prove an impossibility result that
                 applies to deterministic and randomized mechanisms.
                 Taken together this shows the power of randomization in
                 certain settings with heterogeneous items, but it also
                 shows its limitations.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Alon:2015:HPT,
  author =       "Noga Alon and Robert Bredereck and Jiehua Chen and
                 Stefan Kratsch and Rolf Niedermeier and Gerhard J.
                 Woeginger",
  title =        "How to Put Through Your Agenda in Collective Binary
                 Decisions",
  journal =      j-TEAC,
  volume =       "4",
  number =       "1",
  pages =        "5:1--5:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2837467",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Feb 6 08:20:19 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the following decision-making scenario: a
                 society of voters has to find an agreement on a set of
                 proposals, and every single proposal is to be accepted
                 or rejected. Each voter supports a certain subset of
                 the proposals --- the favorite ballot of this voter --- and
                 opposes the remaining ones. He accepts a ballot if he
                 supports more than half of the proposals in this
                 ballot. The task is to decide whether there exists a
                 ballot approving a specified number of selected
                 proposals (agenda) such that all voters (or a strict
                 majority of them) accept this ballot. We show that, on
                 the negative side, both problems are NP-complete, and
                 on the positive side, they are fixed-parameter
                 tractable with respect to the total number of proposals
                 or with respect to the total number of voters. We look
                 into further natural parameters and study their
                 influence on the computational complexity of both
                 problems, thereby providing both tractability and
                 intractability results. Furthermore, we provide tight
                 combinatorial bounds on the worst-case size of an
                 accepted ballot in terms of the number of voters.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Fragiadakis:2015:SMM,
  author =       "Daniel Fragiadakis and Atsushi Iwasaki and Peter
                 Troyan and Suguru Ueda and Makoto Yokoo",
  title =        "Strategyproof Matching with Minimum Quotas",
  journal =      j-TEAC,
  volume =       "4",
  number =       "1",
  pages =        "6:1--6:??",
  month =        dec,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2841226",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Feb 6 08:20:19 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study matching markets in which institutions may
                 have minimum and maximum quotas. Minimum quotas are
                 important in many settings, such as hospital residency
                 matching, military cadet matching, and school choice,
                 but current mechanisms are unable to accommodate them,
                 leading to the use of ad hoc solutions. We introduce
                 two new classes of strategyproof mechanisms that allow
                 for minimum quotas as an explicit input and show that
                 our mechanisms improve welfare relative to existing
                 approaches. Because minimum quotas cause a theoretical
                 incompatibility between standard fairness and
                 nonwastefulness properties, we introduce new
                 second-best axioms and show that they are satisfied by
                 our mechanisms. Last, we use simulations to quantify
                 (1) the magnitude of the potential efficiency gains
                 from our mechanisms and (2) how far the resulting
                 assignments are from the first-best definitions of
                 fairness and nonwastefulness. Combining both the
                 theoretical and simulation results, we argue that our
                 mechanisms will improve the performance of matching
                 markets with minimum quota constraints in practice.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Babaioff:2016:MDS,
  author =       "Moshe Babaioff and Moran Feldman and Moshe
                 Tennenholtz",
  title =        "Mechanism Design with Strategic Mediators",
  journal =      j-TEAC,
  volume =       "4",
  number =       "2",
  pages =        "7:1--7:??",
  month =        feb,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2841227",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Feb 6 08:20:20 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider the problem of designing mechanisms that
                 interact with strategic agents through strategic
                 intermediaries (or mediators), and investigate the cost
                 to society due to the mediators' strategic behavior.
                 Selfish agents with private information are each
                 associated with exactly one strategic mediator, and can
                 interact with the mechanism exclusively through that
                 mediator. Each mediator aims to optimize the combined
                 utility of his agents, while the mechanism aims to
                 optimize the combined utility of all agents. We focus
                 on the problem of facility location on a metric induced
                 by a publicly known tree. With nonstrategic mediators,
                 there is a dominant strategy mechanism that is optimal.
                 We show that when both agents and mediators act
                 strategically, there is no dominant strategy mechanism
                 that achieves any approximation. We, thus, slightly
                 relax the incentive constraints, and define the notion
                 of a two-sided incentive compatible mechanism. We show
                 that the 3-competitive deterministic mechanism
                 suggested by Procaccia and Tennenholtz [2013] and Dekel
                 et al. [2010] for lines extends naturally to trees, and
                 is still 3-competitive as well as two-sided incentive
                 compatible. This is essentially the best possible
                 (follows from Dekel et al. [2010] and Procaccia and
                 Tennenholtz [2013]). We then show that by allowing
                 randomization one can construct a 2-competitive
                 randomized mechanism that is two-sided incentive
                 compatible, and this is also essentially tight. This
                 result also reduces a gap left in the work of Procaccia
                 and Tennenholtz [2013] and Lu et al. [2009] for the
                 problem of designing strategy-proof mechanisms for
                 weighted agents with no mediators on a line. We also
                 investigate a generalization of the preceding setting
                 where there are multiple levels of mediators.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Roughgarden:2016:NCS,
  author =       "Tim Roughgarden and Okke Schrijvers",
  title =        "Network Cost-Sharing without Anonymity",
  journal =      j-TEAC,
  volume =       "4",
  number =       "2",
  pages =        "8:1--8:??",
  month =        feb,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2841228",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Feb 6 08:20:20 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider network cost-sharing games with
                 nonanonymous cost functions, where the cost of each
                 edge is a submodular function of its users, and this
                 cost is shared using the Shapley value. Nonanonymous
                 cost functions model asymmetries between the players,
                 which can arise from different bandwidth requirements,
                 durations of use, services needed, and so on. These
                 games can possess multiple Nash equilibria of wildly
                 varying quality. The goal of this article is to
                 identify well-motivated equilibrium refinements that
                 admit good worst-case approximation bounds. Our primary
                 results are tight bounds on the cost of strong Nash
                 equilibria and potential function minimizers in network
                 cost-sharing games with nonanonymous cost functions,
                 parameterized by the set C of allowable submodular cost
                 functions. These two worst-case bounds coincide for
                 every set C, and equal the summability parameter
                 introduced in Roughgarden and Sundararajan [2009] to
                 characterize efficiency loss in a family of
                 cost-sharing mechanisms. Thus, a single parameter
                 simultaneously governs the worst-case inefficiency of
                 network cost-sharing games (in two incomparable senses)
                 and cost-sharing mechanisms. This parameter is always
                 at most the $k$th Harmonic number $H_k \approx \ln k$,
                 where $k$ is the number of players, and is constant for
                 many function classes of interest.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Christodoulou:2016:TBP,
  author =       "George Christodoulou and Annam{\'a}ria Kov{\'a}cs and
                 Alkmini Sgouritsa and Bo Tang",
  title =        "Tight Bounds for the Price of Anarchy of Simultaneous
                 First-Price Auctions",
  journal =      j-TEAC,
  volume =       "4",
  number =       "2",
  pages =        "9:1--9:??",
  month =        feb,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2847520",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Feb 6 08:20:20 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the price of anarchy (PoA) of simultaneous
                 first-price auctions (FPAs) for buyers with submodular
                 and subadditive valuations. The current best upper
                 bounds for the Bayesian price of anarchy (BPoA) of
                 these auctions are $e / (e - 1)$ [Syrgkanis and Tardos
                 2013] and 2 [Feldman et al. 2013], respectively. We
                 provide matching lower bounds for both cases even for
                 the case of full information and for mixed Nash
                 equilibria via an explicit construction. We present an
                 alternative proof of the upper bound of $e / (e - 1)$
                 for FPAs with fractionally subadditive valuations that
                 reveals the worst-case price distribution, which is
                 used as a building block for the matching lower bound
                 construction. We generalize our results to a general
                 class of item bidding auctions that we call
                 bid-dependent auctions (including FPAs and all-pay
                 auctions) where the winner is always the highest bidder
                 and each bidder's payment depends only on his own bid.
                 Finally, we apply our techniques to discriminatory
                 price multiunit auctions. We complement the results of
                 de Keijzer et al. [2013] for the case of subadditive
                 valuations by providing a matching lower bound of 2.
                 For the case of submodular valuations, we provide a
                 lower bound of 1.109. For the same class of valuations,
                 we were able to reproduce the upper bound of
                 $e / (e - 1)$ using our nonsmooth approach.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Christodoulou:2016:PSP,
  author =       "George Christodoulou and Martin Gairing",
  title =        "Price of Stability in Polynomial Congestion Games",
  journal =      j-TEAC,
  volume =       "4",
  number =       "2",
  pages =        "10:1--10:??",
  month =        feb,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2841229",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Feb 6 08:20:20 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "The price of anarchy (PoA) in congestion games has
                 attracted a lot of research over the past decade. This
                 has resulted in a thorough understanding of this
                 concept. In contrast, the price of stability (PoS),
                 which is an equally interesting concept, is much less
                 understood. In this article, we consider congestion
                 games with polynomial cost functions with nonnegative
                 coefficients and maximum degree $d$. We give matching
                 bounds for the PoS in such games --- that is, our
                 technique provides the exact value for any degree $d$.
                 For linear
                 congestion games, tight bounds were previously known.
                 Those bounds hold even for the more restricted case of
                 dominant equilibria, which may not exist. We give a
                 separation result showing that this is not possible for
                 congestion games with quadratic cost functions --- in other
                 words, the PoA for the subclass of games that admit a
                 dominant strategy equilibrium is strictly smaller than
                 the PoS for the general class.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Hoefer:2016:TSD,
  author =       "Martin Hoefer and Thomas Kesselheim and Berthold
                 V{\"o}cking",
  title =        "Truthfulness and Stochastic Dominance with Monetary
                 Transfers",
  journal =      j-TEAC,
  volume =       "4",
  number =       "2",
  pages =        "11:1--11:??",
  month =        feb,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2847522",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Feb 6 08:20:20 MST 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider truthfulness concepts for auctions with
                 payments based on first- and second-order stochastic
                 dominance. We assume bidders consider wealth in
                 standard quasilinear form as valuation minus payments.
                 Additionally, they are sensitive to risk in the
                 distribution of wealth stemming from randomized
                 mechanisms. First- and second-order stochastic
                 dominance are well known to capture risk sensitivity,
                 and we apply these concepts to capture truth-telling
                 incentives for bidders. As our first main result, we
                 provide a complete characterization of all
                 social-choice functions over binary single-parameter
                 domains that can be implemented by a mechanism that is
                 truthful in first- and second-order stochastic
                 dominance. We show that these are exactly the
                 social-choice functions implementable by
                 truthful-in-expectation mechanisms, and we provide a
                 novel payment rule that guarantees stochastic
                 dominance. As our second main result we extend the
                 celebrated randomized metarounding approach for
                 truthful-in-expectation mechanisms in packing domains.
                 We design mechanisms that are truthful in first-order
                 stochastic dominance by spending only a logarithmic
                 factor in the approximation guarantee.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Mcafee:2016:I,
  author =       "Preston McAfee and {\'E}va Tardos",
  title =        "Introduction",
  journal =      j-TEAC,
  volume =       "4",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jun,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2916701",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Thu Jun 16 09:24:17 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chen:2016:TMA,
  author =       "Yiling Chen and Stephen Chong and Ian A. Kash and Tal
                 Moran and Salil Vadhan",
  title =        "Truthful Mechanisms for Agents That Value Privacy",
  journal =      j-TEAC,
  volume =       "4",
  number =       "3",
  pages =        "13:1--13:??",
  month =        jun,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2892555",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Thu Jun 16 09:24:17 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Recent work has constructed economic mechanisms that
                 are both truthful and differentially private. In these
                 mechanisms, privacy is treated separately from
                 truthfulness; it is not incorporated in players'
                 utility functions (and doing so has been shown to lead
                 to nontruthfulness in some cases). In this work, we
                 propose a new, general way of modeling privacy in
                 players' utility functions. Specifically, we only
                 assume that if an outcome $o$ has the property that any
                 report of player $i$ would have led to $o$ with
                 approximately the same probability, then $o$ has a small
                 privacy cost to player $i$. We give three mechanisms that
                 are truthful with respect to our modeling of privacy:
                 for an election between two candidates, for a discrete
                 version of the facility location problem, and for a
                 general social choice problem with discrete utilities
                 (via a VCG-like mechanism). As the number $n$ of players
                 increases, the social welfare achieved by our
                 mechanisms approaches optimal (as a fraction of $n$).",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Devanur:2016:WPO,
  author =       "Nikhil R. Devanur and Zhiyi Huang and Nitish Korula
                 and Vahab S. Mirrokni and Qiqi Yan",
  title =        "Whole-Page Optimization and Submodular Welfare
                 Maximization with Online Bidders",
  journal =      j-TEAC,
  volume =       "4",
  number =       "3",
  pages =        "14:1--14:??",
  month =        jun,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2892563",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Thu Jun 16 09:24:17 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In the context of online ad serving, display ads may
                 appear on different types of web pages, where each page
                 includes several ad slots and therefore multiple ads
                 can be shown on each page. The set of ads that can be
                 assigned to ad slots of the same page needs to satisfy
                 various prespecified constraints including exclusion
                 constraints, diversity constraints, and the like. Upon
                 arrival of a user, the ad serving system needs to
                 allocate a set of ads to the current webpage respecting
                 these per-page allocation constraints. Previous
                 slot-based settings ignore the important concept of a
                 page and may lead to highly suboptimal results in
                 general. In this article, motivated by these
                 applications in display advertising and inspired by the
                 submodular welfare maximization problem with online
                 bidders, we study a general class of page-based ad
                 allocation problems, present the first (tight)
                 constant-factor approximation algorithms for these
                 problems, and confirm the performance of our algorithms
                 experimentally on real-world datasets. A key technical
                 ingredient of our results is a novel primal-dual
                 analysis for handling free disposal, which updates dual
                 variables using a ``level function'' instead of a
                 single level and unifies with previous analyses of
                 related problems. This new analysis method allows us to
                 handle arbitrarily complicated allocation constraints
                 for each page. Our main result is an algorithm that
                 achieves a $1 - 1 / e - o(1)$-competitive
                 ratio. Moreover, our experiments on real-world datasets
                 show significant improvements of our page-based
                 algorithms compared to the slot-based
                 algorithms. Finally, we observe that our problem is
                 closely related to the submodular welfare maximization
                 (SWM) problem. In particular, we introduce a variant of
                 the SWM problem with online bidders and show how to
                 solve this problem using our algorithm for whole-page
                 optimization.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Caragiannis:2016:WDN,
  author =       "Ioannis Caragiannis and Ariel D. Procaccia and Nisarg
                 Shah",
  title =        "When Do Noisy Votes Reveal the Truth?",
  journal =      j-TEAC,
  volume =       "4",
  number =       "3",
  pages =        "15:1--15:??",
  month =        jun,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2892565",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Thu Jun 16 09:24:17 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "A well-studied approach to the design of voting rules
                 views them as maximum likelihood estimators; given
                 votes that are seen as noisy estimates of a true
                 ranking of the alternatives, the rule must reconstruct
                 the most likely true ranking. We argue that this is too
                 stringent a requirement and instead ask: how many votes
                 does a voting rule need to reconstruct the true
                 ranking? We define the family of pairwise-majority
                 consistent rules and show that for all rules in this
                 family, the number of samples required from Mallows's
                 noise model is logarithmic in the number of
                 alternatives, and that no rule can do asymptotically
                 better (while some rules like plurality do much worse).
                 Taking a more normative point of view, we consider
                 voting rules that surely return the true ranking as the
                 number of samples tends to infinity (we call this
                 property accuracy in the limit ); this allows us to
                 move to a higher level of abstraction. We study
                 families of noise models that are parameterized by
                 distance functions and find voting rules that are
                 accurate in the limit for all noise models in such
                 general families. We characterize the distance
                 functions that induce noise models for which
                 pairwise-majority consistent rules are accurate in the
                 limit and provide a similar result for another novel
                 family of position-dominance consistent rules. These
                 characterizations capture three well-known distance
                 functions.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Easley:2016:IGG,
  author =       "David Easley and Arpita Ghosh",
  title =        "Incentives, Gamification, and Game Theory: an Economic
                 Approach to Badge Design",
  journal =      j-TEAC,
  volume =       "4",
  number =       "3",
  pages =        "16:1--16:??",
  month =        jun,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2910575",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Thu Jun 16 09:24:17 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Gamification is growing increasingly prevalent as a
                 means to incentivize user engagement of social media
                 sites that rely on user contributions. Badges, or
                 equivalent rewards, such as top-contributor lists that
                 are used to recognize a user's contributions on a site,
                 clearly appear to be valued by users who actively
                 pursue and compete for them. However, different sites
                 use different badge designs, varying how, and for what,
                 badges are awarded. Some sites, such as StackOverflow,
                 award badges for meeting fixed levels of
                 contribution. Other sites, such as Amazon and Y!
                 Answers, reward users for being among some top set of
                 contributors on the site, corresponding to a
                 competitive standard of performance. Given that users
                 value badges, and that contributing to a site requires
                 effort, how badges are designed will affect the
                 incentives --- therefore the participation and effort
                 --- elicited from strategic users on a site. We take
                 a game-theoretic approach to badge design, analyzing
                 the incentives created by widely used badge designs in
                 a model in which winning a badge is valued, effort is
                 costly, and potential contributors to the site
                 endogenously decide whether or not to participate, and
                 how much total effort to put into their contributions
                 to the site. We analyze equilibrium existence, as well
                 as equilibrium participation and effort, in an absolute
                 standards mechanism $M_\alpha$ in which badges are
                 awarded for meeting some absolute level of (observed)
                 effort, and a relative standards mechanism $M_\rho$
                 corresponding to competitive standards, as in a
                 top-$\rho$ contributor badge. We find that equilibria
                 always exist in both mechanisms, even when the value
                 from winning a badge depends endogenously on the number
                 of other winners. However, $M_\alpha$ has
                 zero-participation equilibria for standards that are
                 too high, whereas all equilibria in $M_\rho$ elicit
                 nonzero participation for all possible $\rho$, provided
                 that $\rho$ is specified as a fixed number rather than
                 as a fraction of actual contributors (note that the two
                 are not equivalent in a setting with endogenous
                 participation). Finally, we ask whether or not a site
                 should explicitly announce the number of users winning
                 a badge. The answer to this question is determined by
                 the curvature of the value of winning the badge as a
                 function of the number of other winners.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Roberts:2016:RTS,
  author =       "Ben Roberts and Dinan Gunawardena and Ian A. Kash and
                 Peter Key",
  title =        "Ranking and Tradeoffs in Sponsored Search Auctions",
  journal =      j-TEAC,
  volume =       "4",
  number =       "3",
  pages =        "17:1--17:??",
  month =        jun,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2910576",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Thu Jun 16 09:24:17 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In a sponsored search auction, decisions about how to
                 rank ads impose tradeoffs between objectives, such as
                 revenue and welfare. In this article, we examine how
                 these tradeoffs should be made. We begin by arguing
                 that the most natural solution concept to evaluate
                 these tradeoffs is the lowest symmetric Nash
                 equilibrium (SNE). As part of this argument, we
                 generalise the well-known connection between the lowest
                 SNE and the VCG outcome. We then propose a new ranking
                 algorithm, loosely based on the revenue-optimal
                 auction, that uses a reserve price to order the ads
                 (not just to filter them) and give conditions under
                 which it raises more revenue than simply applying that
                 reserve price. Finally, we conduct extensive
                 simulations examining the tradeoffs enabled by
                 different ranking algorithms and show that our proposed
                 algorithm enables superior operating points by a
                 variety of metrics.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Roughgarden:2016:ORM,
  author =       "Tim Roughgarden and Inbal Talgam-Cohen",
  title =        "Optimal and Robust Mechanism Design with
                 Interdependent Values",
  journal =      j-TEAC,
  volume =       "4",
  number =       "3",
  pages =        "18:1--18:??",
  month =        jun,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2910577",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Thu Jun 16 09:24:17 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study interdependent value settings and extend
                 several results from the well-studied independent
                 private values model to these settings. For
                 revenue-optimal mechanism design, we give conditions
                 under which Myerson's virtual value-based mechanism
                 remains optimal with interdependent values. One of
                 these conditions is robustness of the truthfulness and
                 individual rationality guarantees, in the sense that
                 they are required to hold ex-post. We then consider an
                 even more robust class of mechanisms called ``prior
                 independent'' (``detail free''), and show that, by
                 simply using one of the bidders to set a reserve price,
                 it is possible to extract near-optimal revenue in an
                 interdependent values setting. This shows that a
                 considerable level of robustness is achievable for
                 interdependent values in single-parameter
                 environments.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Conitzer:2016:ISI,
  author =       "Vincent Conitzer and David Easley",
  title =        "Introduction to the Special Issue on {EC'14}",
  journal =      j-TEAC,
  volume =       "4",
  number =       "4",
  pages =        "19:1--19:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2953046",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Aug 29 06:37:22 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Othman:2016:CFT,
  author =       "Abraham Othman and Christos Papadimitriou and Aviad
                 Rubinstein",
  title =        "The Complexity of Fairness Through Equilibrium",
  journal =      j-TEAC,
  volume =       "4",
  number =       "4",
  pages =        "20:1--20:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2956583",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Aug 29 06:37:22 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Competitive equilibrium from equal incomes (CEEI) is a
                 well-known fair allocation mechanism with desirable
                 fairness and efficiency properties; however, with
                 indivisible resources, a CEEI may not exist [Foley
                 1967; Varian 1974; Thomson and Varian 1985]. It was
                 shown in Budish [2011] that in the case of indivisible
                 resources, there is always an allocation, called
                 A-CEEI, that is approximately fair, approximately
                 truthful, and approximately efficient for some
                 favorable approximation parameters. A heuristic search
                 that attempts to find this approximation is used in
                 practice to assign business school students to courses.
                 In this article, we show that finding the A-CEEI
                 allocation guaranteed to exist by Budish's theorem is
                 PPAD-complete. We further show that finding an
                 approximate equilibrium with better approximation
                 guarantees is even harder: NP-complete.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Hassidim:2016:LCM,
  author =       "Avinatan Hassidim and Yishay Mansour and Shai Vardi",
  title =        "Local Computation Mechanism Design",
  journal =      j-TEAC,
  volume =       "4",
  number =       "4",
  pages =        "21:1--21:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2956584",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Aug 29 06:37:22 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We introduce the notion of local computation mechanism
                 design --- designing game-theoretic mechanisms that run in
                 polylogarithmic time and space. Local computation
                 mechanisms reply to each query in polylogarithmic time
                 and space, and the replies to different queries are
                 consistent with the same global feasible solution. When
                 the mechanism employs payments, the computation of the
                 payments is also done in polylogarithmic time and
                 space. Furthermore, the mechanism needs to maintain
                 incentive compatibility with respect to the allocation
                 and payments. We present local computation mechanisms
                 for two classical game-theoretical problems: stable
                 matching and job scheduling. For stable matching, some
                 of our techniques may have implications to the global
                 (non-LCA (Local Computation Algorithm)) setting.
                 Specifically, we show that when the men's preference
                 lists are bounded, we can achieve an arbitrarily good
                 approximation to the stable matching within a fixed
                 number of iterations of the Gale--Shapley algorithm.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Ghosh:2016:OCD,
  author =       "Arpita Ghosh and Robert Kleinberg",
  title =        "Optimal Contest Design for Simple Agents",
  journal =      j-TEAC,
  volume =       "4",
  number =       "4",
  pages =        "22:1--22:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2930955",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Aug 29 06:37:22 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Incentives are more likely to elicit desired outcomes
                 when they are designed based on accurate models of
                 agents' strategic behavior. A growing literature,
                 however, suggests that people do not quite behave like
                 standard economic agents in a variety of environments,
                 both online and offline. What consequences might such
                 differences have for the optimal design of mechanisms
                 in these environments? In this article, we explore this
                 question in the context of optimal contest design for
                 simple agents --- agents who strategically reason about
                 whether or not to participate in a system, but not
                 about the input they provide to it. Specifically,
                 consider a contest where n potential contestants with
                 types $ (q^i, c^i) $ each choose between participating
                 and producing a submission of quality $ q^i $ at cost
                 $ c^i $, versus not participating at all to maximize
                 their utilities. How should a principal distribute a
                 total prize V among the n ranks to maximize some
                 increasing function of the qualities of elicited
                 submissions in a contest with such simple agents? We
                 first solve the optimal contest design problem for
                 settings where agents have homogeneous participation
                 costs $ c^i = c$. Here, the contest that maximizes
                 every increasing function of the elicited contributions
                 is always a simple contest, awarding equal prizes of $
                 V / j^*$ each to the top $ j^*= V / c - \Theta (\sqrt {
                 V / (c \ln (V / c))})$ contestants. This is in contrast
                 to the optimal contest structure in comparable models
                 with strategic effort choices, where the optimal
                 contest is either a winner-take-all contest or awards
                 possibly unequal prizes, depending on the curvature of
                 agents' effort cost functions. We next address the
                 general case with heterogeneous costs where agents'
                 types $ (q^i, c^i)$ are inherently two dimensional,
                 significantly complicating equilibrium analysis. With
                 heterogeneous costs, the optimal contest depends on the
                 objective being maximized: our main result here is that
                 the winner-take-all contest is a 3-approximation of the
                 optimal contest when the principal's objective is to
                 maximize the quality of the best elicited contribution.
                 The proof of this result hinges around a
                 ``subequilibrium'' lemma establishing a stochastic
                 dominance relation between the distribution of
                 qualities elicited in an equilibrium and a
                 subequilibrium --- a strategy profile that is a best
                 response for all agents who choose to participate in
                 that strategy profile; this relation between equilibria
                 and subequilibria may be of more general interest.",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Fudenberg:2016:RRR,
  author =       "Drew Fudenberg and Alexander Peysakhovich",
  title =        "Recency, Records, and Recaps: Learning and
                 Nonequilibrium Behavior in a Simple Decision Problem",
  journal =      j-TEAC,
  volume =       "4",
  number =       "4",
  pages =        "23:1--23:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2956581",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Aug 29 06:37:22 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Nash equilibrium takes optimization as a primitive,
                 but suboptimal behavior can persist in simple
                 stochastic decision problems. This has motivated the
                 development of other equilibrium concepts such as
                 cursed equilibrium and behavioral equilibrium. We
                 experimentally study a simple adverse selection (or
                 ``lemons'') problem and find that learning models that
                 heavily discount past information (i.e., display
                 recency bias) explain patterns of behavior better than
                 Nash, cursed, or behavioral equilibrium. Providing
                 counterfactual information or a record of past outcomes
                 does little to aid convergence to optimal strategies,
                 but providing sample averages (``recaps'') gets
                 individuals most of the way to optimality. Thus,
                 recency effects are not solely due to limited memory
                 but stem from some other form of cognitive constraints.
                 Our results show the importance of going beyond static
                 optimization and incorporating features of human
                 learning into economic models used in both
                 understanding phenomena and designing market
                 institutions.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Goldberg:2016:BQC,
  author =       "Paul W. Goldberg and Aaron Roth",
  title =        "Bounds for the Query Complexity of Approximate
                 Equilibria",
  journal =      j-TEAC,
  volume =       "4",
  number =       "4",
  pages =        "24:1--24:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2956582",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Aug 29 06:37:22 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We analyze the number of payoff queries needed to
                 compute approximate equilibria of multi-player games.
                 We find that query complexity is an effective tool for
                 distinguishing the computational difficulty of
                 alternative solution concepts, and we develop new
                 techniques for upper- and lower bounding the query
                 complexity. For binary-choice games, we show
                 logarithmic upper and lower bounds on the query
                 complexity of approximate correlated equilibrium. For
                 well-supported approximate correlated equilibrium (a
                 restriction where a player's behavior must always be
                 approximately optimal, in the worst case over draws
                 from the distribution) we show a linear lower bound,
                 thus separating the query complexity of well supported
                 approximate correlated equilibrium from the standard
                 notion of approximate correlated equilibrium. Finally,
                 we give a query-efficient reduction from the problem of
                 computing an approximate well-supported Nash
                 equilibrium to the problem of verifying a well
                 supported Nash equilibrium, where the additional query
                 overhead is proportional to the description length of
                 the game. This gives a polynomial-query algorithm for
                 computing well supported approximate Nash equilibria
                 (and hence correlated equilibria) in concisely
                 represented games. We identify a class of games (which
                 includes congestion games) in which the reduction can
                 be made not only query efficient, but also
                 computationally efficient.",
  acknowledgement = ack-nhfb,
  articleno =    "24",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Fearnley:2016:FAN,
  author =       "John Fearnley and Rahul Savani",
  title =        "Finding Approximate {Nash} Equilibria of Bimatrix
                 Games via Payoff Queries",
  journal =      j-TEAC,
  volume =       "4",
  number =       "4",
  pages =        "25:1--25:??",
  month =        aug,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2956579",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Aug 29 06:37:22 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the deterministic and randomized query
                 complexity of finding approximate equilibria in a $ k
                 \times k $ bimatrix game. We show that the
                 deterministic query complexity of finding an $ \epsilon
                 $-Nash equilibrium when $ \epsilon < 1 / 2 $ is $ \Omega
                 (k^2) $, even in zero-one constant-sum games. In
                 combination with previous results [Fearnley et al.
                 2013], this provides a complete characterization of the
                 deterministic query complexity of approximate Nash
                 equilibria. We also study randomized querying
                 algorithms. We give a randomized algorithm for finding
                 a $ ((3 - \sqrt 5) / 2 + \epsilon)$-Nash equilibrium
                 using $ O(k \log k / \epsilon^2)$ payoff queries, which
                 shows that the $ 1 / 2$ barrier for deterministic
                 algorithms can be broken by randomization. For
                 well-supported Nash equilibria (WSNE), we first give a
                 randomized algorithm for finding an $ \epsilon $-WSNE
                 of a zero-sum bimatrix game using $ O (k \log k /
                 \epsilon^4)$ payoff queries, and we then use this to
                 obtain a randomized algorithm for finding a $ (2 / 3 +
                 \epsilon)$-WSNE in a general bimatrix game using $ O(k
                 \log k / \epsilon^4)$ payoff queries. Finally, we
                 initiate the study of lower bounds against randomized
                 algorithms in the context of bimatrix games, by showing
                 that randomized algorithms require $ \Omega (k^2)$
                 payoff queries in order to find an $ \epsilon $-Nash
                 equilibrium with $ \epsilon < 1 / (4 k)$, even in
                 zero-one constant-sum games. In particular, this rules
                 out query-efficient randomized algorithms for finding
                 exact Nash equilibria.",
  acknowledgement = ack-nhfb,
  articleno =    "25",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Hassidim:2016:G,
  author =       "Avinatan Hassidim and Haim Kaplan and Yishay Mansour
                 and Noam Nisan",
  title =        "The {AND--OR} Game",
  journal =      j-TEAC,
  volume =       "5",
  number =       "1",
  pages =        "1:1--1:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2897186",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Apr 3 11:39:17 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider a simple simultaneous first price auction
                 for two identical items in a complete information
                 setting. Our goal is to analyze this setting for a
                 simple, yet highly interesting, AND--OR game, where one
                 agent is single minded and the other is unit demand. We
                 find a mixed equilibrium of this game and show that
                 every other equilibrium admits the same expected
                 allocation and payments. In addition, we study the
                 equilibrium, highlighting the change in revenue and
                 social welfare as a function of the players'
                 valuations.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Borodin:2016:LGM,
  author =       "Allan Borodin and Brendan Lucier",
  title =        "On the Limitations of Greedy Mechanism Design for
                 Truthful Combinatorial Auctions",
  journal =      j-TEAC,
  volume =       "5",
  number =       "1",
  pages =        "2:1--2:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2956585",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Apr 3 11:39:17 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study mechanisms for the combinatorial auction (CA)
                 problem, in which $m$ objects are sold to rational
                 agents and the goal is to maximize social welfare. Of
                 particular interest is the special case of $s$-CAs,
                 where agents are interested in sets of size at most
                 $s$, for which a simple greedy algorithm obtains an $ s
                 + 1 $ approximation, but no polynomial time
                 deterministic truthful mechanism is known to attain an
                 approximation ratio better than $ O(m / \sqrt {\log
                 m})$. We view this not only as an extreme gap between
                 the power of greedy auctions and truthful greedy
                 auctions, but also as exemplifying the gap between the
                 known power of truthful and non-truthful polynomial
                 time deterministic algorithms. We associate the notion
                 of greediness with a broad class of algorithms, known
                 as priority algorithms, which encapsulate many natural
                 auction methods. This motivates us to ask: how well can
                 a truthful greedy algorithm approximate the optimal
                 social welfare for CA problems? We show that no
                 truthful greedy priority algorithm can obtain an
                 approximation to the CA problem that is sublinear in
                 $m$, even for $s$-CAs with $ s \geq 2$. Our
                 inapproximations are independent of any time
                 constraints on the mechanism and are purely a
                 consequence of the greedy-type restriction. We conclude
                 that any truthful combinatorial auction mechanism with
                 a non-trivial approximation factor must fall outside
                 the scope of many natural auction methods.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Harks:2016:RQC,
  author =       "Tobias Harks and Philipp {von Falkenhausen}",
  title =        "Robust Quantitative Comparative Statics for a
                 Multimarket Paradox",
  journal =      j-TEAC,
  volume =       "5",
  number =       "1",
  pages =        "3:1--3:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2956580",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Apr 3 11:39:17 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We introduce a quantitative approach to comparative
                 statics that allows to bound the maximum effect of an
                 exogenous parameter change on a system's equilibrium.
                 The motivation for this approach is a well-known
                 paradox in multimarket Cournot competition, where a
                 positive price shock on a monopoly market may actually
                 reduce the monopolist's profit. We use our approach to
                 quantify for the first time the worst-case profit
                 reduction for multimarket oligopolies exposed to
                 arbitrary positive price shocks. For markets with
                 affine price functions and firms with convex cost
                 technologies, we show that the relative profit loss of
                 any firm is at most 25\%, no matter how many firms
                 compete in the oligopoly. We further investigate the
                 impact of positive price shocks on total profit of all
                 firms as well as on social welfare. We find tight
                 bounds also for these measures showing that total
                 profit and social welfare decreases by at most 25\% and
                 16.6\%, respectively. Finally, we show that in our
                 model, mixed, correlated, and coarse correlated
                 equilibria are essentially unique, thus, all our bounds
                 apply to these game solutions, as well.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Hummel:2016:WDI,
  author =       "Patrick Hummel and R. Preston McAfee",
  title =        "When Does Improved Targeting Increase Revenue?",
  journal =      j-TEAC,
  volume =       "5",
  number =       "1",
  pages =        "4:1--4:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2956586",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Apr 3 11:39:17 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In second-price auctions, we find that improved
                 targeting via enhanced information disclosure decreases
                 revenue when there are two bidders and increases
                 revenue if there are at least four symmetric bidders
                 with values drawn from a distribution with a monotone
                 hazard rate. With asymmetries, improved targeting
                 increases revenue if the most frequent winner wins less
                 than 30.4\% of the time under a model in which shares
                 are well defined, but can decrease revenue otherwise.
                 We derive analogous results for position auctions.
                 Finally, we show that revenue can vary nonmonotonically
                 with the number of bidders who are able to take
                 advantage of improved targeting.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Piliouras:2016:RSP,
  author =       "Georgios Piliouras and Evdokia Nikolova and Jeff S.
                 Shamma",
  title =        "Risk Sensitivity of Price of Anarchy under
                 Uncertainty",
  journal =      j-TEAC,
  volume =       "5",
  number =       "1",
  pages =        "5:1--5:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2930956",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Apr 3 11:39:17 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In game theory, the price of anarchy framework studies
                 efficiency loss in decentralized environments.
                 Optimization and decision theory, on the other hand,
                 explore tradeoffs between optimality and robustness in
                 the case of single-agent decision making under
                 uncertainty. What happens when we combine both
                 approaches? We examine connections between the
                 efficiency loss due to decentralization and the
                 efficiency loss due to uncertainty and establish tight
                 performance guarantees for distributed systems in
                 uncertain environments. We present applications based
                 on novel variants of atomic congestion games with
                 uncertain costs, for which we provide tight performance
                 bounds under a wide range of risk attitudes. Our
                 results establish that the individual's attitude toward
                 uncertainty has a critical effect on system performance
                 and therefore should be a subject of close and
                 systematic investigation.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Devanur:2016:RCP,
  author =       "Nikhil R. Devanur and Jugal Garg and L{\'a}szl{\'o} A.
                 V{\'e}gh",
  title =        "A Rational Convex Program for Linear {Arrow--Debreu}
                 Markets",
  journal =      j-TEAC,
  volume =       "5",
  number =       "1",
  pages =        "6:1--6:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2930658",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Apr 3 11:39:17 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We present a new flow-type convex program describing
                 equilibrium solutions to linear Arrow--Debreu markets.
                 Whereas convex formulations were previously known
                 ([Nenakov and Primak 1983; Jain 2007; Cornet 1989]),
                 our program exhibits several new features. It provides
                 a simple necessary and sufficient condition and a
                 concise proof of the existence and rationality of
                 equilibria, settling an open question raised by
                 Vazirani [2012]. As a consequence, we also obtain a
                 simple new proof of the result in Mertens [2003] that
                 the equilibrium prices form a convex polyhedral set.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chau:2016:TMC,
  author =       "Chi-Kin Chau and Khaled Elbassioni and Majid Khonji",
  title =        "Truthful Mechanisms for Combinatorial Allocation of
                 Electric Power in Alternating Current Electric Systems
                 for Smart Grid",
  journal =      j-TEAC,
  volume =       "5",
  number =       "1",
  pages =        "7:1--7:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2955089",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Apr 3 11:39:17 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Traditional studies of combinatorial auctions often
                 only consider linear constraints. The rise of smart
                 grid presents a new class of auctions, characterized by
                 quadratic constraints. This article studies the
                 complex-demand knapsack problem, in which the demands
                 are complex valued and the capacity of supplies is
                 described by the magnitude of total complex-valued
                 demand. This naturally captures the power constraints
                 in alternating current electric systems. In this
                 article, we provide a more complete study and
                 generalize the problem to the multi-minded version,
                 beyond the previously known $ \frac
                 {1}{2}$-approximation algorithm for only a subclass of
                 the problem. More precisely, we give a truthful
                 polynomial-time approximation scheme (PTAS) for the
                 case $ \phi \in [0, \frac {\pi }{2} - \delta]$ and a
                 truthful fully polynomial-time approximation scheme
                 (FPTAS), which fully optimizes the objective function
                 but violates the capacity constraint by at most $ (1 +
                 \epsilon)$, for the case $ \phi \in (\frac {\pi }{2},
                 \pi - \delta]$, where $ \phi $ is the maximum argument
                 of any complex-valued demand and $ \epsilon, \delta >
                 0$ are arbitrarily small constants. We complement these
                 results by showing that, unless P=NP, neither a PTAS
                  for the case $ \phi \in (\frac {\pi }{2}, \pi - \delta]$
                 nor any bi-criteria approximation algorithm with
                 polynomial guarantees for the case when $ \phi $ is
                 arbitrarily close to $ \pi $ (that is, when $ \delta $
                 is arbitrarily close to 0) can exist.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Feldman:2016:DCC,
  author =       "Michal Feldman and Ofir Geri",
  title =        "Do Capacity Constraints Constrain Coalitions?",
  journal =      j-TEAC,
  volume =       "5",
  number =       "1",
  pages =        "8:1--8:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2955090",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Apr 3 11:39:17 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study strong equilibria in symmetric capacitated
                 cost-sharing connection games. In these games, a graph
                 with designated source $s$ and sink $t$ is given, and
                 each edge is associated with some cost. Each agent
                 chooses strategically an $s$--$t$ path, knowing that
                 the cost of each edge is shared equally between all
                 agents using it. Two settings of cost-sharing
                 connection games have been previously studied: (i)
                 games where coalitions can form, and (ii) games where
                 edges are associated with capacities; both settings are
                 inspired by real-life scenarios. In this work we
                 combine these scenarios and analyze strong equilibria
                 (profiles where no coalition can deviate) in
                 capacitated games. This combination gives rise to new
                 phenomena that do not occur in the previous settings.
                 Our contribution is twofold. First, we provide a
                 topological characterization of networks that always
                 admit a strong equilibrium. Second, we establish tight
                 bounds on the efficiency loss that may be incurred due
                 to strategic behavior, as quantified by the strong
                 price of anarchy (and stability) measures.
                 Interestingly, our results qualitatively differ from
                 those obtained in the analysis of each scenario alone,
                 and the combination of coalitions and capacities
                 entails the introduction of more refined topology
                 classes than previously studied.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Brandt:2017:CDB,
  author =       "Felix Brandt and Markus Brill",
  title =        "Computing Dominance-Based Solution Concepts",
  journal =      j-TEAC,
  volume =       "5",
  number =       "2",
  pages =        "9:1--9:??",
  month =        mar,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2963093",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Aug 9 16:06:10 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Two common criticisms of Nash equilibrium are its
                 dependence on very demanding epistemic assumptions and
                 its computational intractability. We study the
                 computational properties of less demanding set-valued
                 solution concepts that are based on varying notions of
                 dominance. These concepts are intuitively appealing,
                 always exist, and admit unique minimal solutions in
                 important subclasses of games. Examples include
                 Shapley's saddles, Harsanyi and Selten's primitive
                 formations, Basu and Weibull's CURB sets, and Dutta and
                 Laslier's minimal covering set. Based on a unifying
                 framework proposed by Duggan and Le Breton, we
                 formulate two generic algorithms for computing these
                 concepts and investigate for which classes of games and
                 which properties of the underlying dominance notion the
                 algorithms are sound and efficient. We identify two
                 sets of conditions that are sufficient for
                 polynomial-time computability and show that the
                 conditions are satisfied, for instance, by saddles and
                 primitive formations in normal-form games, minimal CURB
                 sets in two-player games, and the minimal covering set
                 in symmetric matrix games. Our positive algorithmic
                 results explain regularities observed in the
                 literature, but also apply to several solution concepts
                 whose computational complexity was previously
                 unknown.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Conitzer:2017:FEL,
  author =       "Vincent Conitzer and Preston McAfee",
  title =        "Farewell Editorial: Looking Back on Our Terms Editing
                 {ACM TEAC} and into the Future",
  journal =      j-TEAC,
  volume =       "5",
  number =       "2",
  pages =        "9:1--9:??",
  month =        mar,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3079047",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Aug 9 16:06:10 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  acknowledgement = ack-nhfb,
  articleno =    "9e",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Pennock:2017:ENT,
  author =       "David Pennock and Ilya Segal",
  title =        "Editorial from the New {TEAC} {Co-Editors-in-Chief}",
  journal =      j-TEAC,
  volume =       "5",
  number =       "2",
  pages =        "9:1--9:??",
  month =        mar,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3084545",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Aug 9 16:06:10 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  acknowledgement = ack-nhfb,
  articleno =    "9ee",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Pai:2017:ATL,
  author =       "Mallesh M. Pai and Aaron Roth and Jonathan Ullman",
  title =        "An Antifolk Theorem for Large Repeated Games",
  journal =      j-TEAC,
  volume =       "5",
  number =       "2",
  pages =        "10:1--10:??",
  month =        mar,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2976734",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Aug 9 16:06:10 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In this article, we study infinitely repeated games in
                 settings of imperfect monitoring. We first prove a
                 family of theorems showing that when the signals
                 observed by the players satisfy a condition known as $
                 (\epsilon, \gamma)$-differential privacy, the folk
                 theorem has little bite: for values of $ \epsilon $ and
                 $ \gamma $ sufficiently small, for a fixed discount
                 factor, any equilibrium of the repeated game involves
                 players playing approximate equilibria of the stage
                 game in every period. Next we argue that in large games
                 ($n$-player games in which unilateral deviations by
                 single players have only a small impact on the utility
                 of other players), many monitoring settings naturally
                 lead to signals that satisfy $ (\epsilon,
                 \gamma)$-differential privacy for $ \epsilon $ and $
                 \gamma $ tending to zero as the number of players $n$
                 grows large. We conclude that in such settings, the set
                 of equilibria of the repeated game collapses to the set
                 of equilibria of the stage game. Our results nest and
                 generalize previous results of Green [1980] and
                 Sabourian [1990], suggesting that differential privacy
                 is a natural measure of the ``largeness'' of a game.
                 Further, techniques from the literature on differential
                 privacy allow us to prove quantitative bounds, where
                 the existing literature focuses on limiting results.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Azaria:2017:DMM,
  author =       "Amos Azaria and David Sarne and Yonatan Aumann",
  title =        "Distributed Matching with Mixed Maximum--Minimum
                 Utilities",
  journal =      j-TEAC,
  volume =       "5",
  number =       "2",
  pages =        "11:1--11:??",
  month =        mar,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3038911",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Aug 9 16:06:10 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In this article, we study distributed agent matching
                 with search friction in environments characterized by
                 costly exploration, where each agent's utility from
                 forming a partnership is influenced by some linear
                 combination of the maximum and the minimum among the
                 two agents' competence. The article provides a cohesive
                 analysis for such case, proving the equilibrium
                 structure for the different min-max linear combinations
                 that may be used. The article presents an extensive
                 equilibrium analysis of such settings, proving three
                 distinct resulting patterns of the acceptance
                 thresholds used by the different agents. The first
                 relates to settings where a greater emphasis is placed
                 on the minimum type, or in the extreme case where the
                 minimum type solely determines the output. In these
                 cases, the assortative matching characteristic holds,
                 where all agents set their threshold below their own
                 type and the greater is the agent type, the greater is
                 its threshold. When the utility from the partnership
                 formation is solely determined by the maximum type, we
                 show that there exists a type $ x^* $ where
                 partnerships form if and only if one of the agents has
                 a type equal to or greater than $ x^* $. When a greater
                 emphasis is placed on the maximum type (but not only),
                 we prove that assortative matching never holds, and the
                 change in the agents' acceptance thresholds can
                 frequently shift from an increase to a decrease.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chan:2017:PAW,
  author =       "Hau Chan and Jing Chen and Gowtham Srinivasan",
  title =        "Provision-After-Wait with Common Preferences",
  journal =      j-TEAC,
  volume =       "5",
  number =       "2",
  pages =        "12:1--12:??",
  month =        mar,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3038910",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Aug 9 16:06:10 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In this article, we study the Provision-after-Wait
                 problem in healthcare (Braverman, Chen, and Kannan,
                 2016). In this setting, patients seek a medical
                 procedure that can be performed by different hospitals
                 of different costs. Each patient has a value for each
                 hospital and a budget-constrained government/planner
                 pays for the expenses of the patients. Patients are
                 free to choose hospitals, but the planner controls how
                 much money each hospital gets paid and thus how many
                 patients each hospital can serve (in one budget period,
                 say, one month or one year). Waiting times are used in
                 order to balance the patients' demand, and the
                 planner's goal is to find a stable assignment that
                 maximizes the social welfare while keeping the expenses
                 within the budget. It has been shown that the optimal
                 stable assignment is NP-hard to compute, and, beyond
                 this, little is known about the complexity of the
                 Provision-after-Wait problem. We start by showing that
                 this problem is in fact strongly NP-hard, and thus does
                 not have a Fully Polynomial-Time Approximation Scheme
                 (FPTAS). We then focus on the common preference
                 setting, where the patients have the same ranking over
                 the hospitals. Even when the patients perceive the
                 hospitals' values to them based on the same quality
                  measurement---referred to as proportional preferences,
                  which has been widely studied in resource
                  allocation---the problem is still NP-hard. However, in a
                 more general setting where the patients are ordered
                 according to the differences of their values between
                 consecutive hospitals, we construct an FPTAS for it. To
                 develop our results, we characterize the structure of
                 optimal stable assignments and their social welfare,
                 and we consider a new combinatorial optimization
                 problem that may be of independent interest, the
                 ordered Knapsack problem. Optimal stable assignments
                 are deterministic and ex-post individually rational for
                 patients. The downside is that waiting times are
                 dead-loss to patients and may burn a lot of social
                 welfare. If randomness is allowed, then the planner can
                 use lotteries as a rationing tool: The hope is that
                 they reduce the patients' waiting times, although they
                 are interim individually rational instead of ex-post.
                 Previous study has only considered lotteries for two
                 hospitals. In our setting, for arbitrary number of
                 hospitals, we characterize the structure of the optimal
                 lottery scheme and conditions under which using
                 lotteries generates better (expected) social welfare
                 than using waiting times.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Babaioff:2017:PPU,
  author =       "Moshe Babaioff and Liad Blumrosen and Shaddin Dughmi
                 and Yaron Singer",
  title =        "Posting Prices with Unknown Distributions",
  journal =      j-TEAC,
  volume =       "5",
  number =       "2",
  pages =        "13:1--13:??",
  month =        mar,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3037382",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Aug 9 16:06:10 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We consider a dynamic auction model, where bidders
                 sequentially arrive to the market. The values of the
                 bidders for the item for sale are independently drawn
                 from a distribution, but this distribution is unknown
                 to the seller. The seller offers a personalized
                 take-it-or-leave-it price for each arriving bidder and
                 aims to maximize revenue. We study how well can such
                 sequential posted-price mechanisms approximate the
                 optimal revenue that would be achieved if the
                 distribution was known to the seller. On the negative
                 side, we show that sequential posted-price mechanisms
                 cannot guarantee a constant fraction of this revenue
                 when the class of candidate distributions is
                 unrestricted. We show that this impossibility holds
                 even for randomized mechanisms and even if the set of
                 possible distributions is very small or when the seller
                 has a prior distribution over the candidate
                 distributions. On the positive side, we devise a simple
                 posted-price mechanism that guarantees a constant
                 fraction of the known-distribution revenue when all
                 candidate distributions exhibit the monotone hazard
                 rate property.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Fadaei:2017:TMG,
  author =       "Salman Fadaei and Martin Bichler",
  title =        "A Truthful Mechanism for the Generalized Assignment
                 Problem",
  journal =      j-TEAC,
  volume =       "5",
  number =       "3",
  pages =        "14:1--14:??",
  month =        aug,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3105787",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Aug 9 16:06:10 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We propose a truthful-in-expectation, $ (1 - 1 /
                 e)$-approximation mechanism for a strategic variant of
                 the generalized assignment problem (GAP). In GAP, a set
                 of items has to be optimally assigned to a set of bins
                 without exceeding the capacity of any singular bin. In
                 the strategic variant of the problem we study, values
                 for assigning items to bins are the private information
                 of bidders and the mechanism should provide bidders
                 with incentives to truthfully report their values. The
                 approximation ratio of the mechanism is a significant
                 improvement over the approximation ratio of the
                 existing truthful mechanism for GAP. The proposed
                 mechanism comprises a novel convex optimization program
                 as the allocation rule as well as an appropriate
                 payment rule. To implement the convex program in
                 polynomial time, we propose a fractional local search
                 algorithm which approximates the optimal solution
                 within an arbitrarily small error leading to an
                 approximately truthful-in-expectation mechanism. The
                 proposed algorithm improves upon the existing
                 optimization algorithms for GAP in terms of simplicity
                 and runtime while the approximation ratio closely
                 matches the best approximation ratio known for GAP when
                 all inputs are publicly known.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Jaggard:2017:DBG,
  author =       "Aaron D. Jaggard and Neil Lutz and Michael Schapira
                 and Rebecca N. Wright",
  title =        "Dynamics at the Boundary of Game Theory and
                 Distributed Computing",
  journal =      j-TEAC,
  volume =       "5",
  number =       "3",
  pages =        "15:1--15:??",
  month =        aug,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3107182",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Aug 9 16:06:10 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We use ideas from distributed computing and game
                 theory to study dynamic and decentralized environments
                 in which computational nodes, or decision makers,
                 interact strategically and with limited information. In
                 such environments, which arise in many real-world
                 settings, the participants act as both economic and
                 computational entities. We exhibit a general
                 non-convergence result for a broad class of dynamics in
                 asynchronous settings. We consider implications of our
                 result across a wide variety of interesting and timely
                 applications: game dynamics, circuit design, social
                 networks, Internet routing, and congestion control. We
                 also study the computational and communication
                 complexity of testing the convergence of asynchronous
                 dynamics. Our work opens a new avenue for research at
                 the intersection of distributed computing and game
                 theory.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Anshelevich:2017:EFP,
  author =       "Elliot Anshelevich and Koushik Kar and Shreyas Sekar",
  title =        "Envy-Free Pricing in Large Markets: Approximating
                 Revenue and Welfare",
  journal =      j-TEAC,
  volume =       "5",
  number =       "3",
  pages =        "16:1--16:??",
  month =        aug,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3105786",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Aug 9 16:06:10 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the classic setting of envy-free pricing, in
                 which a single seller chooses prices for its many
                 items, with the goal of maximizing revenue once the
                 items are allocated. Despite the large body of work
                 addressing such settings, most versions of this problem
                 have resisted good approximation factors for maximizing
                 revenue; this is true even for the classic unit-demand
                 case. In this article, we study envy-free pricing with
                 unit-demand buyers, but unlike previous work we focus
                 on large markets: ones in which the demand of each
                 buyer is infinitesimally small compared to the size of
                 the overall market. We assume that the buyer valuations
                 for the items they desire have a nice (although
                 reasonable) structure, that is, that the aggregate
                 buyer demand has a monotone hazard rate and that the
                 values of every buyer type come from the same support.
                 For such large markets, our main contribution is a
                 1.88-approximation algorithm for maximizing revenue,
                 showing that good pricing schemes can be computed when
                 the number of buyers is large. We also give a $ (e,
                 2)$-bicriteria algorithm that simultaneously
                 approximates both maximum revenue and welfare, thus
                 showing that it is possible to obtain both good revenue
                 and welfare at the same time. We further generalize our
                 results by relaxing some of our assumptions and
                 quantify the necessary tradeoffs between revenue and
                 welfare in our setting. Our results are the first known
                 approximations for large markets and crucially rely on
                 new lower bounds, which we prove for the
                 revenue-maximizing prices.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Frongillo:2017:GPM,
  author =       "Rafael Frongillo and Jens Witkowski",
  title =        "A Geometric Perspective on Minimal Peer Prediction",
  journal =      j-TEAC,
  volume =       "5",
  number =       "3",
  pages =        "17:1--17:??",
  month =        aug,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3070903",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Aug 9 16:06:10 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Minimal peer prediction mechanisms truthfully elicit
                 private information (e.g., opinions or experiences)
                 from rational agents without the requirement that
                 ground truth is eventually revealed. In this article,
                 we use a geometric perspective to prove that minimal
                 peer prediction mechanisms are equivalent to power
                 diagrams, a type of weighted Voronoi diagram. Using
                 this characterization and results from computational
                 geometry, we show that many of the mechanisms in the
                 literature are unique up to affine transformations. We
                 also show that classical peer prediction is
                 ``complete'' in that every minimal mechanism can be
                 written as a classical peer prediction mechanism for
                 some scoring rule. Finally, we use our geometric
                 characterization to develop a general method for
                 constructing new truthful mechanisms, and we show how
                 to optimize for the mechanisms' effort incentives and
                 robustness.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Cole:2017:ASR,
  author =       "Richard Cole and Shravas Rao",
  title =        "Applications of $ \alpha $-Strongly Regular
                 Distributions to {Bayesian} Auctions",
  journal =      j-TEAC,
  volume =       "5",
  number =       "4",
  pages =        "18:1--18:??",
  month =        dec,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3157083",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Dec 22 18:33:00 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Two classes of distributions that are widely used in
                 the analysis of Bayesian auctions are the monotone
                 hazard rate (MHR) and regular distributions. They can
                 both be characterized in terms of the rate of change of
                 the associated virtual value functions: for MHR
                 distributions, the condition is that for values $ v <
                 v^' $, $ \phi (v^') - \phi (v) \geq v^' - v $, and for
                 regular distributions, $ \phi (v^') - \phi (v) \geq 0
                 $. Cole and Roughgarden introduced the interpolating
                 class of $ \alpha $-strongly regular distributions ($
                 \alpha $-SR distributions for short), for which $ \phi
                 (v^') - \phi (v) \geq \alpha (v^' - v)$, for $ 0 \leq
                 \alpha \leq 1$. In this article, we investigate five
                 distinct auction settings for which good expected
                 revenue bounds are known when the bidders' valuations
                 are given by MHR distributions. In every case, we show
                 that these bounds degrade gracefully when extended to $
                 \alpha $-SR distributions. For four of these settings,
                 the auction mechanism requires knowledge of these
                 distributions (in the remaining setting, the
                 distributions are needed only to ensure good bounds on
                 the expected revenue). In these cases, we also
                 investigate what happens when the distributions are
                 known only approximately via samples, specifically how
                 to modify the mechanisms so that they remain effective
                 and how the expected revenue depends on the number of
                 samples.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Giannakopoulos:2017:VMB,
  author =       "Yiannis Giannakopoulos and Maria Kyropoulou",
  title =        "The {VCG} Mechanism for {Bayesian} Scheduling",
  journal =      j-TEAC,
  volume =       "5",
  number =       "4",
  pages =        "19:1--19:??",
  month =        dec,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3105968",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Dec 22 18:33:00 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the problem of scheduling m tasks to n
                 selfish, unrelated machines in order to minimize the
                 makespan, in which the execution times are independent
                 random variables, identical across machines. We show
                 that the VCG mechanism, which myopically allocates each
                 task to its best machine, achieves an approximation
                 ratio of O (ln n / ln ln n ). This improves
                 significantly on the previously best known bound of O (
                 m / n ) for prior-independent mechanisms, given by
                 Chawla et al. [7] under the additional assumption of
                 Monotone Hazard Rate (MHR) distributions. Although we
                 demonstrate that this is tight in general, if we do
                 maintain the MHR assumption, then we get improved,
                 (small) constant bounds for m {$>$}= n ln n i.i.d.
                 tasks. We also identify a sufficient condition on the
                 distribution that yields a constant approximation ratio
                 regardless of the number of tasks.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Assadi:2017:FCD,
  author =       "Sepehr Assadi and Sanjeev Khanna and Yang Li and
                 Rakesh Vohra",
  title =        "Fast Convergence in the Double Oral Auction",
  journal =      j-TEAC,
  volume =       "5",
  number =       "4",
  pages =        "20:1--20:??",
  month =        dec,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3084358",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Dec 22 18:33:00 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "A classical trading experiment consists of a set of
                 unit demand buyers and unit supply sellers with
                 identical items. Each agent's value or opportunity cost
                 for the item is his private information, and
                 preferences are quasilinear. Trade between agents
                 employs a double oral auction (DOA) in which both
                 buyers and sellers call out bids or offers that an
                 auctioneer recognizes. Transactions resulting from
                 accepted bids and offers are recorded. This continues
                 until there are no more acceptable bids or offers.
                 Remarkably, the experiment consistently terminates in a
                 Walrasian price. The main result of this article is a
                 mechanism in the spirit of the DOA that converges to a
                 Walrasian equilibrium in a polynomial number of steps,
                 thus providing a theoretical basis for the empirical
                 phenomenon described previously. It is well known that
                 computation of a Walrasian equilibrium for this market
                 corresponds to solving a maximum weight bipartite
                 matching problem. The uncoordinated but mildly rational
                 responses of agents thus solve in a distributed fashion
                 a maximum weight bipartite matching problem that is
                 encoded by their private valuations. We show,
                 furthermore, that every Walrasian equilibrium is
                 reachable by some sequence of responses. This is in
                 contrast to the well-known auction algorithms for this
                 problem that only allow one side to make offers and
                 thus essentially choose an equilibrium that maximizes
                 the surplus for the side making offers. Our results
                 extend to the setting where not every agent pair is
                 allowed to trade with each other.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Bjelde:2017:ISP,
  author =       "Antje Bjelde and Felix Fischer and Max Klimm",
  title =        "Impartial Selection and the Power of Up to Two
                 Choices",
  journal =      j-TEAC,
  volume =       "5",
  number =       "4",
  pages =        "21:1--21:??",
  month =        dec,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3107922",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Dec 22 18:33:00 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study mechanisms that select members of a set of
                 agents based on nominations by other members and that
                 are impartial in the sense that agents cannot influence
                 their own chance of selection. Prior work has shown
                 that deterministic mechanisms for selecting any fixed
                 number k of agents are severely limited and cannot
                 extract a constant fraction of the nominations of the k
                 most highly nominated agents. We prove here that this
                 impossibility result can be circumvented by allowing
                 the mechanism to sometimes but not always select fewer
                 than k agents. This added flexibility also improves the
                 performance of randomized mechanisms, for which we show
                 a separation between mechanisms that make exactly two
                 or up to two choices and give upper and lower bounds
                 for mechanisms allowed more than two choices.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Adamczyk:2017:SPP,
  author =       "Marek Adamczyk and Allan Borodin and Diodato Ferraioli
                 and Bart {De Keijzer} and Stefano Leonardi",
  title =        "Sequential Posted-Price Mechanisms with Correlated
                 Valuations",
  journal =      j-TEAC,
  volume =       "5",
  number =       "4",
  pages =        "22:1--22:??",
  month =        dec,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3157085",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Dec 22 18:33:00 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the revenue performance of sequential
                 posted-price mechanisms and some natural extensions for
                 a setting where the valuations of the buyers are drawn
                 from a correlated distribution. Sequential posted-price
                 mechanisms are conceptually simple mechanisms that work
                 by proposing a ``take-it-or-leave-it'' offer to each
                 buyer. We apply sequential posted-price mechanisms to
                 single-parameter multiunit settings in which each buyer
                 demands only one item and the mechanism can assign the
                 service to at most k of the buyers. For standard
                 sequential posted-price mechanisms, we prove that with
                 the valuation distribution having finite support, no
                 sequential posted-price mechanism can extract a
                 constant fraction of the optimal expected revenue, even
                 with unlimited supply. We extend this result to the
                 case of a continuous valuation distribution when
                 various standard assumptions hold simultaneously (i.e.,
                 everywhere-supported, continuous, symmetric, and
                 normalized (conditional) distributions that satisfy
                 regularity, the MHR condition, and affiliation ). In
                 fact, it turns out that the best fraction of the
                 optimal revenue that is extractable by a sequential
                 posted-price mechanism is proportional to the ratio of
                 the highest and lowest possible valuation. We prove
                 that a simple generalization of these mechanisms
                 achieves a better revenue performance; namely, if the
                 sequential posted-price mechanism has for each buyer
                 the option of either proposing an offer or asking the
                 buyer for its valuation, then an $ \Omega (1 / \max \{ 1, d \}) $
                 fraction of the optimal revenue can be extracted, where
                 d denotes the degree of dependence of the valuations,
                 ranging from complete independence ( d = 0) to
                 arbitrary dependence ( d = n -1).",
  acknowledgement = ack-nhfb,
  articleno =    "22",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Bosansky:2017:CSE,
  author =       "Branislav Bosansk{\'y} and Simina Br{\^a}nzei and
                 Kristoffer Arnsfelt Hansen and Troels Bjerre Lund and
                 Peter Bro Miltersen",
  title =        "Computation of {Stackelberg} Equilibria of Finite
                 Sequential Games",
  journal =      j-TEAC,
  volume =       "5",
  number =       "4",
  pages =        "23:1--23:??",
  month =        dec,
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3133242",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Fri Dec 22 18:33:00 MST 2017",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "The Stackelberg equilibrium is a solution concept that
                 describes optimal strategies to commit to: Player 1
                 (the leader) first commits to a strategy that is
                 publicly announced, then Player 2 (the follower) plays
                 a best response to the leader's choice. We study the
                 problem of computing Stackelberg equilibria in finite
                 sequential (i.e., extensive-form) games and provide new
                 exact algorithms, approximation algorithms, and
                 hardness results for finding equilibria for several
                 classes of such two-player games.",
  acknowledgement = ack-nhfb,
  articleno =    "23",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Garg:2018:RCD,
  author =       "Jugal Garg and Ruta Mehta and Vijay V. Vazirani and
                 Sadra Yazdanbod",
  title =        "$ \exists $ {R}-Completeness for Decision Versions of
                 Multi-Player (Symmetric) {Nash} Equilibria",
  journal =      j-TEAC,
  volume =       "6",
  number =       "1",
  pages =        "1:1--1:??",
  month =        mar,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3175494",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:45 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "As a result of a series of important works [7--9, 15,
                 23], the complexity of two-player Nash equilibrium is
                 by now well understood, even when equilibria with
                 special properties are desired and when the game is
                 symmetric. However, for multi-player games, when
                 equilibria with special properties are desired, the
                 only result known is due to Schaefer and Stefankovic
                 [28]: that checking whether a three-player Nash
                 Equilibrium (3-Nash) instance has an equilibrium in a
                 ball of radius half in l$_\infty $-norm is $ \exists $
                 R-complete, where $ \exists $ R is the class of
                 decision problems that can be reduced in polynomial
                 time to Existential Theory of the Reals. Building on
                 their work, we show that the following decision
                 versions of 3-Nash are also $ \exists $ R-complete:
                 checking whether (i) there are two or more equilibria,
                 (ii) there exists an equilibrium in which each player
                 gets at least h payoff, where h is a rational number,
                 (iii) a given set of strategies are played with
                 non-zero probability, and (iv) all the played
                 strategies belong to a given set. Next, we give a
                 reduction from 3-Nash to symmetric 3-Nash, hence
                 resolving an open problem of Papadimitriou [25]. This
                 yields $ \exists $ R-completeness for symmetric 3-Nash
                 for the last two problems stated above as well as
                 completeness for the class FIXP$_a$, a variant of FIXP
                 for strong approximation. All our results extend to k
                 -Nash for any constant k {$>$}= 3.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Candogan:2018:PEG,
  author =       "Ozan Candogan and Asuman Ozdaglar and Pablo Parrilo",
  title =        "Pricing Equilibria and Graphical Valuations",
  journal =      j-TEAC,
  volume =       "6",
  number =       "1",
  pages =        "2:1--2:??",
  month =        mar,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3175495",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:45 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study pricing equilibria for graphical valuations,
                 which are a class of valuations that admit a compact
                 representation. These valuations are associated with a
                 value graph, whose nodes correspond to items, and edges
                 encode (pairwise) complementarities/substitutabilities
                 between items. It is known that for graphical
                 valuations a Walrasian equilibrium (a pricing
                 equilibrium that relies on anonymous item prices) does
                 not exist in general. On the other hand, a pricing
                 equilibrium exists when the seller uses an
                 agent-specific graphical pricing rule that involves
                 prices for each item and markups/discounts for pairs of
                 items. We study the existence of pricing equilibria
                 with simpler pricing rules which either (i) require
                 anonymity (so that prices are identical for all agents)
                 while allowing for pairwise markups/discounts or (ii)
                 involve offering prices only for items. We show that a
                 pricing equilibrium with the latter pricing rule exists
                 if and only if a Walrasian equilibrium exists, whereas
                 the former pricing rule may guarantee the existence of
                 a pricing equilibrium even for graphical valuations
                 that do not admit a Walrasian equilibrium.
                 Interestingly, by exploiting a novel connection between
                 the existence of a pricing equilibrium and the
                 partitioning polytope associated with the underlying
                 graph, we also establish that for simple
                 (series-parallel) value graphs, a pricing equilibrium
                 with anonymous graphical pricing rule exists if and
                 only if a Walrasian equilibrium exists. These
                 equivalence results imply that simpler pricing rules
                 (i) and (ii) do not guarantee the existence of a
                 pricing equilibrium for all graphical valuations.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Fujishige:2018:RAP,
  author =       "Satoru Fujishige and Yoshio Sano and Ping Zhan",
  title =        "The Random Assignment Problem with Submodular
                 Constraints on Goods",
  journal =      j-TEAC,
  volume =       "6",
  number =       "1",
  pages =        "3:1--3:??",
  month =        mar,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3175496",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:45 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Problems of allocating indivisible goods to agents in
                 an efficient and fair manner without money have long
                 been investigated in literature. The random assignment
                 problem is one of them, where we are given a fixed
                 feasible (available) set of indivisible goods and a
                 profile of ordinal preferences over the goods, one for
                 each agent. Then, using lotteries, we determine an
                 assignment of goods to agents in a randomized way. A
                 seminal paper of Bogomolnaia and Moulin (2001) shows a
                 probabilistic serial (PS) mechanism to give an
                 ordinally efficient and envy-free solution to the
                 assignment problem. In this article, we consider an
                 extension of the random assignment problem to
                 submodular constraints on goods. We show that the
                 approach of the PS mechanism by Bogomolnaia and Moulin
                 is powerful enough to solve the random assignment
                 problem with submodular (matroidal and polymatroidal)
                 constraints. Under the agents' ordinal preferences over
                 goods we show the following: (1) The obtained PS
                 solution for the problem with unit demands and
                 matroidal constraints is ordinally efficient,
                 envy-free, and weakly strategy-proof with respect to
                 the associated stochastic dominance relation. (2) For
                 the multiunit demand and polymatroidal constraint
                 problem, the PS solution is ordinally efficient and
                 envy-free but is not strategy-proof in general.
                 However, we show that under a mild condition (that is
                 likely to be satisfied in practice) the PS solution is
                 a weak Nash equilibrium.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Harks:2018:CPR,
  author =       "Tobias Harks and Britta Peis and Daniel Schmand and
                 Bjoern Tauer and Laura Vargas Koch",
  title =        "Competitive Packet Routing with Priority Lists",
  journal =      j-TEAC,
  volume =       "6",
  number =       "1",
  pages =        "4:1--4:??",
  month =        mar,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3184137",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:45 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "In competitive packet routing games, the packets are
                 routed selfishly through a network and scheduling
                 policies at edges determine which packets are forwarded
                 first if there is not enough capacity on an edge to
                 forward all packets at once. We analyze the impact of
                 priority lists on the worst-case quality of pure Nash
                 equilibria. A priority list is an ordered list of
                 players that may or may not depend on the edge.
                 Whenever the number of packets entering an edge exceeds
                 the inflow capacity, packets are processed in list
                 order. We derive several new bounds on the price of
                 anarchy and stability for global and local priority
                 policies. We also consider the question of the
                 complexity of computing an optimal priority list. It
                 turns out that even for very restricted cases, i.e.,
                 for routing on a tree, the computation of an optimal
                 priority list is APX-hard.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Dutting:2018:VCV,
  author =       "Paul D{\"u}tting and Monika Henzinger and Martin
                 Starnberger",
  title =        "Valuation Compressions in {VCG}-Based Combinatorial
                 Auctions",
  journal =      j-TEAC,
  volume =       "6",
  number =       "2",
  pages =        "5:1--5:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3232860",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "The focus of classic mechanism design has been on
                 truthful direct-revelation mechanisms. In the context
                 of combinatorial auctions, the truthful
                 direct-revelation mechanism that maximizes social
                 welfare is the Vickrey-Clarke-Groves mechanism. For
                 many valuation spaces, computing the allocation and
                 payments of the VCG mechanism, however, is a
                 computationally hard problem. We thus study the
                 performance of the VCG mechanism when bidders are
                 forced to choose bids from a subspace of the valuation
                 space for which the VCG outcome can be computed
                 efficiently. We prove improved upper bounds on the
                 welfare loss for restrictions to additive bids and
                 upper and lower bounds for restrictions to non-additive
                 bids. These bounds show that increased expressiveness
                 can give rise to additional equilibria of poorer
                 efficiency.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Irfan:2018:CSI,
  author =       "Mohammad T. Irfan and Luis E. Ortiz",
  title =        "Causal Strategic Inference in a Game-Theoretic Model
                 of Multiplayer Networked Microfinance Markets",
  journal =      j-TEAC,
  volume =       "6",
  number =       "2",
  pages =        "6:1--6:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3232861",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "Performing interventions is a major challenge in
                 economic policy-making. We present causal strategic
                 inference as a framework for conducting interventions
                 and apply it to large, networked microfinance
                 economies. The basic solution platform consists of
                 modeling a microfinance market as a networked economy,
                 learning the model using single-sample real-world
                 microfinance data, and designing algorithms for various
                 causal questions. For a special case of our model, we
                 show that an equilibrium point always exists and that
                 the equilibrium interest rates are unique. For the
                 general case, we give a constructive proof of the
                 existence of an equilibrium point. Our empirical study
                 is based on microfinance data from Bangladesh and
                 Bolivia, which we use to first learn our models. We
                 show that causal strategic inference can assist
                 policy-makers by evaluating the outcomes of various
                 types of interventions, such as removing a loss-making
                 bank from the market, imposing an interest-rate cap,
                 and subsidizing banks.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Ghosh:2018:CC,
  author =       "Arpita Ghosh and Patrick Hummel",
  title =        "Cardinal Contests",
  journal =      j-TEAC,
  volume =       "6",
  number =       "2",
  pages =        "7:1--7:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3232862",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We model and analyze cardinal contests, where a
                 principal running a rank-order tournament has access to
                 an absolute measure of the quality of agents'
                 submissions in addition to their relative rankings. We
                 show that a mechanism that compares each agent's output
                 quality against a threshold to decide whether to award
                 her the prize corresponding to her rank is optimal
                 amongst the set of all mixed cardinal-ordinal
                 mechanisms where the j th-ranked submission receives a
                 fraction of the j th prize that is a non-decreasing
                 function of the submission's quality. Furthermore, the
                 optimal threshold mechanism uses exactly the same
                 threshold for each rank.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Wang:2018:VPS,
  author =       "Weina Wang and Lei Ying and Junshan Zhang",
  title =        "The Value of Privacy: Strategic Data Subjects,
                 Incentive Mechanisms, and Fundamental Limits",
  journal =      j-TEAC,
  volume =       "6",
  number =       "2",
  pages =        "8:1--8:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3232863",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "We study the value of data privacy in a game-theoretic
                 model of trading private data, where a data collector
                 purchases private data from strategic data subjects
                 (individuals) through an incentive mechanism. One
                 primary goal of the data collector is to learn some
                 desired information from the elicited data.
                 Specifically, this information is modeled by an
                 underlying state, and the private data of each
                 individual represents his or her knowledge about the
                 state. Departing from most of the existing work on
                 privacy-aware surveys, our model does not assume the
                 data collector to be trustworthy. Further, an
                 individual takes full control of his or her own data
                 privacy and reports only a privacy-preserving version
                 of his or her data. In this article, the value of
                 \epsilon units of privacy is measured by the minimum
                 payment among all nonnegative payment mechanisms, under
                 which an individual's best response at a Nash
                 equilibrium is to report his or her data in an \epsilon
                 -locally differentially private manner. The higher
                 \epsilon is, the less private the reported data is. We
                 derive lower and upper bounds on the value of privacy
                 that are asymptotically tight as the number of data
                 subjects becomes large. Specifically, the lower bound
                 assures that it is impossible to use a lower payment to
                 buy \epsilon units of privacy, and the upper bound is
                 given by an achievable payment mechanism that we
                 design. Based on these fundamental limits, we further
                 derive lower and upper bounds on the minimum total
                 payment for the data collector to achieve a given
                 accuracy target for learning the underlying state and
                 show that the total payment of the designed mechanism
                 is at most one individual's payment away from the
                 minimum.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Velez:2018:ERD,
  author =       "Rodrigo A. Velez",
  title =        "Equitable Rent Division",
  journal =      j-TEAC,
  volume =       "6",
  number =       "2",
  pages =        "9:1--9:??",
  month =        oct,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274528",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  abstract =     "How should a group of roommates allocate the rooms and
                 contributions to rent in the house they lease?
                 Economists have provided partial answers to this
                 question in a literature that spans the last 40 years.
                 Unfortunately, these results were developed in a
                 non-linear fashion, which obscures them to the
                 non-specialist. Recently, computer scientists have
                 developed an interest in this problem, advancing from
                 an algorithmic complexity perspective. With this new
                 interest gaining traction, there is an evident need for
                 a coherent development of the results in economics
                 literature. This article does so. In particular, we
                 build connections among results that were seemingly
                 unrelated and considerably simplify their development,
                 fill in non-trivial gaps, and identify open questions.
                 Our focus is on incentives issues, the area in which we
                 believe economists have more to contribute in this
                 discussion.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Feldman:2018:ISI,
  author =       "Michal Feldman and Brendan Lucier and Michael
                 Schwarz",
  title =        "Introduction to the Special Issue on {EC'15}",
  journal =      j-TEAC,
  volume =       "6",
  number =       "3--4",
  pages =        "10:1--10:??",
  month =        nov,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274531",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3274531",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Kurokawa:2018:LAR,
  author =       "David Kurokawa and Ariel D. Procaccia and Nisarg
                 Shah",
  title =        "Leximin Allocations in the Real World",
  journal =      j-TEAC,
  volume =       "6",
  number =       "3--4",
  pages =        "11:1--11:??",
  month =        nov,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274641",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3274641",
  abstract =     "As part of a collaboration with a major California
                 school district, we study the problem of fairly
                 allocating unused classrooms in public schools to
                 charter schools. Our approach revolves around the
                 randomized leximin mechanism. We extend previous work
                 to show that the leximin mechanism is proportional,
                 envy-free, Pareto optimal, and group strategyproof, not
                 only in our classroom allocation setting, but in a
                 general framework that subsumes a number of settings
                 previously studied in the literature. We also prove
                 that the leximin mechanism provides a (worst-case)
                 4-approximation to the maximum number of classrooms
                 that can possibly be allocated. Our experiments, which
                 are based on real data, show that a non-trivial
                 implementation of the leximin mechanism scales
                 gracefully in terms of running time (even though the
                 problem is intractable in theory), and performs
                 extremely well with respect to a number of efficiency
                 objectives. We establish the practicability of our
                 approach, and discuss issues related to its
                 deployment.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Kannan:2018:PPO,
  author =       "Sampath Kannan and Jamie Morgenstern and Ryan Rogers
                 and Aaron Roth",
  title =        "Private {Pareto} Optimal Exchange",
  journal =      j-TEAC,
  volume =       "6",
  number =       "3--4",
  pages =        "12:1--12:??",
  month =        nov,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3105445",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3105445",
  abstract =     "We consider the problem of implementing an
                 individually rational, asymptotically Pareto optimal
                 allocation in a barter-exchange economy where agents
                 are endowed with goods and preferences over the goods
                 of others, but may not use money as a medium of
                 exchange. Because one of the most important
                 instantiations of such economies is kidney
                 exchange---where the ``input'' to the problem consists of
                 sensitive patient medical records---we ask to what extent
                 such exchanges can be carried out while providing
                 formal privacy guarantees to the participants. We show
                 that individually rational allocations cannot achieve
                 any non-trivial approximation to Pareto optimality if
                 carried out under the constraint of differential
                 privacy---or even the relaxation of joint differential
                 privacy, under which it is known that asymptotically
                 optimal allocations can be computed in two sided
                 markets (Hsu et al. STOC 2014). We therefore consider a
                 further relaxation that we call marginal differential
                 privacy---which promises, informally, that the privacy of
                 every agent i is protected from every other agent j {$\ne$}
                 i so long as j does not collude or share allocation
                 information with other agents. We show that under
                 marginal differential privacy, it is possible to
                 compute an individually rational and asymptotically
                 Pareto optimal allocation in such exchange economies.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Blum:2018:LWG,
  author =       "Avrim Blum and Yishay Mansour and Jamie Morgenstern",
  title =        "Learning What's Going on: Reconstructing Preferences
                 and Priorities from Opaque Transactions",
  journal =      j-TEAC,
  volume =       "6",
  number =       "3--4",
  pages =        "13:1--13:??",
  month =        nov,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274642",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3274642",
  abstract =     "We consider a setting where n buyers, with
                 combinatorial preferences over m items, and a seller,
                 running a priority-based allocation mechanism,
                 repeatedly interact. Our goal, from observing limited
                 information about the results of these interactions, is
                 to reconstruct both the preferences of the buyers and
                 the mechanism of the seller. More specifically, we
                 consider an online setting where at each stage, a
                 subset of the buyers arrive and are allocated items,
                 according to some unknown priority that the seller has
                 among the buyers. Our learning algorithm observes only
                 which buyers arrive and the allocation produced (or
                 some function of the allocation, such as just which
                 buyers received positive utility and which did not),
                 and its goal is to predict the outcome for future
                 subsets of buyers. For this task, the learning
                 algorithm needs to reconstruct both the priority among
                 the buyers and the preferences of each buyer. We derive
                 mistake bound algorithms for additive, unit-demand and
                 single-minded buyers. We also consider the case where
                 buyers' utilities for a fixed bundle can change between
                 stages due to different (observed) prices. Our
                 algorithms are efficient both in computation time and
                 in the maximum number of mistakes (both polynomial in
                 the number of buyers and items).",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Esfandiari:2018:ATS,
  author =       "Hossein Esfandiari and Nitish Korula and Vahab
                 Mirrokni",
  title =        "Allocation with Traffic Spikes: Mixing Adversarial and
                 Stochastic Models",
  journal =      j-TEAC,
  volume =       "6",
  number =       "3--4",
  pages =        "14:1--14:??",
  month =        nov,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3105446",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3105446",
  abstract =     "Motivated by Internet advertising applications, online
                 allocation problems have been studied extensively in
                 various adversarial and stochastic models. While the
                 adversarial arrival models are too pessimistic, many of
                 the stochastic (such as i.i.d. or random-order) arrival
                 models do not realistically capture uncertainty in
                 predictions. A significant cause for such uncertainty
                 is the presence of unpredictable traffic spikes, often
                 due to breaking news or similar events. To address this
                 issue, a simultaneous approximation framework has been
                 proposed to develop algorithms that work well both in
                 the adversarial and stochastic models; however, this
                 framework does not enable algorithms that make good use
                 of partially accurate forecasts when making online
                 decisions. In this article, we propose a robust online
                 stochastic model that captures the nature of traffic
                 spikes in online advertising. In our model, in addition
                 to the stochastic input for which we have good
                 forecasting, an unknown number of impressions arrive
                 that are adversarially chosen. We design algorithms
                 that combine a stochastic algorithm with an online
                 algorithm that adaptively reacts to inaccurate
                 predictions. We provide provable bounds for our new
                 algorithms in this framework. We accompany our positive
                 results with a set of hardness results showing that our
                 algorithms are not far from optimal in this framework.
                 As a byproduct of our results, we also present improved
                 online algorithms for a slight variant of the
                 simultaneous approximation framework.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Heidari:2018:IMM,
  author =       "Hoda Heidari and S{\'e}bastien Lahaie and David M.
                 Pennock and Jennifer Wortman Vaughan",
  title =        "Integrating Market Makers, Limit Orders, and
                 Continuous Trade in Prediction Markets",
  journal =      j-TEAC,
  volume =       "6",
  number =       "3--4",
  pages =        "15:1--15:??",
  month =        nov,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274643",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3274643",
  abstract =     "We provide the first concrete algorithm for combining
                 market makers and limit orders in a prediction market
                 with continuous trade. Our mechanism is general enough
                 to handle both bundle orders and arbitrary securities
                 defined over combinatorial outcome spaces. We define
                 the notion of an \epsilon -fair trading path, a path in
                 security space along which no order executes at a price
                 more than \epsilon above its limit, and every order
                 executes when its market price falls more than \epsilon
                 below its limit. We show that, under a certain
                 supermodularity condition, a fair trading path exists
                 for which the endpoint is efficient, but that under
                 general conditions reaching an efficient endpoint via
                 an \epsilon -fair trading path is not possible. We
                 develop an algorithm for operating a continuous market
                 maker with limit orders that respects the \epsilon
                 -fairness conditions in the general case. We conduct
                 simulations of our algorithm using real combinatorial
                 predictions made during the 2008 US presidential
                 election and evaluate it against a natural baseline
                 according to trading volume, social welfare, and
                 violations of the two fairness conditions.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Naor:2018:NOO,
  author =       "Joseph (Seffi) Naor and David Wajc",
  title =        "Near-Optimum Online Ad Allocation for Targeted
                 Advertising",
  journal =      j-TEAC,
  volume =       "6",
  number =       "3--4",
  pages =        "16:1--16:??",
  month =        nov,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3105447",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3105447",
  abstract =     "Motivated by Internet targeted advertising, we address
                 several ad allocation problems. Prior work has
                 established that these problems admit no randomized
                 online algorithm better than (1 --- 1/ e)-competitive
                 (see Karp et al. (1990) and Mehta et al. (2007)), yet
                 simple heuristics have been observed to perform much
                 better in practice. We explain this phenomenon by
                 studying a generalization of the bounded-degree inputs
                 considered by Buchbinder et al. (2007), graphs which we
                 call (k, d)- bounded. In such graphs the maximal degree
                 on the online side is at most d and the minimal degree
                 on the offline side is at least k. We prove that, for
                 such graphs, these problems' natural greedy algorithms
                 attain a competitive ratio of 1 --- d -1/ k + d -1,
                 tending to 1 as d / k tends to zero. We prove this
                 bound is tight for these algorithms. Next, we develop
                 deterministic primal-dual algorithms for the above
                 problems, achieving a competitive ratio of 1-(1 --- 1/
                 d)$^k$ {$>$} 1 --- 1/ e$^{k / d}$, or exponentially
                 better loss as a function of k / d, and strictly
                 better than 1 --- 1/e whenever k {$>$}= d. We
                 complement our lower bounds with matching upper bounds
                 for the vertex-weighted problem. Finally, we use our
                 deterministic algorithms to prove by dual-fitting that
                 simple randomized algorithms achieve the same bounds in
                 expectation. Our algorithms and analysis differ from
                 previous ad allocation algorithms, which largely scale
                 bids based on the spent fraction of their bidder's
                 budget, whereas we scale bids according to the number
                 of times the bidder could have spent as much as her
                 current bid. Our algorithms differ from previous online
                 primal-dual algorithms in that they do not maintain
                 dual feasibility but only a primal-to-dual ratio and
                 only attain dual feasibility upon termination. We
                 believe our techniques could find applications to other
                 well-behaved online packing problems.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Kleinberg:2018:TPT,
  author =       "Jon Kleinberg and Maithra Raghu",
  title =        "Team Performance with Test Scores",
  journal =      j-TEAC,
  volume =       "6",
  number =       "3--4",
  pages =        "17:1--17:??",
  month =        nov,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274644",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3274644",
  abstract =     "Team performance is a ubiquitous area of inquiry in
                 the social sciences, and it motivates the problem of
                 team selection --- choosing the members of a team for
                 maximum performance. Influential work of Hong and Page
                 has argued that testing individuals in isolation and
                 then assembling the highest scoring ones into a team is
                 not an effective method for team selection. For a broad
                 class of performance measures, based on the expected
                 maximum of random variables representing individual
                 candidates, we show that tests directly measuring
                 individual performance are indeed ineffective, but that
                 a more subtle family of tests used in isolation can
                 provide a constant-factor approximation for team
                 performance. These new tests measure the ``potential''
                 of individuals, in a precise sense, rather than
                 performance; to our knowledge they represent the first
                 time that individual tests have been shown to produce
                 near-optimal teams for a nontrivial team performance
                 measure. We also show families of submodular and
                 supermodular team performance functions for which no
                 test applied to individuals can produce near-optimal
                 teams, and we discuss implications for submodular
                 maximization via hill-climbing.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Gopalan:2018:PPB,
  author =       "Parikshit Gopalan and Noam Nisan and Tim Roughgarden",
  title =        "Public Projects, {Boolean} Functions, and the Borders
                 of {Border}'s Theorem",
  journal =      j-TEAC,
  volume =       "6",
  number =       "3--4",
  pages =        "18:1--18:??",
  month =        nov,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274645",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3274645",
  abstract =     "Border's theorem gives an intuitive linear
                 characterization of the feasible interim allocation
                 rules of a Bayesian single-item environment, and it has
                 several applications in economic and algorithmic
                 mechanism design. All known generalizations of Border's
                 theorem either restrict attention to relatively simple
                 settings or resort to approximation. This article
                 identifies a complexity-theoretic barrier that
                 indicates, assuming standard complexity class
                 separations, that Border's theorem cannot be extended
                 significantly beyond the state of the art. We also
                 identify a surprisingly tight connection between
                 Myerson's optimal auction theory, when applied to
                 public project settings, and some fundamental results
                 in the analysis of Boolean functions.",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Rubinstein:2018:SMS,
  author =       "Aviad Rubinstein and S. Matthew Weinberg",
  title =        "Simple Mechanisms for a Subadditive Buyer and
                 Applications to Revenue Monotonicity",
  journal =      j-TEAC,
  volume =       "6",
  number =       "3--4",
  pages =        "19:1--19:??",
  month =        nov,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3105448",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3105448",
  abstract =     "We study the revenue maximization problem of a seller
                 with n heterogeneous items for sale to a single buyer
                 whose valuation function for sets of items is unknown
                 and drawn from some distribution D. We show that if D
                 is a distribution over subadditive valuations with
                 independent items, then the better of pricing each item
                 separately or pricing only the grand bundle achieves a
                 constant-factor approximation to the revenue of the
                 optimal mechanism. This includes buyers who are k
                 -demand, additive up to a matroid constraint, or
                 additive up to constraints of any downward-closed set
                 system (and whose values for the individual items are
                 sampled independently), as well as buyers who are
                 fractionally subadditive with item multipliers drawn
                 independently. Our proof makes use of the core-tail
                 decomposition framework developed in prior work showing
                 similar results for the significantly simpler class of
                 additive buyers. In the second part of the article, we
                 develop a connection between approximately optimal
                 simple mechanisms and approximate revenue monotonicity
                 with respect to buyers' valuations. Revenue
                 non-monotonicity is the phenomenon that sometimes
                 strictly increasing buyers' values for every set can
                 strictly decrease the revenue of the optimal mechanism.
                 Using our main result, we derive a bound on how bad
                 this degradation can be (and dub such a bound a proof
                 of approximate revenue monotonicity); we further show
                 that better bounds on approximate monotonicity imply a
                 better analysis of our simple mechanisms.",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Daskalakis:2018:RME,
  author =       "Constantinos Daskalakis and Nikhil R. Devanur and S.
                 Matthew Weinberg",
  title =        "Revenue Maximization and Ex-Post Budget Constraints",
  journal =      j-TEAC,
  volume =       "6",
  number =       "3--4",
  pages =        "20:1--20:??",
  month =        nov,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274647",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3274647",
  abstract =     "We consider the problem of a revenue-maximizing seller
                 with m items for sale to n additive bidders with hard
                 budget constraints, assuming that the seller has some
                 prior distribution over bidder values and budgets. The
                 prior may be correlated across items and budgets of the
                 same bidder, but is assumed independent across bidders.
                 We target mechanisms that are Bayesian incentive
                 compatible, but that are ex-post individually rational
                 and ex-post budget respecting. Virtually no such
                 mechanisms are known that satisfy all these conditions
                 and guarantee any revenue approximation, even with just
                 a single item. We provide a computationally efficient
                 mechanism that is a 3-approximation with respect to all
                 BIC, ex-post IR, and ex-post budget respecting
                 mechanisms. Note that the problem is NP-hard to
                 approximate better than a factor of 16/15, even in the
                 case where the prior is a point mass. We further
                 characterize the optimal mechanism in this setting,
                 showing that it can be interpreted as a distribution
                 over virtual welfare maximizers. We prove our results
                 by making use of a black-box reduction from mechanism
                 to algorithm design developed by Cai et al. Our main
                 technical contribution is a computationally efficient
                 3-approximation algorithm for the algorithmic problem
                 that results from an application of their framework to
                 this problem. The algorithmic problem has a mixed-sign
                 objective and is NP-hard to optimize exactly, so it is
                 surprising that a computationally efficient
                 approximation is possible at all. In the case of a
                 single item (m =1), the algorithmic problem can be
                 solved exactly via exhaustive search, leading to a
                 computationally efficient exact algorithm and a
                 stronger characterization of the optimal mechanism as a
                 distribution over virtual value maximizers.",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Bronfman:2018:RIM,
  author =       "Slava Bronfman and Noga Alon and Avinatan Hassidim and
                 Assaf Romm",
  title =        "Redesigning the {Israeli} Medical Internship Match",
  journal =      j-TEAC,
  volume =       "6",
  number =       "3--4",
  pages =        "21:1--21:??",
  month =        nov,
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274646",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3274646",
  abstract =     "The final step in getting an Israeli MD is performing
                 a year-long internship in one of the hospitals in
                 Israel. Internships are decided upon by a lottery,
                 which is known as the Internship Lottery. In 2014, we
                 redesigned the lottery, replacing it with a more
                 efficient one. This article presents the market, the
                 redesign process, and the new mechanism that is now in
                 use. In this article, we describe the redesign and
                 focus on two-body problems that we faced in the new
                 mechanism. Specifically, we show that decomposing
                 stochastic assignment matrices to deterministic
                 allocations is NP-hard in the presence of couples, and
                 present a polynomial-time algorithm with the optimal
                 worst case guarantee. We also study the performance of
                 our algorithm on real-world and simulated data.",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Kirousis:2019:AVM,
  author =       "Lefteris Kirousis and Phokion G. Kolaitis and John
                 Livieratos",
  title =        "Aggregation of Votes with Multiple Positions on Each
                 Issue",
  journal =      j-TEAC,
  volume =       "7",
  number =       "1",
  pages =        "1:1--1:??",
  month =        feb,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3296675",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3296675",
  abstract =     "We consider the problem of aggregating votes cast by a
                 society on a fixed set of issues, where each member of
                 the society may vote for one of several positions on
                 each issue, but the combination of votes on the various
                 issues is restricted to a set of feasible voting
                 patterns. We follow the aggregation framework used by
                 Dokow and Holzman [Aggregation of non-binary
                 evaluations, Advances in Applied Mathematics, 45:4,
                 487--504, 2010], in which both preference aggregation
                 and judgment aggregation can be cast. We require the
                 aggregation to be independent on each issue, and also
                 supportive, i.e., for every issue, the corresponding
                 component of every aggregator, when applied to a tuple
                 of votes, must take as value one of the votes in that
                 tuple. We prove that, in such a setup, non-dictatorial
                 aggregation of votes in a society of an arbitrary size
                 is possible if and only if either there is a
                 non-dictatorial aggregator for two voters or there is
                 an aggregator for three voters such that, for each
                 issue, the corresponding component of the aggregator,
                 when restricted to two-element sets of votes, is a
                 majority operation or a minority operation. We then
                 introduce a notion of a uniform non-dictatorial
                 aggregator, which is an aggregator such that on every
                 issue, and when restricted to arbitrary two-element
                 subsets of the votes for that issue, it differs from
                 all projection functions. We first give a
                 characterization of sets of feasible voting patterns
                 that admit a uniform non-dictatorial aggregator. After
                 this and by making use of Bulatov's dichotomy theorem
                 for conservative constraint satisfaction problems, we
                 connect social choice theory with the computational
                 complexity of constraint satisfaction by proving that
                 if a set of feasible voting patterns has a uniform
                 non-dictatorial aggregator of some arity, then the
                 multi-sorted conservative constraint satisfaction
                 problem on that set (with each issue representing a
                 different sort) is solvable in polynomial time;
                 otherwise, it is NP-complete.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Kong:2019:ITF,
  author =       "Yuqing Kong and Grant Schoenebeck",
  title =        "An Information Theoretic Framework For Designing
                 Information Elicitation Mechanisms That Reward
                 Truth-telling",
  journal =      j-TEAC,
  volume =       "7",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3296670",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3296670",
  abstract =     "In the setting where information cannot be verified,
                 we propose a simple yet powerful information
                  theoretical framework---the Mutual Information
                  Paradigm---for information elicitation mechanisms. Our
                 framework pays every agent a measure of mutual
                 information between her signal and a peer's signal. We
                 require that the mutual information measurement has the
                 key property that any ``data processing'' on the two
                 random variables will decrease the mutual information
                 between them. We identify such information measures
                 that generalize Shannon mutual information. Our Mutual
                 Information Paradigm overcomes the two main challenges
                 in information elicitation without verification: (1)
                 how to incentivize high-quality reports and avoid
                 agents colluding to report random or identical
                 responses; (2) how to motivate agents who believe they
                 are in the minority to report truthfully. Aided by the
                 information measures, we found (1) we use the paradigm
                 to design a family of novel mechanisms where
                 truth-telling is a dominant strategy and pays better
                 than any other strategy profile (in the multi-question,
                 detail free, minimal setting where the number of
                 questions is large); (2) we show the versatility of our
                 framework by providing a unified theoretical
                  understanding of existing mechanisms---Bayesian Truth
                  Serum Prelec (2004) and Dasgupta and Ghosh (2013)---by
                 mapping them into our framework such that theoretical
                 results of those existing mechanisms can be
                 reconstructed easily. We also give an impossibility
                  result that illustrates, in a certain sense, the
                 optimality of our framework.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Faliszewski:2019:CSR,
  author =       "Piotr Faliszewski and Piotr Skowron and Arkadii Slinko
                 and Nimrod Talmon",
  title =        "Committee Scoring Rules: Axiomatic Characterization
                 and Hierarchy",
  journal =      j-TEAC,
  volume =       "7",
  number =       "1",
  pages =        "3:1--3:??",
  month =        feb,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3296672",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3296672",
  abstract =     "Committee scoring voting rules are multiwinner
                 analogues of positional scoring rules, which constitute
                 an important subclass of single-winner voting rules. We
                 identify several natural subclasses of committee
                 scoring rules, namely, weakly separable,
                 representation-focused, top- k -counting, OWA-based,
                 and decomposable rules. We characterize SNTV, Bloc, and
                 k -Approval Chamberlin--Courant as the only nontrivial
                 rules in pairwise intersections of these classes. We
                 provide some axiomatic characterizations for these
                 classes, where monotonicity properties appear to be
                 especially useful. The class of decomposable rules is
                 new to the literature. We show that it strictly
                 contains the class of OWA-based rules and describe some
                 of the applications of decomposable rules.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Abraham:2019:DPL,
  author =       "Ittai Abraham and Danny Dolev and Joseph Y. Halpern",
  title =        "Distributed Protocols for Leader Election: a
                 Game-Theoretic Perspective",
  journal =      j-TEAC,
  volume =       "7",
  number =       "1",
  pages =        "4:1--4:??",
  month =        feb,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3303712",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:46 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/cryptography2010.bib;
                 http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3303712",
  abstract =     "We do a game-theoretic analysis of leader election,
                 under the assumption that each agent prefers to have
                 some leader than no leader at all. We show that it is
                 possible to obtain a fair Nash equilibrium, where each
                 agent has an equal probability of being elected leader,
                 in a completely connected network, in a bidirectional
                 ring, and a unidirectional ring, in the synchronous
                 setting. In the asynchronous setting, Nash equilibrium
                 is not quite the right solution concept. Rather, we
                 must consider ex post Nash equilibrium; this means that
                 we have a Nash equilibrium no matter what a scheduling
                 adversary does. We show that ex post Nash equilibrium
                 is attainable in the asynchronous setting in all the
                 networks we consider, using a protocol with bounded
                 running time. However, in the asynchronous setting, we
                  require that n {$>$} 2. We show that we can get a fair
                  ex post $\epsilon$-Nash equilibrium if n = 2 in the
                  asynchronous setting under some cryptographic
                  assumptions (specifically, the existence of one-way
                  functions), using a commitment protocol. We then
                  generalize these results to a setting where we can have
                  deviations by a coalition of size k. In this case, we
                  can get what we call a fair k -resilient equilibrium in
                  a completely connected network if n {$>$} 2 k; under
                  the same cryptographic assumptions, we can get a k
                  -resilient equilibrium in a completely connected
                  network, unidirectional ring, or bidirectional ring if
                  n {$>$} k. Finally, we show that under minimal
                 assumptions, not only do our protocols give a Nash
                 equilibrium, they also give a sequential equilibrium,
                 so players even play optimally off the equilibrium
                 path.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Hummel:2019:BLT,
  author =       "Patrick Hummel and Uri Nadav",
  title =        "Bid-Limited Targeting",
  journal =      j-TEAC,
  volume =       "7",
  number =       "2",
  pages =        "5:1--5:??",
  month =        aug,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3327968",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:47 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3327968",
  abstract =     "This article analyzes a mechanism for selling items in
                 auctions in which the auctioneer specifies a cap on the
                 ratio between the maximum and minimum bids that bidders
                 may use in the auctions. Such a mechanism is widely
                 used in online advertising through the caps that
                 companies impose on the minimum and maximum bid
                 multipliers that advertisers may use in targeting. When
                 bidders' values are independent and identically
                 distributed, this mechanism results in greater revenue
                 than allowing bidders to condition their bids on the
                 targeting information in an arbitrary way and also
                 almost always results in greater revenue than not
                 allowing bidders to target. Choosing the optimal cap on
                 the ratio between the maximum and minimum bids can also
                 be more important than introducing additional
                 competition in the auction. However, if bidders' values
                 are not identically distributed, pure-strategy
                 equilibria may fail to exist.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Aziz:2019:FHG,
  author =       "Haris Aziz and Florian Brandl and Felix Brandt and
                 Paul Harrenstein and Martin Olsen and Dominik Peters",
  title =        "Fractional Hedonic Games",
  journal =      j-TEAC,
  volume =       "7",
  number =       "2",
  pages =        "6:1--6:??",
  month =        aug,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3327970",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:47 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3327970",
  abstract =     "The work we present in this article initiated the
                 formal study of fractional hedonic games (FHGs),
                 coalition formation games in which the utility of a
                 player is the average value he ascribes to the members
                 of his coalition. Among other settings, this covers
                 situations in which players only distinguish between
                 friends and non-friends and desire to be in a coalition
                 in which the fraction of friends is maximal. FHGs thus
                 not only constitute a natural class of succinctly
                 representable coalition formation games but also
                 provide an interesting framework for network
                 clustering. We propose a number of conditions under
                 which the core of FHGs is non-empty and provide
                 algorithms for computing a core stable outcome. By
                 contrast, we show that the core may be empty in other
                 cases, and that it is computationally hard in general
                 to decide non-emptiness of the core.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Kash:2019:SPS,
  author =       "Ian A. Kash and Peter Key and Warut Suksompong",
  title =        "Simple Pricing Schemes for the Cloud",
  journal =      j-TEAC,
  volume =       "7",
  number =       "2",
  pages =        "7:1--7:??",
  month =        aug,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3327973",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:47 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3327973",
  abstract =     "The problem of pricing the cloud has attracted much
                 recent attention due to the widespread use of cloud
                 computing and cloud services. From a theoretical
                 perspective, several mechanisms that provide strong
                 efficiency or fairness guarantees and desirable
                 incentive properties have been designed. However, these
                 mechanisms often rely on a rigid model, with several
                 parameters needing to be precisely known for the
                 guarantees to hold. In this article, we consider a
                 stochastic model and show that it is possible to obtain
                 good welfare and revenue guarantees with simple
                 mechanisms that do not make use of the information on
                 some of these parameters. In particular, we prove that
                 a mechanism that sets the same price per timestep for
                 jobs of any length achieves at least 50\% of the
                 welfare and revenue obtained by a mechanism that can
                 set different prices for jobs of different lengths, and
                 the ratio can be improved if we have more specific
                 knowledge of some parameters. Similarly, a mechanism
                 that sets the same price for all servers even though
                 the servers may receive different kinds of jobs can
                 provide a reasonable welfare and revenue approximation
                 compared to a mechanism that is allowed to set
                 different prices for different servers.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Goel:2019:KVP,
  author =       "Ashish Goel and Anilesh K. Krishnaswamy and Sukolsak
                 Sakshuwong and Tanja Aitamurto",
  title =        "Knapsack Voting for Participatory Budgeting",
  journal =      j-TEAC,
  volume =       "7",
  number =       "2",
  pages =        "8:1--8:??",
  month =        aug,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3340230",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:47 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3340230",
  abstract =     "We address the question of aggregating the preferences
                 of voters in the context of participatory budgeting. We
                 scrutinize the voting method currently used in
                 practice, underline its drawbacks, and introduce a
                 novel scheme tailored to this setting, which we call
                 ``Knapsack Voting.'' We study its strategic
                  properties---we show that it is strategy-proof under a
                 natural model of utility (a dis-utility given by the
                  $l_1$ distance between the outcome and the true
                 preference of the voter) and ``partially''
                 strategy-proof under general additive utilities. We
                 extend Knapsack Voting to more general settings with
                 revenues, deficits, or surpluses and prove a similar
                 strategy-proofness result. To further demonstrate the
                 applicability of our scheme, we discuss its
                 implementation on the digital voting platform that we
                 have deployed in partnership with the local government
                 bodies in many cities across the nation. From voting
                 data thus collected, we present empirical evidence that
                 Knapsack Voting works well in practice.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Halpern:2019:SEC,
  author =       "Joseph Y. Halpern and Rafael Pass",
  title =        "Sequential Equilibrium in Computational Games",
  journal =      j-TEAC,
  volume =       "7",
  number =       "2",
  pages =        "9:1--9:??",
  month =        aug,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3340232",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:47 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3340232",
  abstract =     "We examine sequential equilibrium in the context of
                 computational games (Halpern and Pass 2015), where
                 agents are charged for computation. In such games, an
                 agent can rationally choose to forget, so issues of
                 imperfect recall arise. In this setting, we consider
                 two notions of sequential equilibrium. One is an ex
                 ante notion, where a player chooses his strategy before
                 the game starts and is committed to it, but chooses it
                 in such a way that it remains optimal even off the
                 equilibrium path. The second is an interim notion,
                 where a player can reconsider at each information set
                 whether he is doing the ``right'' thing, and if not,
                 can change his strategy. The two notions agree in games
                 of perfect recall, but not in games of imperfect
                 recall. Although the interim notion seems more
                 appealing, in a companion article (Halpern and Pass
                 2016), we argue that there are some deep conceptual
                 problems with it in standard games of imperfect recall.
                 We show that the conceptual problems largely disappear
                 in the computational setting. Moreover, in this
                 setting, under natural assumptions, the two notions
                 coincide.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Bei:2019:EUL,
  author =       "Xiaohui Bei and Jugal Garg and Martin Hoefer and Kurt
                 Mehlhorn",
  title =        "Earning and Utility Limits in {Fisher} Markets",
  journal =      j-TEAC,
  volume =       "7",
  number =       "2",
  pages =        "10:1--10:??",
  month =        aug,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3340234",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:47 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3340234",
  abstract =     "Earning limits and utility limits are novel aspects in
                 the classic Fisher market model. Sellers with earning
                 limits have bounds on their income and lower the supply
                 they bring to the market if income exceeds the limit.
                 Buyers with utility limits have an upper bound on the
                 amount of utility that they want to derive and lower
                 the budget they bring to the market if utility exceeds
                 the limit. Markets with these properties can have
                 multiple equilibria with different characteristics. We
                 analyze earning limits and utility limits in markets
                 with linear and spending-constraint utilities. For
                 markets with earning limits and spending-constraint
                 utilities, we show that equilibrium price vectors form
                 a lattice and the spending of buyers is unique in
                 non-degenerate markets. We provide a scaling-based
                 algorithm to compute an equilibrium in time O (n$^3$ l
                 log (l + nU)), where n is the number of agents, l
                 {$>$}= n a bound on the segments in the utility
                 functions, and U the largest integer in the market
                 representation. We show how to refine any equilibrium
                 in polynomial time to one with minimal prices or one
                 with maximal prices (if it exists). Moreover, our
                 algorithm can be used to obtain in polynomial time a
                 2-approximation for maximizing Nash social welfare in
                 multi-unit markets with indivisible items that come in
                 multiple copies. For markets with utility limits and
                  linear utilities, we show similar results---lattice
                 structure of price vectors, uniqueness of allocation in
                 non-degenerate markets, and polynomial-time refinement
                 procedures to obtain equilibria with minimal and
                 maximal prices. We complement these positive results
                 with hardness results for related computational
                 questions. We prove that it is NP-hard to compute a
                 market equilibrium that maximizes social welfare, and
                 it is PPAD-hard to find any market equilibrium with
                 utility functions with separate satiation points for
                 each buyer and each good.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Chen:2019:ISI,
  author =       "Yiling Chen and Dirk Bergemann",
  title =        "Introduction to the Special Issue on {EC'16}",
  journal =      j-TEAC,
  volume =       "7",
  number =       "3",
  pages =        "11:1--11:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3357235",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:47 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3357235",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Caragiannis:2019:UFM,
  author =       "Ioannis Caragiannis and David Kurokawa and Herv{\'e}
                 Moulin and Ariel D. Procaccia and Nisarg Shah and
                 Junxing Wang",
  title =        "The Unreasonable Fairness of Maximum {Nash} Welfare",
  journal =      j-TEAC,
  volume =       "7",
  number =       "3",
  pages =        "12:1--12:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3355902",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:47 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3355902",
  abstract =     "The maximum Nash welfare (MNW) solution---which selects
                 an allocation that maximizes the product of
                 utilities---is known to provide outstanding fairness
                 guarantees when allocating divisible goods. And while
                 it seems to lose its luster when applied to indivisible
                 goods, we show that, in fact, the MNW solution is
                 strikingly fair even in that setting. In particular, we
                 prove that it selects allocations that are envy-free up
                 to one good---a compelling notion that is quite elusive
                 when coupled with economic efficiency. We also
                 establish that the MNW solution provides a good
                 approximation to another popular (yet possibly
                 infeasible) fairness property, the maximin share
                 guarantee, in theory and---even more so---in practice.
                 While finding the MNW solution is computationally hard,
                 we develop a nontrivial implementation and demonstrate
                 that it scales well on real data. These results
                 establish MNW as a compelling solution for allocating
                 indivisible goods and underlie its deployment on a
                 popular fair-division website.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Epstein:2019:GBU,
  author =       "Ziv Epstein and Alexander Peysakhovich and David
                 Rand",
  title =        "The Good, the Bad, and the Unflinchingly Selfish:
                 Pro-sociality can be Well Predicted Using Payoffs and
                 Three Behavioral Types",
  journal =      j-TEAC,
  volume =       "7",
  number =       "3",
  pages =        "13:1--13:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3355947",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:47 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3355947",
  abstract =     "The human willingness to pay costs to benefit
                 anonymous others is often explained by social
                 preferences: rather than only valuing their own
                 material payoff, people also include the payoffs of
                 others in their utility function. But how successful is
                 this concept of outcome-based social preferences for
                 actually predicting out-of-sample behavior? We
                 investigate this question by having 1,067 participants
                 each make 20 Dictator Game decisions with randomized
                 parameters (e.g., outcomes for the self, for the other,
                 benefit/cost ratio of pro-sociality). We then use
                 machine learning to try to predict behavior by each
                 participant in each decision. A representative agent
                 model (a small, shared, set of parameters) predicts
                 better than random but still quite poorly (AUC = 0.69).
                 Allowing for full heterogeneity across individuals in
                 the mapping from decision-parameters to outcome yields
                 good predictive performance (AUC = 0.89). However, this
                 heterogeneous model is complicated and unwieldy, thus
                 we also investigate whether a simpler model can yield
                 similar performance. We find that the vast majority of
                 the predictive power (AUC = 0.88) is achieved by a
                 model that allows for three behavioral types. Finally,
                 we show that these behavioral types cannot be well
                 proxied for by other measures in psychology. This
                 final analysis adds
                 further evidence to the literature that human
                 ``cooperative phenotypes'' are indeed meaningful,
                 relatively orthogonal person-level traits.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Alon:2019:DES,
  author =       "Noga Alon and Michal Feldman and Yishay Mansour and
                 Sigal Oren and Moshe Tennenholtz",
  title =        "Dynamics of Evolving Social Groups",
  journal =      j-TEAC,
  volume =       "7",
  number =       "3",
  pages =        "14:1--14:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3355948",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:47 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3355948",
  abstract =     "Exclusive social groups are ones in which the group
                 members decide whether or not to admit a candidate to
                 the group. Examples of exclusive social groups include
                 academic departments and fraternal organizations. In
                 this article, we introduce an analytic framework for
                 studying the dynamics of exclusive social groups. In
                 our model, every group member is characterized by his
                 opinion, which is represented as a point on the real
                 line. The group evolves in discrete time steps through
                 a voting process carried out by the group's members.
                 Due to homophily, each member votes for the candidate
                 who is more similar to him (i.e., closer to him on the
                 line). An admission rule is then applied to determine
                 which candidate, if any, is admitted. We consider
                 several natural admission rules including majority and
                 consensus. We ask: How do different admission rules
                 affect the composition of the group in the long run? We
                 study both growing groups (where new members join old
                 ones) and fixed-size groups (where new members replace
                 those who quit). Our analysis reveals intriguing
                 phenomena and phase transitions, some of which are
                 quite counterintuitive.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Bilo:2019:DTP,
  author =       "Vittorio Bil{\`o} and Cosimo Vinci",
  title =        "Dynamic Taxes for Polynomial Congestion Games",
  journal =      j-TEAC,
  volume =       "7",
  number =       "3",
  pages =        "15:1--15:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3355946",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:47 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3355946",
  abstract =     "We consider the efficiency of taxation in congestion
                 games with polynomial latency functions along the line
                 of research initiated by Caragiannis et al. [ ACM
                 Transactions on Algorithms, 2010], who focused on both
                 pure and mixed Nash equilibria in games with affine
                 latencies only. By exploiting the primal-dual method
                 [Bil{\`o}, Proceedings of the 10th Workshop on
                 Approximation and Online Algorithms, 2012], we obtain
                 interesting upper bounds with respect to a variety of
                 different solution concepts ranging from approximate
                 pure Nash equilibria up to approximate coarse
                 correlated equilibria, and including also approximate
                 one-round walks starting from the empty state. Our
                 findings show a high beneficial effect of taxation that
                 increases more than linearly with the degree of the
                 latency functions. In some cases, a tight relationship
                 with some well-studied polynomials in Combinatorics and
                 Number Theory, such as the Touchard and the Geometric
                 polynomials, arises. In these cases, we can also show
                 matching lower bounds, albeit under mild assumptions;
                 interestingly, our upper bounds are derived by
                 exploiting the combinatorial definition of these
                 polynomials, while our lower bounds are constructed by
                 relying on their analytical characterization.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Assadi:2019:SMP,
  author =       "Sepehr Assadi and Sanjeev Khanna and Yang Li",
  title =        "The Stochastic Matching Problem with (Very) Few
                 Queries",
  journal =      j-TEAC,
  volume =       "7",
  number =       "3",
  pages =        "16:1--16:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3355903",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:47 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3355903",
  abstract =     "Motivated by an application in kidney exchange, we
                 study the following stochastic matching problem: We are
                 given a graph G = (V, E) (not necessarily bipartite),
                 where each edge in E is realized with some constant
                 probability p > 0, and the goal is to find a
                 maximum matching in the realized graph. An algorithm in
                 this setting is allowed to make queries to edges in E
                 to determine whether or not they are realized. We
                 design an adaptive algorithm for this problem that, for
                 any graph G, computes a (1- \epsilon)-approximate
                 maximum matching in the realized graph G$_p$ with high
                 probability, while making O (log (1/ \epsilon p)/
                 \epsilon p) queries per vertex, where the edges to
                 query are chosen adaptively in O (log (1/ \epsilon p)/
                 \epsilon p) rounds. We further present a non-adaptive
                 algorithm that makes O (log (1/ \epsilon p)/ \epsilon
                 p) queries per vertex and computes a (1/2-
                 \epsilon)-approximate maximum matching in G$_p$ with
                 high probability. Both our adaptive and non-adaptive
                 algorithms achieve the same approximation factor as the
                 previous best algorithms of Blum et al. (EC 2015) for
                 this problem, while requiring an exponentially smaller
                 number of per-vertex queries (and rounds of adaptive
                 queries for the adaptive algorithm). Our results settle
                 an open problem raised by Blum et al. by achieving only
                 a polynomial dependency on both \epsilon and p.
                 Moreover, the approximation guarantee of our algorithms
                 is instance-wise as opposed to only being competitive
                 in expectation as is the case for Blum et al. This is
                 of particular relevance to the key application of
                 stochastic matching in kidney exchange. We obtain our
                 results via two main techniques---namely, matching-covers
                 and vertex sparsification---that may be of independent
                 interest.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Roughgarden:2019:MRM,
  author =       "Tim Roughgarden and Joshua R. Wang",
  title =        "Minimizing Regret with Multiple Reserves",
  journal =      j-TEAC,
  volume =       "7",
  number =       "3",
  pages =        "17:1--17:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3355900",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 19 12:38:47 MDT 2019",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3355900",
  abstract =     "We study the problem of computing and learning
                 non-anonymous reserve prices to maximize revenue. We
                 first define the Maximizing Multiple Reserves (MMR)
                 problem in single-parameter matroid environments, where
                 the input is m valuation profiles v$^1$, \ldots, v$^m$,
                 indexed by the same n bidders, and the goal is to
                 compute the vector r of (non-anonymous) reserve prices
                 that maximizes the total revenue obtained on these
                 profiles by the VCG mechanism with reserves r. We prove
                 that the problem is APX-hard, even in the special case
                 of single-item environments, and give a polynomial-time
                 1/2-approximation algorithm for it in arbitrary matroid
                 environments. We then consider the online no-regret
                 learning problem and show how to exploit the special
                 structure of the MMR problem to translate our offline
                 approximation algorithm into an online learning
                 algorithm that achieves asymptotically time-averaged
                 revenue at least 1/2 times that of the best fixed
                 reserve prices in hindsight. On the negative side, we
                 show that, quite generally, computational hardness for
                 the offline optimization problem translates to
                 computational hardness for obtaining vanishing
                 time-averaged regret. Thus, our hardness result for the
                 MMR problem implies that computationally efficient
                 online learning requires approximation, even in the
                 special case of single-item auction environments.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "http://dl.acm.org/citation.cfm?id=2542174",
}

@Article{Hart:2020:BHS,
  author =       "Sergiu Hart and Philip J. Reny",
  title =        "The Better Half of Selling Separately",
  journal =      j-TEAC,
  volume =       "7",
  number =       "4",
  pages =        "18:1--18:18",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3369927",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Feb 3 08:53:37 MST 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3369927",
  abstract =     "Separate selling of two independent goods is shown to
                 yield at least 62% of the optimal revenue, and at least
                 73% when the goods satisfy the Myerson regularity
                 condition. This improves the 50% result of Hart and
                 Nisan [2017, originally circulated in \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "18",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@Article{Ahuja:2020:DMA,
  author =       "Kartik Ahuja and Mihaela {Van der Schaar}",
  title =        "Dynamic Matching and Allocation of Tasks",
  journal =      j-TEAC,
  volume =       "7",
  number =       "4",
  pages =        "19:1--19:27",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3369925",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Feb 3 08:53:37 MST 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3369925",
  abstract =     "In many two-sided markets, the parties to be matched
                 have incomplete information about their
                 characteristics. We consider the settings where the
                 parties engaged are extremely patient and are
                 interested in long-term partnerships. Hence, once the
                 final \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "19",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@Article{Ezra:2020:PMU,
  author =       "Tomer Ezra and Michal Feldman and Tim Roughgarden and
                 Warut Suksompong",
  title =        "Pricing Multi-Unit Markets",
  journal =      j-TEAC,
  volume =       "7",
  number =       "4",
  pages =        "20:1--20:29",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3373715",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Feb 3 08:53:37 MST 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3373715",
  abstract =     "We study the power and limitations of posted prices in
                 multi-unit markets, where agents arrive sequentially in
                 an arbitrary order. We prove upper and lower bounds on
                 the largest fraction of the optimal social welfare that
                 can be guaranteed with posted \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "20",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@Article{Kawase:2020:SPE,
  author =       "Yasushi Kawase and Yutaro Yamaguchi and Yu Yokoi",
  title =        "Subgame Perfect Equilibria of Sequential Matching
                 Games",
  journal =      j-TEAC,
  volume =       "7",
  number =       "4",
  pages =        "21:1--21:30",
  month =        feb,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3373717",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Mon Feb 3 08:53:37 MST 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3373717",
  abstract =     "We study a decentralized matching market in which
                 firms sequentially make offers to potential
                 workers. For each offer, the worker can choose
                 ``accept'' or ``reject,'' but the decision is
                 irrevocable. The acceptance of an offer guarantees her
                 job at the firm, \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "21",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@Article{Anonymous:2020:ISI,
  author =       "Anonymous",
  title =        "Introduction to the Special Issue on {EC'17}",
  journal =      j-TEAC,
  volume =       "8",
  number =       "1",
  pages =        "1:1--1:1",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3383321",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Apr 22 09:10:06 MDT 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3383321",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@Article{Agarwal:2020:PPH,
  author =       "Arpit Agarwal and Debmalya Mandal and David C. Parkes
                 and Nisarg Shah",
  title =        "Peer Prediction with Heterogeneous Users",
  journal =      j-TEAC,
  volume =       "8",
  number =       "1",
  pages =        "2:1--2:34",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3381519",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Apr 22 09:10:06 MDT 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3381519",
  abstract =     "Peer prediction mechanisms incentivize agents to
                 truthfully report their signals, in the absence of a
                 verification mechanism, by comparing their reports with
                 those of their peers. Prior work in this area is
                 essentially restricted to the case of \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@Article{Assadi:2020:CAD,
  author =       "Sepehr Assadi",
  title =        "Combinatorial Auctions Do Need Modest Interaction",
  journal =      j-TEAC,
  volume =       "8",
  number =       "1",
  pages =        "3:1--3:23",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3381521",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Apr 22 09:10:06 MDT 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3381521",
  abstract =     "We study the necessity of interaction for obtaining
                 efficient allocations in combinatorial auctions with
                 subadditive bidders. This problem was originally
                 introduced by Dobzinski, Nisan, and Oren (STOC'14) as
                 the following simple market scenario: m items \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@Article{Colini-Baldeschi:2020:AET,
  author =       "Riccardo Colini-Baldeschi and Paul W. Goldberg and
                 Bart de Keijzer and Stefano Leonardi and Tim
                 Roughgarden and Stefano Turchetta",
  title =        "Approximately Efficient Two-Sided Combinatorial
                 Auctions",
  journal =      j-TEAC,
  volume =       "8",
  number =       "1",
  pages =        "4:1--4:29",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3381523",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Apr 22 09:10:06 MDT 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3381523",
  abstract =     "We develop and extend a line of recent work on the
                 design of mechanisms for two-sided markets. The markets
                 we consider consist of buyers and sellers of a number
                 of items, and the aim of a mechanism is to improve the
                 social welfare by arranging purchases \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@Article{Barman:2020:AAM,
  author =       "Siddharth Barman and Sanath Kumar Krishnamurthy",
  title =        "Approximation Algorithms for Maximin Fair Division",
  journal =      j-TEAC,
  volume =       "8",
  number =       "1",
  pages =        "5:1--5:28",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3381525",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Apr 22 09:10:06 MDT 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3381525",
  abstract =     "We consider the problem of allocating indivisible
                 goods fairly among n agents who have additive and
                 submodular valuations for the goods. Our fairness
                 guarantees are in terms of the maximin share, which is
                 defined to be the maximum value that an agent
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@Article{Roth:2020:MDP,
  author =       "Aaron Roth and Aleksandrs Slivkins and Jonathan Ullman
                 and Zhiwei Steven Wu",
  title =        "Multidimensional Dynamic Pricing for Welfare
                 Maximization",
  journal =      j-TEAC,
  volume =       "8",
  number =       "1",
  pages =        "6:1--6:35",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3381527",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Wed Apr 22 09:10:06 MDT 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3381527",
  abstract =     "We study the problem of a seller dynamically pricing d
                 distinct types of indivisible goods, when faced with
                 the online arrival of unit-demand buyers drawn
                 independently from an unknown distribution. The goods
                 are not in limited supply, but can only be \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@Article{Feldman:2020:DSG,
  author =       "Moran Feldman and Moshe Tennenholtz and Omri
                 Weinstein",
  title =        "Distributed Signaling Games",
  journal =      j-TEAC,
  volume =       "8",
  number =       "2",
  pages =        "7:1--7:26",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3381529",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue May 19 09:17:46 MDT 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3381529",
  abstract =     "A recurring theme in recent computer science
                 literature is that proper design of signaling schemes
                 is a crucial aspect of effective mechanisms aiming to
                 optimize social welfare or revenue. One of the research
                 endeavors of this line of work is \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@Article{Song:2020:PBS,
  author =       "Jiayi Song and Roch Gu{\'e}rin",
  title =        "Pricing (and Bidding) Strategies for Delay
                 Differentiated Cloud Services",
  journal =      j-TEAC,
  volume =       "8",
  number =       "2",
  pages =        "8:1--8:58",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3381531",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue May 19 09:17:46 MDT 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3381531",
  abstract =     "We consider a cloud provider that seeks to maximize
                 its revenue by offering services with different
                 trade-offs between cost and timeliness of job
                 completion. Spot instances and preemptible instances
                 are examples of such services, with, in both cases,
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@Article{Chen:2020:PG,
  author =       "Yiling Chen and Or Sheffet and Salil Vadhan",
  title =        "Privacy Games",
  journal =      j-TEAC,
  volume =       "8",
  number =       "2",
  pages =        "9:1--9:37",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3381533",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue May 19 09:17:46 MDT 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3381533",
  abstract =     "The problem of analyzing the effect of privacy
                 concerns on the behavior of selfish utility-maximizing
                 agents has received much attention lately. Privacy
                 concerns are often modeled by altering the utility
                 functions of agents to consider also their \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@Article{Gast:2020:LRS,
  author =       "Nicolas Gast and Stratis Ioannidis and Patrick Loiseau
                 and Benjamin Roussillon",
  title =        "Linear Regression from Strategic Data Sources",
  journal =      j-TEAC,
  volume =       "8",
  number =       "2",
  pages =        "10:1--10:24",
  month =        may,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3391436",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Tue May 19 09:17:46 MDT 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3391436",
  abstract =     "Linear regression is a fundamental building block of
                 statistical data analysis. It amounts to estimating the
                 parameters of a linear model that maps input features
                 to corresponding outputs. In the classical setting
                 where the precision of each data point \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@article{Gairing:2020:EEE,
  author =       {Martin Gairing and Kostas Kollias and Grammateia
                 Kotsialou},
  title =        {Existence and Efficiency of Equilibria for
                 Cost-Sharing in Generalized Weighted Congestion Games},
  journal =      j-TEAC,
  volume =       {8},
  number =       {2},
  pages =        {11:1--11:28},
  month =        may,
  year =         {2020},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/3391434},
  ISSN =         {2167-8375 (print), 2167-8383 (electronic)},
  ISSN-L =       {2167-8375},
  bibdate =      {Tue May 19 09:17:46 MDT 2020},
  bibsource =    {http://www.math.utah.edu/pub/tex/bib/teac.bib},
  URL =          {https://dl.acm.org/doi/abs/10.1145/3391434},
  abstract =     {This work studies the impact of cost-sharing methods
                 on the existence and efficiency of (pure) Nash
                 equilibria in weighted congestion games. We also study
                 generalized weighted congestion games, where each
                 player may control multiple commodities. Our \ldots{}},
  acknowledgement = ack-nhfb,
  articleno =    {11},
  fjournal =     {ACM Transactions on Economics and Computation},
  journal-URL =  {https://dl.acm.org/loi/teac},
}

@Article{Knop:2020:VBS,
  author =       "Du{\v{s}}an Knop and Martin Kouteck{\'y} and Matthias
                 Mnich",
  title =        "Voting and Bribing in Single-Exponential Time",
  journal =      j-TEAC,
  volume =       "8",
  number =       "3",
  pages =        "12:1--12:28",
  month =        oct,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3396855",
  ISSN =         "2167-8375 (print), 2167-8383 (electronic)",
  ISSN-L =       "2167-8375",
  bibdate =      "Sat Oct 3 07:43:29 MDT 2020",
  bibsource =    "http://www.math.utah.edu/pub/tex/bib/teac.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3396855",
  abstract =     "We introduce a general problem about bribery in voting
                 systems. In the R-Multi-Bribery problem, the goal is to
                 bribe a set of voters at minimum cost such that a
                 desired candidate is a winner in the perturbed election
                 under the voting rule R. Voters \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "ACM Transactions on Economics and Computation",
  journal-URL =  "https://dl.acm.org/loi/teac",
}

@article{Shah:2020:AVI,
  author =       {Nihar B. Shah and Dengyong Zhou},
  title =        {Approval Voting and Incentives in Crowdsourcing},
  journal =      j-TEAC,
  volume =       {8},
  number =       {3},
  pages =        {13:1--13:40},
  month =        oct,
  year =         {2020},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/3396863},
  ISSN =         {2167-8375 (print), 2167-8383 (electronic)},
  ISSN-L =       {2167-8375},
  bibdate =      {Sat Oct 3 07:43:29 MDT 2020},
  bibsource =    {http://www.math.utah.edu/pub/tex/bib/teac.bib},
  URL =          {https://dl.acm.org/doi/10.1145/3396863},
  abstract =     {The growing need for labeled training data has made
                 crowdsourcing a vital tool for developing machine
                 learning applications. Here, workers on a crowdsourcing
                 platform are typically shown a list of unlabeled items,
                 and for each of these items, are asked \ldots{}},
  acknowledgement = ack-nhfb,
  articleno =    {13},
  fjournal =     {ACM Transactions on Economics and Computation},
  journal-URL =  {https://dl.acm.org/loi/teac},
}

@article{Benabbou:2020:PQB,
  author =       {Nawal Benabbou and Mithun Chakraborty and Xuan-Vinh Ho
                 and Jakub Sliwinski and Yair Zick},
  title =        {The Price of Quota-based Diversity in Assignment
                 Problems},
  journal =      j-TEAC,
  volume =       {8},
  number =       {3},
  pages =        {14:1--14:32},
  month =        oct,
  year =         {2020},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/3411513},
  ISSN =         {2167-8375 (print), 2167-8383 (electronic)},
  ISSN-L =       {2167-8375},
  bibdate =      {Sat Oct 3 07:43:29 MDT 2020},
  bibsource =    {http://www.math.utah.edu/pub/tex/bib/teac.bib},
  URL =          {https://dl.acm.org/doi/10.1145/3411513},
  abstract =     {In this article, we introduce and analyze an extension
                 to the matching problem on a weighted bipartite graph
                 (i.e., the assignment problem): Assignment with Type
                 Constraints. Here, the two parts of the graph are each
                 partitioned into subsets, called \ldots{}},
  acknowledgement = ack-nhfb,
  articleno =    {14},
  fjournal =     {ACM Transactions on Economics and Computation},
  journal-URL =  {https://dl.acm.org/loi/teac},
}

@article{Cummings:2020:PLP,
  author =       {Rachel Cummings and David M. Pennock and Jennifer
                 Wortman Vaughan},
  title =        {The Possibilities and Limitations of Private
                 Prediction Markets},
  journal =      j-TEAC,
  volume =       {8},
  number =       {3},
  pages =        {15:1--15:24},
  month =        oct,
  year =         {2020},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/3412348},
  ISSN =         {2167-8375 (print), 2167-8383 (electronic)},
  ISSN-L =       {2167-8375},
  bibdate =      {Sat Oct 3 07:43:29 MDT 2020},
  bibsource =    {http://www.math.utah.edu/pub/tex/bib/teac.bib},
  URL =          {https://dl.acm.org/doi/10.1145/3412348},
  abstract =     {We consider the design of private prediction markets,
                 financial markets designed to elicit predictions about
                 uncertain events without revealing too much information
                 about market participants' actions or beliefs. Our goal
                 is to design market mechanisms \ldots{}},
  acknowledgement = ack-nhfb,
  articleno =    {15},
  fjournal =     {ACM Transactions on Economics and Computation},
  journal-URL =  {https://dl.acm.org/loi/teac},
}

@article{Caragiannis:2020:HEC,
  author =       {Ioannis Caragiannis and George A. Krimpas and
                 Alexandros A. Voudouris},
  title =        {How Effective Can Simple Ordinal Peer Grading Be?},
  journal =      j-TEAC,
  volume =       {8},
  number =       {3},
  pages =        {16:1--16:37},
  month =        oct,
  year =         {2020},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/3412347},
  ISSN =         {2167-8375 (print), 2167-8383 (electronic)},
  ISSN-L =       {2167-8375},
  bibdate =      {Sat Oct 3 07:43:29 MDT 2020},
  bibsource =    {http://www.math.utah.edu/pub/tex/bib/teac.bib},
  URL =          {https://dl.acm.org/doi/10.1145/3412347},
  abstract =     {Ordinal peer grading has been proposed as a simple and
                 scalable solution for computing reliable information
                 about student performance in massive open online
                 courses. The idea is to outsource the grading task to
                 the students themselves as follows. After \ldots{}},
  acknowledgement = ack-nhfb,
  articleno =    {16},
  fjournal =     {ACM Transactions on Economics and Computation},
  journal-URL =  {https://dl.acm.org/loi/teac},
}