@Preamble{
"\hyphenation{Ka-wa-ra-ba-ya-shi Ma-kar-y-chev Thu-ri-mel-la Ver-strae-te}"
# "\input bibnames.sty"
# "\input path.sty"
# "\ifx \undefined \mathbb \def \mathbb #1{{\bf #1}}\fi"
# "\ifx \undefined \MST \def \MST {{\rm MST}}\fi"
# "\ifx \undefined \occ \def \occ {{\rm occ}}\fi"
# "\ifx \undefined \polylog \def \polylog {{\rm polylog}}\fi"
# "\ifx \undefined \polyloglog \def \polyloglog {{\rm polyloglog}}\fi"
# "\ifx \undefined \poly \def \poly {{\rm poly}}\fi"
# "\ifx \undefined \rank \def \rank {{\rm rank}}\fi"
# "\ifx \undefined \select \def \select {{\rm select}}\fi"
# "\ifx \undefined \SORT \def \SORT {{\rm SORT}}\fi"
}
@String{ack-nhfb = "Nelson H. F. Beebe,
University of Utah,
Department of Mathematics, 110 LCB,
155 S 1400 E RM 233,
Salt Lake City, UT 84112-0090, USA,
Tel: +1 801 581 5254,
FAX: +1 801 581 4148,
e-mail: \path|beebe@math.utah.edu|,
\path|beebe@acm.org|,
\path|beebe@computer.org| (Internet),
URL: \path|https://www.math.utah.edu/~beebe/|"}
@String{j-TALG = "ACM Transactions on Algorithms"}
@Article{Gabow:2005:EF,
author = "Harold N. Gabow",
title = "{Editor}'s foreword",
journal = j-TALG,
volume = "1",
number = "1",
pages = "1--1",
month = jul,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:55 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Yuster:2005:FSM,
author = "Raphael Yuster and Uri Zwick",
title = "Fast sparse matrix multiplication",
journal = j-TALG,
volume = "1",
number = "1",
pages = "2--13",
month = jul,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1077464.1077466",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:55 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Let $A$ and $B$ be two $ n \times n$ matrices over a
ring $R$ (e.g., the reals or the integers) each
containing at most $m$ nonzero elements. We present a
new algorithm that multiplies $A$ and $B$ using $
O(m^{0.7} n^{1.2} + n^{2 + o(1)})$ algebraic operations
(i.e., multiplications, additions and subtractions)
over $R$. The na{\"\i}ve matrix multiplication
algorithm, on the other hand, may need to perform $
\Omega (m n)$ operations to accomplish the same task.
For $ m \leq n^{1.14}$, the new algorithm performs an
almost optimal number of only $ n^{2 + o(1)}$ operations.
For $ m \leq n^{1.68}$, the new algorithm is also
faster than the best known matrix multiplication
algorithm for dense matrices which uses $ O(n^{2.38})$
algebraic operations. The new algorithm is obtained
using a surprisingly straightforward combination of a
simple combinatorial idea and existing fast rectangular
matrix multiplication algorithms. We also obtain
improved algorithms for the multiplication of more than
two sparse matrices. As the known fast rectangular
matrix multiplication algorithms are far from being
practical, our result, at least for now, is only of
theoretical value.",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
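The complexity bound in Yuster:2005:FSM is easiest to appreciate next to its baseline. The sketch below is purely illustrative (not code from the paper; the representation of A as nonzero triples, B as a list of dense rows, and all names are assumptions): it spells out the naive method the abstract refers to, in which each of the m nonzeros of A is paired with a full row of B, for on the order of m n ring operations.

def sparse_times_dense_rows(A_nonzeros, B_rows):
    """Baseline sparse-times-matrix product.

    A_nonzeros is a list of triples (i, k, value) for the nonzeros of A;
    B_rows is a list of n dense rows of B.  Every nonzero of A touches a
    full row of B, so with m nonzeros this performs m * n multiplications,
    the Omega(m n) baseline mentioned in the abstract above.
    """
    n = len(B_rows)
    C = [[0] * n for _ in range(n)]
    for i, k, a in A_nonzeros:
        row = B_rows[k]
        for j in range(n):
            C[i][j] += a * row[j]
    return C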
@Article{Edmonds:2005:MAL,
author = "Jeff Edmonds and Kirk Pruhs",
title = "A maiden analysis of longest wait first",
journal = j-TALG,
volume = "1",
number = "1",
pages = "14--32",
month = jul,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:55 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Demaine:2005:FPA,
author = "Erik D. Demaine and Fedor V. Fomin and Mohammadtaghi
Hajiaghayi and Dimitrios M. Thilikos",
title = "Fixed-parameter algorithms for $ (k, r)$-center in
planar graphs and map graphs",
journal = j-TALG,
volume = "1",
number = "1",
pages = "33--47",
month = jul,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:55 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Adler:2005:PMM,
author = "Micah Adler and Dan Rubenstein",
title = "Pricing multicasting in more flexible network models",
journal = j-TALG,
volume = "1",
number = "1",
pages = "48--73",
month = jul,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:55 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Even:2005:NDP,
author = "Guy Even and Guy Kortsarz and Wolfgang Slany",
title = "On network design problems: fixed cost flows and the
covering {Steiner} problem",
journal = j-TALG,
volume = "1",
number = "1",
pages = "74--101",
month = jul,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:55 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Alstrup:2005:BBC,
author = "Stephen Alstrup and Thore Husfeldt and Theis Rauhe and
Mikkel Thorup",
title = "Black box for constant-time insertion in priority
queues (note)",
journal = j-TALG,
volume = "1",
number = "1",
pages = "102--106",
month = jul,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:55 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Vinkemeier:2005:LTA,
author = "Doratha E. Drake Vinkemeier and Stefan Hougardy",
title = "A linear-time approximation algorithm for weighted
matchings in graphs",
journal = j-TALG,
volume = "1",
number = "1",
pages = "107--122",
month = jul,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:55 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Grabner:2005:ALC,
author = "Peter J. Grabner and Clemens Heuberger and Helmut
Prodinger and J{\"o}rg M. Thuswaldner",
title = "Analysis of linear combination algorithms in
cryptography",
journal = j-TALG,
volume = "1",
number = "1",
pages = "123--142",
month = jul,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1077464.1077473",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:55 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Several cryptosystems rely on fast calculations of
linear combinations in groups. One way to achieve this
is to use joint signed binary digit expansions of small
``weight.'' We study two algorithms, one based on
nonadjacent forms of the coefficients of the linear
combination, the other based on a certain joint sparse
form specifically adapted to this problem. Both methods
are sped up using the sliding windows approach combined
with precomputed lookup tables. We give explicit and
asymptotic results for the number of group operations
needed, assuming uniform distribution of the
coefficients. Expected values, variances and a central
limit theorem are proved using generating functions.
Furthermore, we provide a new algorithm that calculates
the digits of an optimal expansion of pairs of integers
from left to right. This avoids storing the whole
expansion, which is needed with the previously known
right-to-left methods, and allows an online
computation.",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
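One ingredient of Grabner:2005:ALC is the nonadjacent form (NAF): the signed binary expansion with digits in {-1, 0, 1} and no two adjacent nonzero digits, which keeps the number of group operations in a double-and-add computation small. The right-to-left sketch below is a standard textbook routine given for orientation, not the left-to-right algorithm contributed by the paper; the function name is an assumption.

def nonadjacent_form(n):
    """Return the NAF digits of the nonnegative integer n, least significant first.

    The digit is chosen so that subtracting it always leaves an even number to
    halve, which forces a 0 after every nonzero digit.
    """
    digits = []
    while n > 0:
        if n % 2 == 1:
            d = 2 - (n % 4)   # +1 if n is 1 mod 4, -1 if n is 3 mod 4
        else:
            d = 0
        digits.append(d)
        n = (n - d) // 2
    return digits

For example, nonadjacent_form(7) returns [-1, 0, 0, 1], i.e. 7 = 8 - 1, with two nonzero digits instead of the three in the binary expansion 111.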
@Article{Cechlarova:2005:GSR,
author = "Katar{\'\i}na Cechl{\'a}rov{\'a} and Tam{\'a}s
Fleiner",
title = "On a generalization of the stable roommates problem",
journal = j-TALG,
volume = "1",
number = "1",
pages = "143--156",
month = jul,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:55 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Khuller:2005:PC,
author = "Samir Khuller",
title = "Problems column",
journal = j-TALG,
volume = "1",
number = "1",
pages = "157--159",
month = jul,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:55 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Johnson:2005:NCC,
author = "David S. Johnson",
title = "The {NP}-completeness column",
journal = j-TALG,
volume = "1",
number = "1",
pages = "160--176",
month = jul,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:55 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Janson:2005:IDL,
author = "Svante Janson",
title = "Individual displacements for linear probing hashing
with different insertion policies",
journal = j-TALG,
volume = "1",
number = "2",
pages = "177--213",
month = oct,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1103963.1103964",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:56 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study the distribution of the individual
displacements in hashing with linear probing for three
different versions: First Come, Last Come and Robin
Hood. Asymptotic distributions and their moments are
found when the size of the hash table tends to infinity
with the proportion of occupied cells converging to
some $ \alpha $, $ 0 < \alpha < 1 $. (In the case of
Last Come, the results are more complicated and less
complete than in the other cases.) We also show, using
the diagonal Poisson transform studied by Poblete,
Viola and Munro, that exact expressions for finite $m$
and $n$ can be obtained from the limits as $ m, n
\rightarrow \infty $. We end with some results,
conjectures and questions about the shape of the limit
distributions. These have some relevance for computer
applications.",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
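The displacement studied in Janson:2005:IDL (and in the following entry) is the distance between the slot an element ends up in and its hash position. The sketch below shows Robin Hood insertion into a linear probing table; it is only an illustration of the policy being analyzed (the table layout, hash_fn, and the assumption that an empty slot remains are mine, not the paper's).

def robin_hood_insert(table, key, hash_fn):
    """Insert key into a linear probing table (a list with None for empty slots).

    Robin Hood rule: when two keys compete for a slot, the one currently
    farther from its hash position keeps it, so displacements are
    redistributed from the "rich" to the "poor" without changing their sum.
    Assumes the table has at least one empty slot.
    """
    m = len(table)
    pos = hash_fn(key) % m
    disp = 0
    while table[pos] is not None:
        resident = table[pos]
        resident_disp = (pos - hash_fn(resident)) % m
        if resident_disp < disp:
            # The resident is closer to home than the newcomer: evict it and
            # continue inserting the displaced resident instead.
            table[pos], key, disp = key, resident, resident_disp
        pos = (pos + 1) % m
        disp += 1
    table[pos] = key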
@Article{Viola:2005:EDI,
author = "Alfredo Viola",
title = "Exact distribution of individual displacements in
linear probing hashing",
journal = j-TALG,
volume = "1",
number = "2",
pages = "214--242",
month = oct,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1103963.1103965",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:56 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "This paper studies the distribution of individual
displacements for the standard and the Robin Hood
linear probing hashing algorithms. When a table of size
$m$ has $n$ elements, the distribution of the search
cost of a random element is studied for both
algorithms. Specifically, exact distributions for fixed
$m$ and $n$ are found as well as when the table is $
\alpha $-full, with $ \alpha $ strictly smaller than 1.
Moreover, for full tables, limit laws for both
algorithms are derived.",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Alstrup:2005:MIF,
author = "Stephen Alstrup and Jacob Holm and Mikkel Thorup and
Kristian De Lichtenberg",
title = "Maintaining information in fully dynamic trees with
top trees",
journal = j-TALG,
volume = "1",
number = "2",
pages = "243--264",
month = oct,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:56 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Jothi:2005:AAC,
author = "Raja Jothi and Balaji Raghavachari",
title = "Approximation algorithms for the capacitated minimum
spanning tree problem and its variants in network
design",
journal = j-TALG,
volume = "1",
number = "2",
pages = "265--282",
month = oct,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:56 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Elkin:2005:CAS,
author = "Michael Elkin",
title = "Computing almost shortest paths",
journal = j-TALG,
volume = "1",
number = "2",
pages = "283--323",
month = oct,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:56 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Carvalho:2005:VAE,
author = "Marcelo H. {De Carvalho} and Joseph Cheriyan",
title = "An {$ O(V E) $} algorithm for ear decompositions of
matching-covered graphs",
journal = j-TALG,
volume = "1",
number = "2",
pages = "324--337",
month = oct,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1103963.1103969",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:56 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Goel:2005:AMF,
author = "Ashish Goel and Adam Meyerson and Serge Plotkin",
title = "Approximate majorization and fair online load
balancing",
journal = j-TALG,
volume = "1",
number = "2",
pages = "338--349",
month = oct,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:56 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Chrobak:2005:GAM,
author = "Marek Chrobak and Petr Kolman and Ji{\v{r}}{\'\i}
Sgall",
title = "The greedy algorithm for the minimum common string
partition problem",
journal = j-TALG,
volume = "1",
number = "2",
pages = "350--366",
month = oct,
year = "2005",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Dec 13 18:19:56 MST 2005",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Sawada:2006:GRF,
author = "Joe Sawada",
title = "Generating rooted and free plane trees",
journal = j-TALG,
volume = "2",
number = "1",
pages = "1--13",
month = jan,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Fri May 26 08:40:43 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Hegde:2006:FSE,
author = "Rajneesh Hegde",
title = "Finding $3$-shredders efficiently",
journal = j-TALG,
volume = "2",
number = "1",
pages = "14--43",
month = jan,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Fri May 26 08:40:43 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Gramm:2006:PMA,
author = "Jens Gramm and Jiong Guo and Rolf Niedermeier",
title = "Pattern matching for arc-annotated sequences",
journal = j-TALG,
volume = "2",
number = "1",
pages = "44--65",
month = jan,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Fri May 26 08:40:43 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Hassin:2006:MGV,
author = "Refael Hassin and Asaf Levin",
title = "The minimum generalized vertex cover problem",
journal = j-TALG,
volume = "2",
number = "1",
pages = "66--78",
month = jan,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Fri May 26 08:40:43 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Epstein:2006:OSS,
author = "Leah Epstein and Rob {Van Stee}",
title = "Online scheduling of splittable tasks",
journal = j-TALG,
volume = "2",
number = "1",
pages = "79--94",
month = jan,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Fri May 26 08:40:43 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Gonzalez:2006:MTC,
author = "Teofilo F. Gonzalez and Joseph Y.-T. Leung and Michael
Pinedo",
title = "Minimizing total completion time on uniform machines
with deadline constraints",
journal = j-TALG,
volume = "2",
number = "1",
pages = "95--115",
month = jan,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Fri May 26 08:40:43 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Gandhi:2006:IRD,
author = "Rajiv Gandhi and Magn{\'u}s M. Halld{\'o}rsson and Guy
Kortsarz and Hadas Shachnai",
title = "Improved results for data migration and open shop
scheduling",
journal = j-TALG,
volume = "2",
number = "1",
pages = "116--129",
month = jan,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Fri May 26 08:40:43 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
note = "See corrigendum \cite{Gandhi:2013:CIR}.",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Khuller:2006:PC,
author = "Samir Khuller",
title = "Problems column",
journal = j-TALG,
volume = "2",
number = "1",
pages = "130--134",
month = jan,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Fri May 26 08:40:43 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Korsh:2006:LGC,
author = "James Korsh and Paul Lafollette",
title = "A loopless {Gray} code for rooted trees",
journal = j-TALG,
volume = "2",
number = "2",
pages = "135--152",
month = apr,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Wed Aug 23 05:38:18 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Alon:2006:ACS,
author = "Noga Alon and Dana Moshkovitz and Shmuel Safra",
title = "Algorithmic construction of sets for
{$k$}-restrictions",
journal = j-TALG,
volume = "2",
number = "2",
pages = "153--177",
month = apr,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Wed Aug 23 05:38:18 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Lau:2006:BRG,
author = "Lap Chi Lau",
title = "Bipartite roots of graphs",
journal = j-TALG,
volume = "2",
number = "2",
pages = "178--208",
month = apr,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Wed Aug 23 05:38:18 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Agarwal:2006:EAB,
author = "Pankaj K. Agarwal and Boris Aronov and Vladlen
Koltun",
title = "Efficient algorithms for bichromatic separability",
journal = j-TALG,
volume = "2",
number = "2",
pages = "209--227",
month = apr,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Wed Aug 23 05:38:18 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Epstein:2006:SU,
author = "Leah Epstein and Rob {Van Stee}",
title = "This side up!",
journal = j-TALG,
volume = "2",
number = "2",
pages = "228--243",
month = apr,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Wed Aug 23 05:38:18 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Huo:2006:MMF,
author = "Yumei Huo and Joseph Y.-T. Leung",
title = "Minimizing mean flow time for {UET} tasks",
journal = j-TALG,
volume = "2",
number = "2",
pages = "244--262",
month = apr,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Wed Aug 23 05:38:18 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Hassin:2006:RST,
author = "Refael Hassin and Danny Segev",
title = "Robust subgraphs for trees and paths",
journal = j-TALG,
volume = "2",
number = "2",
pages = "263--281",
month = apr,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Wed Aug 23 05:38:18 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Azar:2006:IAC,
author = "Yossi Azar and Yossi Richter",
title = "An improved algorithm for {CIOQ} switches",
journal = j-TALG,
volume = "2",
number = "2",
pages = "282--295",
month = apr,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Wed Aug 23 05:38:18 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Berend:2006:CMP,
author = "Daniel Berend and Amir Sapir",
title = "The cyclic multi-peg {Tower of Hanoi}",
journal = j-TALG,
volume = "2",
number = "3",
pages = "297--317",
month = jul,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Thu Sep 21 08:13:30 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Drmota:2006:RFA,
author = "Michael Drmota and Helmut Prodinger",
title = "The register function for $t$-ary trees",
journal = j-TALG,
volume = "2",
number = "3",
pages = "318--334",
month = jul,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Thu Sep 21 08:13:30 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Kowalik:2006:OBL,
author = "Lukasz Kowalik and Maciej Kurowski",
title = "Oracles for bounded-length shortest paths in planar
graphs",
journal = j-TALG,
volume = "2",
number = "3",
pages = "335--363",
month = jul,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Thu Sep 21 08:13:30 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Katriel:2006:OTO,
author = "Irit Katriel and Hans L. Bodlaender",
title = "Online topological ordering",
journal = j-TALG,
volume = "2",
number = "3",
pages = "364--379",
month = jul,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Thu Sep 21 08:13:30 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Duncan:2006:OCG,
author = "Christian A. Duncan and Stephen G. Kobourov and V. S.
Anil Kumar",
title = "Optimal constrained graph exploration",
journal = j-TALG,
volume = "2",
number = "3",
pages = "380--402",
month = jul,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Thu Sep 21 08:13:30 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Raman:2006:FFP,
author = "Venkatesh Raman and Saket Saurabh and C. R.
Subramanian",
title = "Faster fixed parameter tractable algorithms for
finding feedback vertex sets",
journal = j-TALG,
volume = "2",
number = "3",
pages = "403--415",
month = jul,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Thu Sep 21 08:13:30 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Jansen:2006:AAS,
author = "Klaus Jansen and Hu Zhang",
title = "An approximation algorithm for scheduling malleable
tasks under general precedence constraints",
journal = j-TALG,
volume = "2",
number = "3",
pages = "416--434",
month = jul,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Thu Sep 21 08:13:30 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Feigenbaum:2006:SMC,
author = "Joan Feigenbaum and Yuval Ishai and Tal Malkin and
Kobbi Nissim and Martin J. Strauss and Rebecca N.
Wright",
title = "Secure multiparty computation of approximations",
journal = j-TALG,
volume = "2",
number = "3",
pages = "435--472",
month = jul,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Thu Sep 21 08:13:30 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Johnson:2006:NCC,
author = "David S. Johnson",
title = "The {NP}-completeness column: {The} many limits on
approximation",
journal = j-TALG,
volume = "2",
number = "3",
pages = "473--489",
month = jul,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Thu Sep 21 08:13:30 MDT 2006",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Lopez-Ortiz:2006:F,
author = "Alejandro L{\'o}pez-Ortiz and J. Ian Munro",
title = "Foreword",
journal = j-TALG,
volume = "2",
number = "4",
pages = "491--491",
month = oct,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Eppstein:2006:QAM,
author = "David Eppstein",
title = "Quasiconvex analysis of multivariate recurrence
equations for backtracking algorithms",
journal = j-TALG,
volume = "2",
number = "4",
pages = "492--509",
month = oct,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Geary:2006:SOT,
author = "Richard F. Geary and Rajeev Raman and Venkatesh
Raman",
title = "Succinct ordinal trees with level-ancestor queries",
journal = j-TALG,
volume = "2",
number = "4",
pages = "510--534",
month = oct,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Mendelson:2006:MPQ,
author = "Ran Mendelson and Robert E. Tarjan and Mikkel Thorup
and Uri Zwick",
title = "Melding priority queues",
journal = j-TALG,
volume = "2",
number = "4",
pages = "535--556",
month = oct,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Baswana:2006:ADO,
author = "Surender Baswana and Sandeep Sen",
title = "Approximate distance oracles for unweighted graphs in
expected {$ O(n^2) $} time",
journal = j-TALG,
volume = "2",
number = "4",
pages = "557--577",
month = oct,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Demetrescu:2006:EAD,
author = "Camil Demetrescu and Giuseppe F. Italiano",
title = "Experimental analysis of dynamic all pairs shortest
path algorithms",
journal = j-TALG,
volume = "2",
number = "4",
pages = "578--601",
month = oct,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Irving:2006:RMM,
author = "Robert W. Irving and Telikepalli Kavitha and Kurt
Mehlhorn and Dimitrios Michail and Katarzyna E.
Paluch",
title = "Rank-maximal matchings",
journal = j-TALG,
volume = "2",
number = "4",
pages = "602--610",
month = oct,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Foschini:2006:WIE,
author = "Luca Foschini and Roberto Grossi and Ankur Gupta and
Jeffrey Scott Vitter",
title = "When indexing equals compression: {Experiments} with
compressing suffix arrays and applications",
journal = j-TALG,
volume = "2",
number = "4",
pages = "611--639",
month = oct,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Alon:2006:GAO,
author = "Noga Alon and Baruch Awerbuch and Yossi Azar and Niv
Buchbinder and Joseph (Seffi) Naor",
title = "A general approach to online network optimization
problems",
journal = j-TALG,
volume = "2",
number = "4",
pages = "640--660",
month = oct,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Evans:2006:OSV,
author = "William Evans and David Kirkpatrick",
title = "Optimally scheduling video-on-demand to minimize delay
when sender and receiver bandwidth may differ",
journal = j-TALG,
volume = "2",
number = "4",
pages = "661--678",
month = oct,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Beier:2006:CES,
author = "Rene Beier and Artur Czumaj and Piotr Krysta and
Berthold V{\"o}cking",
title = "Computing equilibria for a service provider game with
(Im)perfect information",
journal = j-TALG,
volume = "2",
number = "4",
pages = "679--706",
month = oct,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Moore:2006:GQF,
author = "Cristopher Moore and Daniel Rockmore and Alexander
Russell",
title = "Generic quantum {Fourier} transforms",
journal = j-TALG,
volume = "2",
number = "4",
pages = "707--723",
month = oct,
year = "2006",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Archer:2007:FPM,
author = "Aaron Archer and {\'E}va Tardos",
title = "Frugal path mechanisms",
journal = j-TALG,
volume = "3",
number = "1",
pages = "??--??",
month = feb,
year = "2007",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "3",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Bhatia:2007:AAB,
author = "Randeep Bhatia and Julia Chuzhoy and Ari Freund and
Joseph (Seffi) Naor",
title = "Algorithmic aspects of bandwidth trading",
journal = j-TALG,
volume = "3",
number = "1",
pages = "??--??",
month = feb,
year = "2007",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "10",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Carmo:2007:QPI,
author = "Renato Carmo and Tom{\'a}s Feder and Yoshiharu
Kohayakawa and Eduardo Laber and Rajeev Motwani and
Liadan O'Callaghan and Rina Panigrahy and Dilys
Thomas",
title = "Querying priced information in databases: {The}
conjunctive case",
journal = j-TALG,
volume = "3",
number = "1",
pages = "??--??",
month = feb,
year = "2007",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "9",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Ciriani:2007:DSS,
author = "Valentina Ciriani and Paolo Ferragina and Fabrizio
Luccio and S. Muthukrishnan",
title = "A data structure for a sequence of string accesses in
external memory",
journal = j-TALG,
volume = "3",
number = "1",
pages = "??--??",
month = feb,
year = "2007",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "6",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Cormode:2007:SED,
author = "Graham Cormode and S. Muthukrishnan",
title = "The string edit distance matching problem with moves",
journal = j-TALG,
volume = "3",
number = "1",
pages = "??--??",
month = feb,
year = "2007",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The edit distance between two strings $S$ and $R$ is
defined to be the minimum number of character inserts,
deletes, and changes needed to convert $R$ to $S$. Given
a text string $t$ of length $n$, and a pattern string
$p$ of length $m$, informally, the string edit distance
matching problem is to compute the smallest edit
distance between $p$ and substrings of $t$. We relax
the problem so that: (a) we allow an additional
operation, namely, substring moves; and (b) we allow
approximation of this string edit distance. Our result
is a near-linear time deterministic algorithm to
produce a factor of $ O(\log n \log^* n)$
approximation to the string edit distance with moves.
This is the first known significantly subquadratic
algorithm for a string edit distance problem in which
the distance involves nontrivial alignments. Our
results are obtained by embedding strings into $ L_1$
vector space using a simplified parsing technique,
which we call edit-sensitive parsing (ESP).",
acknowledgement = ack-nhfb,
articleno = "2",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
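For orientation alongside Cormode:2007:SED, the sketch below computes the classical, move-free edit distance that the abstract relaxes; it is the standard quadratic dynamic program over inserts, deletes, and character changes, exactly the kind of cost the cited near-linear-time approximation is designed to avoid. (The code is illustrative, not from the paper.)

def edit_distance(r, s):
    """Minimum number of character inserts, deletes, and changes turning r into s.

    dp[i][j] is the edit distance between r[:i] and s[:j]; the table has
    (len(r)+1) x (len(s)+1) cells, hence quadratic time and space.
    """
    n, m = len(r), len(s)
    dp = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = i
    for j in range(m + 1):
        dp[0][j] = j
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            cost = 0 if r[i - 1] == s[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,         # delete from r
                           dp[i][j - 1] + 1,         # insert into r
                           dp[i - 1][j - 1] + cost)  # change a character
    return dp[n][m]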
@Article{Czumaj:2007:TBW,
author = "Artur Czumaj and Berthold V{\"o}cking",
title = "Tight bounds for worst-case equilibria",
journal = j-TALG,
volume = "3",
number = "1",
pages = "??--??",
month = feb,
year = "2007",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "4",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Elkin:2007:IAR,
author = "Michael Elkin and Guy Kortsarz",
title = "An improved algorithm for radio broadcast",
journal = j-TALG,
volume = "3",
number = "1",
pages = "??--??",
month = feb,
year = "2007",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "8",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Eppstein:2007:FSI,
author = "David Eppstein",
title = "Foreword to special issue on {SODA 2002}",
journal = j-TALG,
volume = "3",
number = "1",
pages = "??--??",
month = feb,
year = "2007",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "1",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Hershberger:2007:DSS,
author = "John Hershberger and Subhash Suri and Amit Bhosle",
title = "On the difficulty of some shortest path problems",
journal = j-TALG,
volume = "3",
number = "1",
pages = "??--??",
month = feb,
year = "2007",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "5",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Pandurangan:2007:EBB,
author = "Gopal Pandurangan and Eli Upfal",
title = "Entropy-based bounds for online algorithms",
journal = j-TALG,
volume = "3",
number = "1",
pages = "??--??",
month = feb,
year = "2007",
CODEN = "????",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Apr 14 10:58:14 MDT 2007",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "7",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Voronenko:2007:MMC,
author = "Yevgen Voronenko and Markus P{\"u}schel",
title = "Multiplierless multiple constant multiplication",
journal = j-TALG,
volume = "3",
number = "2",
pages = "11:1--11:??",
month = may,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1240233.1240234",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:54:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A variable can be multiplied by a given set of
fixed-point constants using a multiplier block that
consists exclusively of additions, subtractions, and
shifts. The generation of a multiplier block from the
set of constants is known as the multiple constant
multiplication (MCM) problem. Finding the optimal
solution, namely, the one with the fewest number of
additions and subtractions, is known to be NP-complete.
We propose a new algorithm for the MCM problem, which
produces solutions that require up to 20\% fewer
additions and subtractions than the best previously
known algorithm. At the same time our algorithm, in
contrast to the closest competing algorithm, is not
limited by the constant bitwidths. We present our
algorithm using a unifying formal framework for the
best, graph-based MCM algorithms and provide a detailed
runtime analysis and experimental evaluation. We show
that our algorithm can handle problem sizes as large as
100 32-bit constants in a time acceptable for most
applications. The implementation of the new algorithm
is available at \path|www.spiral.net|.",
acknowledgement = ack-nhfb,
articleno = "11",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Addition chains; directed graph; FIR filter;
fixed-point arithmetic; strength reduction",
}
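The multiplier-block idea in Voronenko:2007:MMC can be seen in miniature below: multiplication by a fixed constant is decomposed into shifts and additions, so no general multiplier is needed. The decomposition shown simply follows the binary expansion of the constant; it is an illustrative baseline of mine, not the graph-based MCM algorithm of the paper, which additionally shares intermediate results across constants and uses subtractions.

def shift_add_multiply(x, c):
    """Multiply x by the fixed nonnegative integer constant c using shifts and adds.

    Each set bit i of c contributes one shifted copy (x << i).
    """
    result = 0
    i = 0
    while c >> i:
        if (c >> i) & 1:
            result += x << i
        i += 1
    return result

For instance, shift_add_multiply(x, 23) computes x + (x << 1) + (x << 2) + (x << 4), i.e. three additions in place of one multiplication.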
@Article{Chern:2007:PCR,
author = "Hua-Huai Chern and Michael Fuchs and Hsien-Kuei
Hwang",
title = "Phase changes in random point quadtrees",
journal = j-TALG,
volume = "3",
number = "2",
pages = "12:1--12:??",
month = may,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1240233.1240235",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:54:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We show that a wide class of linear cost measures
(such as the number of leaves) in random
$d$-dimensional point quadtrees undergo a change in
limit laws: If the dimension $ d = 1, \ldots, 8 $, then
the limit law is normal; if $ d \geq 9 $ then there is
no convergence to a fixed limit law. Stronger
approximation results such as convergence rates and
local limit theorems are also derived for the number of
leaves, additional phase changes being unveiled. Our
approach is new and very general, and also applicable
to other classes of search trees. A brief discussion of
Devroye's grid trees (covering $m$-ary search trees and
quadtrees as special cases) is given. We also propose
an efficient numerical procedure for computing the
constants involved to high precision.",
acknowledgement = ack-nhfb,
articleno = "12",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "analysis in distribution of algorithms; Asymptotic
transfer; central limit theorems; depth; differential
equations; grid trees; local limit theorems; Mellin
transforms; page usage; phase transitions; quadtrees;
total path length",
}
@Article{Demaine:2007:RDS,
author = "Erik D. Demaine and John Iacono and Stefan Langerman",
title = "Retroactive data structures",
journal = j-TALG,
volume = "3",
number = "2",
pages = "13:1--13:??",
month = may,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1240233.1240236",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:54:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We introduce a new data structuring paradigm in which
operations can be performed on a data structure not
only in the present, but also in the past. In this new
paradigm, called retroactive data structures, the
historical sequence of operations performed on the data
structure is not fixed. The data structure allows
arbitrary insertion and deletion of operations at
arbitrary times, subject only to consistency
requirements. We initiate the study of retroactive data
structures by formally defining the model and its
variants. We prove that, unlike persistence, efficient
retroactivity is not always achievable. Thus, we
present efficient retroactive data structures for
queues, doubly ended queues, priority queues,
union-find, and decomposable search structures.",
acknowledgement = ack-nhfb,
articleno = "13",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "History; persistence; point location; rollback; time
travel",
}
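To make the retroactivity model of Demaine:2007:RDS concrete, the toy class below keeps the entire timeline of operations on a FIFO queue and replays it after every retroactive change. This naive rollback is my own illustration of the model, not the paper's construction; the point of the paper is precisely that for many structures one can beat this linear-in-history cost.

class NaiveRetroactiveQueue:
    """Toy partially retroactive FIFO queue (illustration only).

    The timeline is a list of (time, op, value) triples kept sorted by time;
    queries about the present are answered by replaying the whole timeline,
    which is correct but costs time linear in the length of the history.
    """

    def __init__(self):
        self.timeline = []   # (time, "enqueue" or "dequeue", value or None)

    def insert_op(self, time, op, value=None):
        """Retroactively insert an operation at the given time."""
        self.timeline.append((time, op, value))
        self.timeline.sort(key=lambda entry: entry[0])

    def delete_op(self, time):
        """Retroactively delete the operation performed at the given time."""
        self.timeline = [e for e in self.timeline if e[0] != time]

    def front(self):
        """Replay the history and return the element currently at the front."""
        queue = []
        for _, op, value in self.timeline:
            if op == "enqueue":
                queue.append(value)
            elif op == "dequeue" and queue:
                queue.pop(0)
        return queue[0] if queue else None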
@Article{Hayward:2007:IAW,
author = "Ryan B. Hayward and Jeremy P. Spinrad and R.
Sritharan",
title = "Improved algorithms for weakly chordal graphs",
journal = j-TALG,
volume = "3",
number = "2",
pages = "14:1--14:??",
month = may,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1240233.1240237",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:54:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We use a new structural theorem on the presence of
two-pairs in weakly chordal graphs to develop improved
algorithms. For the recognition problem, we reduce the
time complexity from {$ O(m n^2) $} to {$ O(m^2) $} and
the space complexity from {$ O(n^3) $} to {$ O(m + n)
$}, and also produce a hole or antihole if the input
graph is not weakly chordal. For the optimization
problems, the complexity of the clique and coloring
problems is reduced from {$ O(m n^2) $} to {$ O(n^3) $}
and the complexity of the independent set and clique
cover problems is improved from {$ O(n^4) $} to {$ O(m
n) $}. The space complexity of our optimization
algorithms is {$ O(m + n) $}.",
acknowledgement = ack-nhfb,
articleno = "14",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "coloring; graph algorithms; Perfect graphs;
recognition; weakly chordal",
}
@Article{Kavitha:2007:SSM,
author = "Telikepalli Kavitha and Kurt Mehlhorn and Dimitrios
Michail and Katarzyna E. Paluch",
title = "Strongly stable matchings in time {$ O(n m) $} and
extension to the hospitals-residents problem",
journal = j-TALG,
volume = "3",
number = "2",
pages = "15:1--15:??",
month = may,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1240233.1240238",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:54:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "An instance of the stable marriage problem is an
undirected bipartite graph {$ G = (X \cup W, E) $} with
linearly ordered adjacency lists with ties allowed in
the ordering. A matching {$M$} is a set of edges, no
two of which share an endpoint. An edge {$ e = (a, b)
\in E \setminus M $} is a blocking edge for {$M$} if
{$a$} is either unmatched or strictly prefers {$b$} to
its partner in {$M$}, and {$b$} is unmatched, strictly
prefers {$a$} to its partner in {$M$}, or is
indifferent between them. A matching is strongly stable
if there is no blocking edge with respect to it. We
give an {$ O(n m) $} algorithm for computing strongly
stable matchings, where {$n$} is the number of vertices
and {$m$} the number of edges. The previous best
algorithm had running time {$ O(m^2) $}. We also study
this problem in the hospitals-residents setting, which
is a many-to-one extension of the aforementioned
problem. We give an {$ O(m \sum_{h \in H} p_h) $}
algorithm for computing a strongly stable matching in
the hospitals-residents problem, where {$ p_h $} is the
quota of a hospital {$h$}. The previous best algorithm
had running time {$ O(m^2) $}.",
acknowledgement = ack-nhfb,
articleno = "15",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Bipartite matching; level maximal; stable marriage;
strong stability",
}
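The definition of a blocking edge in Kavitha:2007:SSM is easy to misread, so the helper below checks it literally: an edge (a, b) outside the matching blocks it when a would strictly gain by switching and b would not strictly lose. The encoding of preferences as integer ranks (smaller is better, equal ranks encode ties) and all names are assumptions made for illustration.

def is_blocking_edge(a, b, partner, rank):
    """Test whether the non-matching edge (a, b) blocks the matching.

    partner maps each vertex to its current partner or None; rank[x][y] is
    x's rank of neighbour y.  Per the abstract: a must be unmatched or
    strictly prefer b to its partner, and b must be unmatched, strictly
    prefer a, or be indifferent between a and its partner.
    """
    a_strictly_gains = partner[a] is None or rank[a][b] < rank[a][partner[a]]
    b_does_not_lose = partner[b] is None or rank[b][a] <= rank[b][partner[b]]
    return a_strictly_gains and b_does_not_lose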
@Article{Bagchi:2007:DSR,
author = "Amitabha Bagchi and Amitabh Chaudhary and David
Eppstein and Michael T. Goodrich",
title = "Deterministic sampling and range counting in geometric
data streams",
journal = j-TALG,
volume = "3",
number = "2",
pages = "16:1--16:??",
month = may,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1240233.1240239",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:54:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present memory-efficient deterministic algorithms
for constructing $ \epsilon $-nets and $ \epsilon
$-approximations of streams of geometric data. Unlike
probabilistic approaches, these deterministic samples
provide guaranteed bounds on their approximation
factors. We show how our deterministic samples can be
used to answer approximate online iceberg geometric
queries on data streams. We use these techniques to
approximate several robust statistics of geometric data
streams, including Tukey depth, simplicial depth,
regression depth, the Theil--Sen estimator, and the
least median of squares. Our algorithms use only a
polylogarithmic amount of memory, provided the desired
approximation factors are at least
inverse-polylogarithmic. We also include a lower bound
for noniceberg geometric queries.",
acknowledgement = ack-nhfb,
articleno = "16",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Data streams; epsilon nets; geometric data; iceberg
queries; range counting; robust statistics; sampling;
streaming algorithms",
}
@Article{Arya:2007:SEB,
author = "Sunil Arya and Theocharis Malamatos and David M.
Mount",
title = "A simple entropy-based algorithm for planar point
location",
journal = j-TALG,
volume = "3",
number = "2",
pages = "17:1--17:17",
month = may,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1240233.1240240",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:54:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given a planar polygonal subdivision {$S$}, point
location involves preprocessing this subdivision into a
data structure so that given any query point {$q$}, the
cell of the subdivision containing {$q$} can be
determined efficiently. Suppose that for each cell
{$z$} in the subdivision, the probability $ p_z $ that
a query point lies within this cell is also given. The
goal is to design the data structure to minimize the
average search time. This problem has been considered
before, but existing data structures are all quite
complicated. It has long been known that the entropy
{$H$} of the probability distribution is the dominant
term in the lower bound on the average-case search
time. In this article, we show that a very simple
modification of a well-known randomized incremental
algorithm can be applied to produce a data structure of
expected linear size that can answer point-location
queries in {$ O(H) $} average time. We also present
empirical evidence for the practical efficiency of this
approach.",
acknowledgement = ack-nhfb,
articleno = "17",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "entropy; expected-case complexity; Point location;
polygonal subdivision; randomized algorithms;
trapezoidal maps",
}
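The entropy bound in Arya:2007:SEB can be stated in one line: if a query falls in cell z with probability p_z, then H = -sum_z p_z log2 p_z, and the cited structure answers point-location queries in O(H) expected time. The helper below merely evaluates this quantity; it is a definitional aid of mine, not part of the paper's data structure.

import math

def distribution_entropy(cell_probs):
    """Entropy H = -sum p*log2(p) of a query distribution over subdivision cells.

    Cells with zero probability contribute nothing.  H is small when the
    distribution is concentrated, which is when entropy-sensitive point
    location pays off most.
    """
    return -sum(p * math.log2(p) for p in cell_probs if p > 0)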
@Article{Kauers:2007:ADZ,
author = "Manuel Kauers",
title = "An algorithm for deciding zero equivalence of nested
polynomially recurrent sequences",
journal = j-TALG,
volume = "3",
number = "2",
pages = "18:1--18:??",
month = may,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1240233.1240241",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:54:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We introduce the class of nested polynomially
recurrent sequences which includes a large number of
sequences that are of combinatorial interest. We
present an algorithm for deciding zero equivalence of
these sequences, thereby providing a new algorithm for
proving identities among combinatorial sequences: In
order to prove an identity, decide by the algorithm
whether the difference of the left-hand and
right-hand sides is identically zero. This algorithm is
able to treat mathematical objects which are not
covered by any other known symbolic method for proving
combinatorial identities. Despite its theoretical
flavor and high complexity, an implementation of the
algorithm can be successfully applied to nontrivial
examples.",
acknowledgement = ack-nhfb,
articleno = "18",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "combinatorial sequences; nested polynomially recurrent
sequences; Symbolic computation; zero equivalence",
}
@Article{Amir:2007:DTS,
author = "Amihood Amir and Gad M. Landau and Moshe Lewenstein
and Dina Sokol",
title = "Dynamic text and static pattern matching",
journal = j-TALG,
volume = "3",
number = "2",
pages = "19:1--19:??",
month = may,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1240233.1240242",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:54:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article, we address a new version of dynamic
pattern matching. The dynamic text and static pattern
matching problem is the problem of finding a static
pattern in a text that is continuously being updated.
The goal is to report all new occurrences of the
pattern in the text after each text update. We present
an algorithm for solving the problem where the text
update operation is changing the symbol value of a text
location. Given a text of length $n$ and a pattern of
length $m$, our algorithm preprocesses the text in time
{$ O(n \log \log m) $}, and the pattern in time {$ O(m
\log m) $}. The extra space used is {$ O(n + m \log m)
$}. Following each text update, the algorithm deletes
all prior occurrences of the pattern that no longer
match, and reports all new occurrences of the pattern
in the text in {$ O(\log \log m) $} time. We note that
the complexity is not proportional to the number of
pattern occurrences, since all new occurrences can be
reported in a succinct form.",
acknowledgement = ack-nhfb,
articleno = "19",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "border trees; Dynamic text; static pattern",
}
@Article{Ferragina:2007:CRS,
author = "Paolo Ferragina and Giovanni Manzini and Veli
M{\"a}kinen and Gonzalo Navarro",
title = "Compressed representations of sequences and full-text
indexes",
journal = j-TALG,
volume = "3",
number = "2",
pages = "20:1--20:??",
month = may,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1240233.1240243",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:54:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given a sequence {$ S = s_1 s_2 \ldots s_n $} of
integers smaller than {$ r = O(\polylog (n)) $}, we
show how {$S$} can be represented using {$ n H_0 (S) +
o(n) $} bits, so that we can know any {$ s_q $}, as
well as answer rank and select queries on {$S$}, in
constant time. {$ H_0 (S) $} is the zero-order
empirical entropy of {$S$} and {$ n H_0 (S) $} provides
an information-theoretic lower bound to the bit storage
of any sequence {$S$} via a fixed encoding of its
symbols. This extends previous results on binary
sequences, and improves previous results on general
sequences where those queries are answered in {$ O(\log
r) $} time. For larger {$r$}, we can still represent
{$S$} in {$ n H_0 (S) + o(n \log r) $} bits and answer
queries in {$ O(\log r / \log \log n) $}
time.\par
Another contribution of this article is to show how to
combine our compressed representation of integer
sequences with a compression boosting technique to
design compressed full-text indexes that scale well
with the size of the input alphabet {$ \Sigma $}.
Specifically, we design a variant of the FM-index that
indexes a string {$ T[1, n] $} within {$ n H_k(T) +
o(n) $} bits of storage, where {$ H_k(T) $} is the
{$k$}th-order empirical entropy of {$T$}. This space
bound holds simultaneously for all {$ k \leq \alpha
\log_{|\Sigma|} n $}, constant {$ 0 < \alpha < 1 $},
and {$ | \Sigma | = O(\polylog (n)) $}. This index
counts the occurrences of an arbitrary pattern {$ P[1,
p] $} as a substring of {$T$} in {$ O(p) $} time; it
locates each pattern occurrence in {$ O(\log^{1 +
\varepsilon} n) $} time for any constant {$ 0 <
\varepsilon < 1 $}; and reports a text substring of
length {$ \ell $} in {$ O(\ell + \log^{1 + \varepsilon}
n) $} time.\par
Compared to all previous work, our index is the first
that removes the alphabet-size dependence from all
query times; in particular, counting time is linear in
the pattern length. Still, our index uses essentially
the same space, namely the {$k$}th-order entropy of the
text {$T$}, which is the best space obtained in
previous work. We can also handle larger alphabets of
size {$ | \Sigma | = O(n^{\beta}) $}, for any {$ 0 <
\beta < 1 $}, by paying {$ o(n \log | \Sigma |) $}
extra space and multiplying all query times by {$
O(\log | \Sigma | / \log \log n) $}.",
acknowledgement = ack-nhfb,
articleno = "20",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Burrows--Wheeler transform; compression boosting;
entropy; rank and select; text compression; Text
indexing; wavelet tree",
}
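To make the rank functionality above concrete, here is a deliberately simple, uncompressed Python wavelet tree over a small integer alphabet. It is an illustrative sketch only: the article's point is supporting such queries within entropy-related space bounds, which this toy does not attempt.

# Illustrative, non-succinct wavelet tree (assumption: small integer alphabet).
# rank(c, i) returns the number of occurrences of symbol c in S[0:i].
class WaveletTree:
    def __init__(self, seq, lo=None, hi=None):
        if lo is None:
            lo, hi = min(seq), max(seq)
        self.lo, self.hi = lo, hi
        if lo == hi or not seq:
            self.left = self.right = None
            self.zeros = None
            return
        mid = (lo + hi) // 2
        bits = [0 if c <= mid else 1 for c in seq]
        # prefix counts of zeros, so each per-node rank step takes O(1) time
        self.zeros = [0]
        for b in bits:
            self.zeros.append(self.zeros[-1] + (1 - b))
        self.left = WaveletTree([c for c in seq if c <= mid], lo, mid)
        self.right = WaveletTree([c for c in seq if c > mid], mid + 1, hi)

    def rank(self, c, i):
        """Occurrences of c among the first i symbols."""
        node = self
        while node.left is not None:
            mid = (node.lo + node.hi) // 2
            if c <= mid:
                i = node.zeros[i]          # how many of the first i symbols went left
                node = node.left
            else:
                i = i - node.zeros[i]      # how many went right
                node = node.right
        return i

S = [3, 1, 4, 1, 5, 2, 1, 3]
wt = WaveletTree(S)
print(wt.rank(1, 5))   # -> 2 (two occurrences of 1 among the first five symbols)
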
@Article{Chan:2007:CID,
author = "Ho-Leung Chan and Wing-Kai Hon and Tak-Wah Lam and
Kunihiko Sadakane",
title = "Compressed indexes for dynamic text collections",
journal = j-TALG,
volume = "3",
number = "2",
pages = "21:1--21:??",
month = may,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1240233.1240244",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:54:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Let {$T$} be a string with {$n$} characters over an
alphabet of constant size. A recent breakthrough on
compressed indexing allows us to build an index for
{$T$} in optimal space (i.e., {$ O(n) $} bits), while
supporting very efficient pattern matching [Ferragina
and Manzini 2000; Grossi and Vitter 2000]. Yet the
compressed nature of such indexes also makes them
difficult to update dynamically.\par
This article extends the work on optimal-space indexing
to a dynamic collection of texts. Our first result is a
compressed solution to the library management problem,
where we show an index of {$ O(n) $} bits for a text
collection {$L$} of total length {$n$}, which can be
updated in {$ O(| T | \log n) $} time when a text {$T$}
is inserted or deleted from {$L$}; also, the index
supports searching the occurrences of any pattern {$P$}
in all texts in {$L$} in {$ O(|P| \log n + {\rm occ}
\log^2 n) $} time, where {\rm occ} is the number of
occurrences.\par
Our second result is a compressed solution to the
dictionary matching problem, where we show an index of
{$ O(d) $} bits for a pattern collection {$D$} of total
length {$d$}, which can be updated in {$ O(|P| \log^2
d) $} time when a pattern {$P$} is inserted or deleted
from {$D$}; also, the index supports searching the
occurrences of all patterns of {$D$} in any text {$T$}
in {$ O((|T| + {\rm occ}) \log^2 d) $} time. When
compared with the {$ O(d \log d) $}-bit
suffix-tree-based solution of Amir et al. [1995], the
compact solution increases the query time by roughly a
factor of {$ \log d $} only.\par
The solution to the dictionary matching problem is
based on a new compressed representation of a suffix
tree. Precisely, we give an {$ O(n) $}-bit
representation of a suffix tree for a dynamic
collection of texts whose total length is {$n$}, which
supports insertion and deletion of a text {$T$} in {$
O(|T| \log^2 n) $} time, as well as all suffix tree
traversal operations, including forward and backward
suffix links. This work can be regarded as a
generalization of the compressed representation of
static texts. In the study of the aforementioned
result, we also derive the first {$ O(n) $}-bit
representation for maintaining {$n$} pairs of balanced
parentheses in {$ O(\log n / \log \log n) $} time per
operation, matching the time complexity of the previous
{$ O(n \log n) $}-bit solution.",
acknowledgement = ack-nhfb,
articleno = "21",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Compressed suffix tree; string matching",
}
@Article{Boyar:2007:RWO,
author = "Joan Boyar and Lene M. Favrholdt",
title = "The relative worst order ratio for online algorithms",
journal = j-TALG,
volume = "3",
number = "2",
pages = "22:1--22:??",
month = may,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1240233.1240245",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:54:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We define a new measure for the quality of online
algorithms, the relative worst order ratio, using ideas
from the max/max ratio [Ben-David and Borodin 1994] and
from the random order ratio [Kenyon 1996]. The new
ratio is used to compare online algorithms directly by
taking the ratio of their performances on their
respective worst permutations of a worst-case
sequence.\par
Two variants of the bin packing problem are considered:
the classical bin packing problem, where the goal is to
fit all items in as few bins as possible, and the dual
bin packing problem, which is the problem of maximizing
the number of items packed in a fixed number of bins.
Several known algorithms are compared using this new
measure, and a new, simple variant of first-fit is
proposed for dual bin packing.\par
Many of our results are consistent with those
previously obtained with the competitive ratio or the
competitive ratio on accommodating sequences, but new
separations and easier proofs are found.",
acknowledgement = ack-nhfb,
articleno = "22",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "bin packing; dual bin packing; Online; quality
measure; relative worst order ratio",
}
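The per-sequence quantity behind the relative worst order ratio, comparing two algorithms on their respective worst permutations of one request sequence, can be explored by brute force on tiny instances. The Python toy below (first-fit versus next-fit bin packing on a hypothetical six-item sequence, not an example taken from the article, whose measure also takes a worst case over sequences) does exactly that.

# Illustrative toy: worst-permutation costs of two online bin packing rules.
from itertools import permutations

def first_fit(items, capacity=10):
    bins = []                              # remaining capacity of each open bin
    for x in items:
        for i, free in enumerate(bins):
            if free >= x:
                bins[i] = free - x
                break
        else:
            bins.append(capacity - x)
    return len(bins)

def next_fit(items, capacity=10):
    bins, free = 0, 0                      # only the most recent bin stays open
    for x in items:
        if free >= x:
            free -= x
        else:
            bins += 1
            free = capacity - x
    return bins

def worst_permutation_cost(algorithm, items):
    return max(algorithm(list(p)) for p in permutations(items))

sequence = [6, 5, 4, 4, 3, 2]
ff = worst_permutation_cost(first_fit, sequence)
nf = worst_permutation_cost(next_fit, sequence)
print(ff, nf, nf / ff)   # -> 3 4 1.333...: next-fit's worst permutation needs an extra bin
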
@Article{Becchetti:2007:SCM,
author = "L. Becchetti and J. K{\"o}nemann and S. Leonardi and
M. P{\'a}l",
title = "Sharing the cost more efficiently: {Improved}
approximation for multicommodity rent-or-buy",
journal = j-TALG,
volume = "3",
number = "2",
pages = "23:1--23:??",
month = may,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1240233.1240246",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:54:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In the multicommodity rent-or-buy (MROB) network
design problems, we are given a network together with a
set of $k$ terminal pairs $ (s_1, t_1), \ldots, (s_k,
t_k) $. The goal is to provision the network so that a
given amount of flow can be shipped between $ s_i $ and
$ t_i $ for all $ 1 \leq i \leq k $ simultaneously. In
order to provision the network, one can either rent
capacity on edges at some cost per unit of flow, or buy
them at some larger fixed cost. Bought edges have no
incremental, flow-dependent cost. The overall objective
is to minimize the total provisioning
cost.\par
Recently, Gupta et al. [2003a] presented a
12-approximation for the MROB problem. Their algorithm
chooses a subset of the terminal pairs in the graph at
random and then buys the edges of an approximate
Steiner forest for these pairs. This technique had
previously been introduced [Gupta et al. 2003b] for the
single-sink rent-or-buy network design problem.\par
In this article we give a 6.828-approximation for the
MROB problem by refining the algorithm of Gupta et al.
and simplifying their analysis. The improvement in our
article is based on a more careful adaptation and
simplified analysis of the primal-dual algorithm for
the Steiner forest problem due to Agrawal et al.
[1995]. Our result significantly reduces the gap
between the single-sink and multisink case.",
acknowledgement = ack-nhfb,
articleno = "23",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithms; cost sharing; network
design; Steiner forests",
}
@Article{Johnson:2007:NCC,
author = "David S. Johnson",
title = "The {NP}-completeness column: {Finding} needles in
haystacks",
journal = j-TALG,
volume = "3",
number = "2",
pages = "24:1--24:??",
month = may,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1240233.1240247",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:54:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "This is the 26th edition of a column that covers new
developments in the theory of NP-completeness. The
presentation is modeled on that which M. R. Garey and I
used in our book ``Computers and Intractability: A
Guide to the Theory of NP-Completeness,'' W. H. Freeman
{\&} Co., New York, 1979, hereinafter referred to as
``[G{\&}J].'' Previous columns, the first 23 of which
appeared in J. Algorithms, will be referred to by a
combination of their sequence number and year of
appearance, e.g., ``Column 1 [1981].'' Full
bibliographic details on the previous columns, as well
as downloadable unofficial versions of them, can be
found at \path
=http://www.research.att.com/~dsj/columns/=. This
column discusses the question of whether finding an
object can be computationally difficult even when we
know that the object exists.",
acknowledgement = ack-nhfb,
articleno = "24",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "fixed point; game theory; local search; Nash
equilibrium; PLS; PPAD",
}
@Article{Feng:2007:FAS,
author = "Jianxing Feng and Daming Zhu",
title = "Faster algorithms for sorting by transpositions and
sorting by block interchanges",
journal = j-TALG,
volume = "3",
number = "3",
pages = "25:1--25:14",
month = aug,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1273340.1273341",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article, we present a new data structure,
called the permutation tree, to improve the running
time of sorting permutation by transpositions and
sorting permutation by block interchanges. The existing
1.5-approximation algorithm for sorting permutation by
transpositions has time complexity {$ O(n^{3 / 2} \sqrt
{\log n}) $}. By means of the permutation tree, we can
improve this algorithm to achieve time complexity {$
O(n \log n) $}. We can also improve the algorithm for
sorting permutation by block interchanges to take its
time complexity from {$ O(n^2) $} down to {$ O(n \log
n) $}.",
acknowledgement = ack-nhfb,
articleno = "25",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Block interchange; genome; permutation; time
complexity; transposition; tree",
}
@Article{Gupta:2007:CPD,
author = "Himanshu Gupta and Rephael Wenger",
title = "Constructing pairwise disjoint paths with few links",
journal = j-TALG,
volume = "3",
number = "3",
pages = "26:1--26:??",
month = aug,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1273340.1273342",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Let {$P$} be a simple polygon and let {$ \{ (u_1,
u{\prime }_1), (u_2, u{\prime }_2), \ldots, (u_m,
u{\prime }_m) \} $} be a set of {$m$} pairs of distinct
vertices of {$P$}, where for every distinct {$ i, j
\leq m $}, there exist pairwise disjoint
(nonintersecting) paths connecting {$ u_i $} to {$ u
\prime_i $} and $ u_j $ to $ u \prime_j $. We wish to
construct $m$ pairwise disjoint paths in the interior
of {$P$} connecting {$ u_i $} to {$ u \prime_i $} for
{$ i = 1, \ldots, m $}, with a minimal total number of
line segments. We give an approximation algorithm that
constructs such a set of paths using {$ O(M) $} line
segments in {$ O(n \log m + M \log m) $} time, where
{$M$} is the number of line segments in the optimal
solution and {$n$} is the size of the polygon.",
acknowledgement = ack-nhfb,
articleno = "26",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "isomorphic triangulations; Link paths; noncrossing;
polygon",
}
@Article{Chekuri:2007:MDF,
author = "Chandra Chekuri and Marcelo Mydlarz and F. Bruce
Shepherd",
title = "Multicommodity demand flow in a tree and packing
integer programs",
journal = j-TALG,
volume = "3",
number = "3",
pages = "27:1--27:??",
month = aug,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1273340.1273343",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider requests for capacity in a given tree
network {$ T = (V, E) $} where each edge {$e$} of the
tree has some integer capacity {$ u_e $}. Each request
{$f$} is a node pair with an integer demand $ d_f $ and
a profit $ w_f $ which is obtained if the request is
satisfied. The objective is to find a set of demands
that can be feasibly routed in the tree and which
provides a maximum profit. This generalizes well-known
problems, including the knapsack and $b$-matching
problems.\par
When all demands are 1, we have the integer
multicommodity flow problem. Garg et al. [1997] had
shown that this problem is NP-hard and gave a
2-approximation algorithm for the cardinality case (all
profits are 1) via a primal-dual algorithm. Our main
result establishes that the integrality gap of the
natural linear programming relaxation is at most 4 for
the case of arbitrary profits. Our proof is based on
coloring paths on trees and this has other applications
for wavelength assignment in optical network
routing.\par
We then consider the problem with arbitrary demands.
When the maximum demand $ d_{\rm max} $ is at most the
minimum edge capacity $ u_{\rm min} $, we show that the
integrality gap of the LP is at most 48. This result is
obtained by showing that the integrality gap for the
demand version of such a problem is at most 11.542
times that for the unit-demand case. We use techniques
of Kolliopoulos and Stein [2004, 2001] to obtain this.
We also obtain, via this method, improved algorithms
for line and ring networks. Applications and
connections to other combinatorial problems are
discussed.",
acknowledgement = ack-nhfb,
articleno = "27",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "approximation algorithm; Integer multicommodity flow;
integrality gap; packing integer program; tree",
}
@Article{Bar-Noy:2007:WSR,
author = "Amotz Bar-Noy and Richard E. Ladner and Tami Tamir",
title = "Windows scheduling as a restricted version of bin
packing",
journal = j-TALG,
volume = "3",
number = "3",
pages = "28:1--28:??",
month = aug,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1273340.1273344",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given a sequence of $n$ positive integers $ w_1, w_2,
\ldots, w_n $ that are associated with the items $ 1,
2, \ldots n $, respectively. In the windows scheduling
problem, the goal is to schedule all the items
(equal-length information pages) on broadcasting
channels such that the gap between two consecutive
appearances of page $i$ on any of the channels is at
most $ w_i $ slots (a slot is the transmission time of
one page). In the unit-fractions bin packing problem,
the goal is to pack all the items in bins of unit size
where the size (width) of item $i$ is $ 1 / w_i $. The
optimization objective is to minimize the number of
channels or bins. In the offline setting, the sequence
is known in advance, whereas in the online setting, the
items arrive in order and assignment decisions are
irrevocable. Since a page requires at least $ 1 / w_i $
of a channel's bandwidth, it follows that windows
scheduling without migration (i.e., all broadcasts of a
page must be from the same channel) is a restricted
version of unit-fractions bin packing.\par
Let {$ H = \lceil \sum_{i = 1}^n (1 / w_i) \rceil $} be the
bandwidth lower bound on the required number of bins
(channels). The best-known offline algorithm for the
windows scheduling problem used {$ H + O(\ln H) $}
channels. This article presents an offline algorithm
for the unit-fractions bin packing problem with at most
{$ H + 1 $} bins. In the online setting, this article
presents algorithms for both problems with {$ H +
O(\sqrt {H}) $} channels or bins, where the one for the
unit-fractions bin packing problem is simpler. On the
other hand, this article shows that already for the
unit-fractions bin packing problem, any online
algorithm must use at least {$ H + \Omega (\ln H) $}
bins. For instances in which the window sizes form a
divisible sequence, an optimal online algorithm is
presented. Finally, this article includes a new
NP-hardness proof for the windows scheduling problem.",
acknowledgement = ack-nhfb,
articleno = "28",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "approximation algorithms; bin-packing; online
algorithms; Periodic scheduling",
}
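The bandwidth lower bound {$H$} used above is simple to compute exactly; the Python sketch below evaluates it with rational arithmetic and also runs a plain first-fit packing of the unit-fraction items. This is a naive baseline for illustration only, not the {$ H + 1 $}-bin offline algorithm or the online schemes of the article.

# Illustrative sketch: bandwidth lower bound and naive first-fit for
# unit-fraction bin packing (item i has width 1/w_i, bins have unit size).
from fractions import Fraction
from math import ceil

def bandwidth_lower_bound(windows):
    return ceil(sum(Fraction(1, w) for w in windows))

def first_fit_unit_fractions(windows):
    bins = []                          # remaining capacity of each open bin
    for w in windows:
        size = Fraction(1, w)
        for i, free in enumerate(bins):
            if free >= size:
                bins[i] = free - size
                break
        else:
            bins.append(Fraction(1) - size)
    return len(bins)

windows = [2, 3, 3, 4, 5, 6, 7]
print(bandwidth_lower_bound(windows))     # -> 2, since 1/2+1/3+1/3+1/4+1/5+1/6+1/7 is just under 2
print(first_fit_unit_fractions(windows))  # -> 2 bins on this instance
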
@Article{Hazay:2007:APM,
author = "Carmit Hazay and Moshe Lewenstein and Dina Sokol",
title = "Approximate parameterized matching",
journal = j-TALG,
volume = "3",
number = "3",
pages = "29:1--29:??",
month = aug,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1273340.1273345",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Two equal length strings $s$ and $ s \prime $, over
alphabets {$ \Sigma s $} and {$ \Sigma s \prime $},
parameterize match if there exists a bijection {$ \pi :
\Sigma s \rightarrow \Sigma s \prime $} such that {$
\pi (s) = s \prime $}, where {$ \pi (s) $} is the
renaming of each character of {$s$} via $ \pi $.
Parameterized matching is the problem of finding all
parameterized matches of a pattern string $p$ in a text
$t$, and approximate parameterized matching is the
problem of finding at each location a bijection $ \pi $
that maximizes the number of characters that are mapped
from $p$ to the appropriate $ |p| $-length substring of
$t$.\par
Parameterized matching was introduced as a model for
software duplication detection in software maintenance
systems and also has applications in image processing
and computational biology. For example, approximate
parameterized matching models image searching with
variable color maps in the presence of errors.\par
We consider the problem for which an error threshold,
$k$, is given, and the goal is to find all locations in
$t$ for which there exists a bijection $ \pi $ which
maps $p$ into the appropriate $ |p| $-length substring
of $t$ with at most $k$ mismatched mapped elements. Our
main result is an algorithm for this problem with {$
O(n k^{1.5} + m k \log m) $} time complexity, where {$
m = | p | $} and {$ n = | t | $}. We also show that
when {$ | p | = | t | = m $}, the problem is equivalent
to the maximum matching problem on graphs, yielding a
{$ O(m + k^{1.5}) $} solution.",
acknowledgement = ack-nhfb,
articleno = "29",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Hamming distance; maximum matching; mismatch pair;
parameterize match",
}
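The underlying notion of a parameterized match is easy to test exactly when no mismatches are allowed. The Python sketch below (illustrative only, and much weaker than the article's approximate version, which optimizes the bijection under up to $k$ mismatches) checks whether a single bijection maps one string onto the other.

# Illustrative sketch (not from Hazay/Lewenstein/Sokol): do two equal-length
# strings parameterize-match, i.e., does some alphabet bijection map s onto t?
def parameterized_match(s, t):
    if len(s) != len(t):
        return False
    forward, backward = {}, {}
    for a, b in zip(s, t):
        if forward.setdefault(a, b) != b:    # a is already mapped to a different symbol
            return False
        if backward.setdefault(b, a) != a:   # b is already the image of another symbol
            return False
    return True

print(parameterized_match("aabcb", "xxzyz"))  # True:  a->x, b->z, c->y
print(parameterized_match("aabcb", "xxzyy"))  # False: b would need two images
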
@Article{Halldorsson:2007:IAR,
author = "Magn{\'u}s M. Halld{\'o}rsson and Kazuo Iwama and
Shuichi Miyazaki and Hiroki Yanagisawa",
title = "Improved approximation results for the stable marriage
problem",
journal = j-TALG,
volume = "3",
number = "3",
pages = "30:1--30:??",
month = aug,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1273340.1273346",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The stable marriage problem has recently been studied
in its general setting, where both ties and incomplete
lists are allowed. It is NP-hard to find a stable
matching of maximum size, while any stable matching is
a maximal matching and thus trivially we can obtain a
2-approximation algorithm.\par
In this article, we give the first nontrivial result
for approximation of factor less than two. Our
algorithm achieves an approximation ratio of {$ 2 / (1
+ L^{-2}) $} for instances in which only men have ties
of length at most {$L$}. When both men and women are
allowed to have ties but the lengths are limited to
two, then we show a ratio of {$ 13 / 7 ( < 1.858) $}.
We also improve the lower bound on the approximation
ratio to {$ 21 / 19 ( > 1.1052) $}.",
acknowledgement = ack-nhfb,
articleno = "30",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithms; incomplete lists; stable
marriage problem; ties",
}
@Article{Indyk:2007:NNP,
author = "Piotr Indyk and Assaf Naor",
title = "Nearest-neighbor-preserving embeddings",
journal = j-TALG,
volume = "3",
number = "3",
pages = "31:1--31:??",
month = aug,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1273340.1273347",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article we introduce the notion of
nearest-neighbor-preserving embeddings. These are
randomized embeddings between two metric spaces which
preserve the (approximate) nearest-neighbors. We give
two examples of such embeddings for Euclidean metrics
with low ``intrinsic'' dimension. Combining the
embeddings with known data structures yields the
best-known approximate nearest-neighbor data structures
for such metrics.",
acknowledgement = ack-nhfb,
articleno = "31",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "dimensionality reduction; doubling spaces; embeddings;
Nearest neighbor",
}
@Article{Even-Dar:2007:CTN,
author = "Eyal Even-Dar and Alex Kesselman and Yishay Mansour",
title = "Convergence time to {Nash} equilibrium in load
balancing",
journal = j-TALG,
volume = "3",
number = "3",
pages = "32:1--32:??",
month = aug,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1273340.1273348",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study the number of steps required to reach a pure
Nash equilibrium in a load balancing scenario where
each job behaves selfishly and attempts to migrate to a
machine which will minimize its cost. We consider a
variety of load balancing models, including identical,
restricted, related, and unrelated machines. Our
results have a crucial dependence on the weights
assigned to jobs. We consider arbitrary weights,
integer weights, $k$ distinct weights, and identical
(unit) weights. We look both at an arbitrary schedule
(where the only restriction is that a job migrates to a
machine which lowers its cost) and specific efficient
schedulers (e.g., allowing the largest weight job to
move first). A by-product of our results is
establishing a connection between various scheduling
models and the game-theoretic notion of potential
games. We show that load balancing in unrelated
machines is a generalized ordinal potential game, load
balancing in related machines is a weighted potential
game, and load balancing in related machines and unit
weight jobs is an exact potential game.",
acknowledgement = ack-nhfb,
articleno = "32",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "convergence time; game theory; Nash equilibrium",
}
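A toy version of the dynamics studied above can be simulated directly. The Python sketch below assumes identical machines and a simple job-by-job improvement rule, which is only one of the schedulers the article analyzes; it runs best-response moves until a pure Nash equilibrium is reached and counts the steps.

# Illustrative toy: best-response dynamics for load balancing on identical
# machines.  Each job's cost is the load of its machine; an improving move
# strictly lowers that cost, so the process terminates at a pure Nash equilibrium.
def best_response_dynamics(weights, num_machines):
    assign = [0] * len(weights)            # start every job on machine 0
    load = [0] * num_machines
    load[0] = sum(weights)
    steps = 0
    improved = True
    while improved:
        improved = False
        for job, w in enumerate(weights):
            cur = assign[job]
            # cost of staying is load[cur]; cost of moving to m is load[m] + w
            best = min(range(num_machines),
                       key=lambda m: load[m] if m == cur else load[m] + w)
            if best != cur and load[best] + w < load[cur]:
                load[cur] -= w
                load[best] += w
                assign[job] = best
                steps += 1
                improved = True
    return load, steps

loads, steps = best_response_dynamics([3, 3, 2, 2, 1, 1], num_machines=2)
print(loads, steps)   # -> [6, 6] 2 : a pure Nash equilibrium after two improving moves
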
@Article{Andrews:2007:RSM,
author = "Matthew Andrews and Lisa Zhang",
title = "Routing and scheduling in multihop wireless networks
with time-varying channels",
journal = j-TALG,
volume = "3",
number = "3",
pages = "33:1--33:??",
month = aug,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1273340.1273349",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study routing and scheduling in multihop wireless
networks. When data is transmitted from its source node
to its destination node it may go through other
wireless nodes as intermediate hops. The data
transmission is node constrained, that is, every node
can transmit data to at most one neighboring node per
time step. The transmission rates are time varying as a
result of changing wireless channel conditions.\par
In this article, we assume that data arrivals and
transmission rates are governed by an adversary. The
power of the adversary is limited by an admissibility
condition which forbids the adversary from overloading
any wireless node a priori. The node-constrained
transmission and time-varying nature of the
transmission rates make our model different from and
harder than the standard adversarial queueing model
which relates to wireline networks.\par
For the case in which the adversary specifies the paths
that the data must follow, we design scheduling
algorithms that ensure network stability. These
algorithms try to give priority to the data that is
closest to its source node. However, at each time step
only a subset of the data queued at a node is eligible
for scheduling. One of our algorithms is fully
distributed.\par
For the case in which the adversary does not dictate
the data paths, we show how to route data so that the
admissibility condition is satisfied. We can then
schedule data along the chosen paths using our stable
scheduling algorithms.",
acknowledgement = ack-nhfb,
articleno = "33",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "routing; Scheduling; stability; time-varying; wireless
network",
}
@Article{Naor:2007:NAP,
author = "Moni Naor and Udi Wieder",
title = "Novel architectures for {P2P} applications: {The}
continuous-discrete approach",
journal = j-TALG,
volume = "3",
number = "3",
pages = "34:1--34:??",
month = aug,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1273340.1273350",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We propose a new approach for constructing P2P
networks based on a dynamic decomposition of a
continuous space into cells corresponding to servers.
We demonstrate the power of this approach by suggesting
two new P2P architectures and various algorithms for
them. The first serves as a DHT (distributed hash
table) and the other is a dynamic expander network. The
DHT network, which we call Distance Halving, allows
logarithmic routing and load while preserving constant
degrees. It offers an optimal tradeoff between degree
and path length in the sense that degree $d$ guarantees
a path length of {$ O(\log_d n) $}. Another advantage
over previous constructions is its relative simplicity.
A major new contribution of this construction is a
dynamic caching technique that maintains low load and
storage, even under the occurrence of hot spots. Our
second construction builds a network that is guaranteed
to be an expander. The resulting topologies are simple
to maintain and implement. Their simplicity makes it
easy to modify and add protocols. A small variation
yields a DHT which is robust against random Byzantine
faults. Finally we show that, using our approach, it is
possible to construct any family of constant degree
graphs in a dynamic environment, though with worse
parameters. Therefore, we expect that more distributed
data structures could be designed and implemented in a
dynamic environment.",
acknowledgement = ack-nhfb,
articleno = "34",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Peer-to-peer networks; routing",
}
@Article{Khuller:2007:PC,
author = "Samir Khuller",
title = "Problems column",
journal = j-TALG,
volume = "3",
number = "3",
pages = "35:1--35:??",
month = aug,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1273340.1273351",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "35",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Gabow:2007:ISS,
author = "H. N. Gabow and Michael A. Bender and Martin
Farach-Colton",
title = "Introduction to {SODA} 2002 and 2003 special issue",
journal = j-TALG,
volume = "3",
number = "4",
pages = "36:1--36:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290673",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "36",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Aspnes:2007:SG,
author = "James Aspnes and Gauri Shah",
title = "Skip graphs",
journal = j-TALG,
volume = "3",
number = "4",
pages = "37:1--37:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290674",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Skip graphs are a novel distributed data structure,
based on skip lists, that provide the full
functionality of a balanced tree in a distributed
system where resources are stored in separate nodes
that may fail at any time. They are designed for use in
searching peer-to-peer systems, and by providing the
ability to perform queries based on key ordering, they
improve on existing search tools that provide only hash
table functionality. Unlike skip lists or other tree
data structures, skip graphs are highly resilient,
tolerating a large fraction of failed nodes without
losing connectivity. In addition, simple and
straightforward algorithms can be used to construct a
skip graph, insert new nodes into it, search it, and
detect and repair errors within it introduced due to
node failures.",
acknowledgement = ack-nhfb,
articleno = "37",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "overlay networks; Peer-to-peer; skip lists",
}
@Article{Han:2007:OPS,
author = "Yijie Han",
title = "Optimal parallel selection",
journal = j-TALG,
volume = "3",
number = "4",
pages = "38:1--38:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290675",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present an optimal parallel selection algorithm on
the EREW PRAM. This algorithm runs in {$ O(\log n) $}
time with {$ n / \log n $} processors. This complexity
matches the known lower bound for parallel selection on
the EREW PRAM model. We therefore close this problem
which has been open for more than a decade.",
acknowledgement = ack-nhfb,
articleno = "38",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "EREW PRAM; Parallel algorithms; selection",
}
@Article{Bansal:2007:MWF,
author = "Nikhil Bansal and Kedar Dhamdhere",
title = "Minimizing weighted flow time",
journal = j-TALG,
volume = "3",
number = "4",
pages = "39:1--39:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290676",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the problem of minimizing the total
weighted flow time on a single machine with
preemptions. We give an online algorithm that is {$
O(k) $}-competitive for {$k$} weight classes. This
implies an {$ O(\log W) $}-competitive algorithm, where
{$W$} is the maximum to minimum ratio of weights. This
algorithm also implies an {$ O(\log n + \log P)
$}-approximation ratio for the problem, where {$P$} is
the ratio of the maximum to minimum job size and {$n$}
is the number of jobs. We also consider the
nonclairvoyant setting where the size of a job is
unknown upon its arrival and becomes known to the
scheduler only when the job meets its service
requirement. We consider the resource augmentation
model, and give a {$ (1 + \varepsilon) $}-speed, {$ (1
+ 1 / \varepsilon) $}-competitive online algorithm.",
acknowledgement = ack-nhfb,
articleno = "39",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "nonclairvoyant scheduling; online algorithms; response
time; Scheduling",
}
@Article{Fakcharoenphol:2007:TRP,
author = "Jittat Fakcharoenphol and Chris Harrelson and Satish
Rao",
title = "The $k$-traveling repairmen problem",
journal = j-TALG,
volume = "3",
number = "4",
pages = "40:1--40:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290677",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the $k$-traveling repairmen problem, also
known as the minimum latency problem, to multiple
repairmen. We give a polynomial-time $ 8.497 \alpha
$-approximation algorithm for this generalization,
where $ \alpha $ denotes the best achievable
approximation factor for the problem of finding the
least-cost rooted tree spanning $i$ vertices of a
metric. For the latter problem, a $ (2 + \varepsilon)
$-approximation is known. Our results can be compared
with the best-known approximation algorithm using
similar techniques for the case $ k = 1 $, which is $
3.59 \alpha $. Moreover, recent work of Chaudhuri et al.
[2003] shows how to remove the factor of $ \alpha $,
thus improving all of these results by that factor. We
are aware of no previous work on the approximability of
the present problem. In addition, we give a simple
proof of the $ 3.59 \alpha $-approximation result that
can be more easily extended to the case of multiple
repairmen, and may be of independent interest.",
acknowledgement = ack-nhfb,
articleno = "40",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Traveling salesman; vehicle routing",
}
@Article{Irani:2007:APS,
author = "Sandy Irani and Sandeep Shukla and Rajesh Gupta",
title = "Algorithms for power savings",
journal = j-TALG,
volume = "3",
number = "4",
pages = "41:1--41:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290678",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "This article examines two different mechanisms for
saving power in battery-operated embedded systems. The
first strategy is that the system can be placed in a
sleep state if it is idle. However, a fixed amount of
energy is required to bring the system back into an
active state in which it can resume work. The second
way in which power savings can be achieved is by
varying the speed at which jobs are run. We utilize a
power consumption curve {$ P(s) $} which indicates the
power consumption level given a particular speed. We
assume that {$ P(s) $} is convex, nondecreasing, and
nonnegative for {$ s \geq 0 $}. The problem is to
schedule arriving jobs in a way that minimizes total
energy use and so that each job is completed after its
release time and before its deadline. We assume that
all jobs can be preempted and resumed at no cost.
Although each problem has been considered separately,
this is the first theoretical analysis of systems that
can use both mechanisms. We give an offline algorithm
that is within a factor of 2 of the optimal algorithm.
We also give an online algorithm with a constant
competitive ratio.",
acknowledgement = ack-nhfb,
articleno = "41",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "dynamic speed scaling; online algorithms; Power
savings",
}
@Article{Alon:2007:GSE,
author = "Noga Alon and Venkatesan Guruswami and Tali Kaufman
and Madhu Sudan",
title = "Guessing secrets efficiently via list decoding",
journal = j-TALG,
volume = "3",
number = "4",
pages = "42:1--42:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290679",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the guessing secrets problem defined by
Chung et al. [2001]. This is a variant of the standard
20 questions game where the player has a set of $ k > 1
$ secrets from a universe of {$N$} possible secrets.
The player is asked Boolean questions about the secret.
For each question, the player picks one of the {$k$}
secrets adversarially, and answers according to this
secret.\par
We present an explicit set of {$ O(\log N) $} questions
together with an efficient (i.e., {$ {\rm poly}(\log N)
$} time) algorithm to solve the guessing secrets
problem for the case of 2 secrets. This answers the
main algorithmic question left unanswered by Chung et
al. [2001]. The main techniques we use are small {$
\epsilon $}-biased spaces and the notion of list
decoding.\par
We also establish bounds on the number of questions
needed to solve the {$k$}-secrets game for {$ k > 2 $},
and discuss how list decoding can be used to get
partial information about the secrets, specifically to
find a small core of secrets that must intersect the
actual set of $k$ secrets.",
acknowledgement = ack-nhfb,
articleno = "42",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "$\epsilon$-biased spaces; $k$-universal sets; 20
questions; decoding algorithms; error-correcting
codes",
}
@Article{Raman:2007:SID,
author = "Rajeev Raman and Venkatesh Raman and Srinivasa Rao
Satti",
title = "Succinct indexable dictionaries with applications to
encoding $k$-ary trees, prefix sums and multisets",
journal = j-TALG,
volume = "3",
number = "4",
pages = "43:1--43:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290680",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the indexable dictionary problem, which
consists of storing a set {$ S \subseteq \{ 0, \ldots,
m - 1 \} $} for some integer {$m$} while supporting the
operations of {$ \rank (x) $}, which returns the number
of elements in {$S$} that are less than {$x$} if {$ x
\in S $}, and {$ - 1 $} otherwise; and {$ \select (i)
$}, which returns the {$i$} th smallest element in
{$S$}. We give a data structure that supports both
operations in {$ O(1) $} time on the RAM model and
requires {$ B(n, m) + o(n) + O(\lg \lg m) $} bits to
store a set of size {$n$}, where {$ B(n, m) = \lceil
\lg {m \choose n} \rceil $} is the minimum number of bits
required to store any {$n$}-element subset from a
universe of size {$m$}. Previous dictionaries taking
this space only supported (yes/no) membership queries
in {$ O (1) $} time. In the cell probe model we can
remove the {$ O (\lg \lg m) $} additive term in the
space bound, answering a question raised by Fich and
Miltersen [1995] and Pagh [2001].\par
We present extensions and applications of our indexable
dictionary data structure, including:\par
--- an information-theoretically optimal representation
of a {$k$}-ary cardinal tree that supports standard
operations in constant time;\par
--- a representation of a multiset of size {$n$} from
{$ \{ 0, \ldots, m - 1 \} $} in {$ B(n, m + n) + o(n) +
O(\lg \lg m) $} bits that supports (appropriate
generalizations of) rank and select operations in
constant time; and\par
--- a representation of a sequence of {$n$} nonnegative
integers summing up to {$m$} in {$ B(n, m + n) + o(n)
$} bits that supports prefix sum queries in constant
time.",
acknowledgement = ack-nhfb,
articleno = "43",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Dictionaries; multisets; perfect hashing; prefix sums;
sets; succinct data structures; tries",
}
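For orientation, the information-theoretic bound {$ B(n, m) $} and the rank/select interface described above can be mimicked with ordinary, non-succinct Python structures. The sketch below is such a stand-in, for illustration only; achieving this interface within {$ B(n, m) + o(n) + O(\lg \lg m) $} bits is the article's contribution.

# Illustrative, non-succinct companion: the bound B(n, m) = ceil(lg C(m, n))
# and a plain sorted-array "indexable dictionary" with rank and select.
from bisect import bisect_left
from math import comb

def information_bound(n, m):
    # ceil(lg C(m, n)) computed exactly: ceil(log2(x)) == (x - 1).bit_length() for x >= 1
    return (comb(m, n) - 1).bit_length()

class SortedDictionary:
    def __init__(self, elements):
        self.items = sorted(elements)

    def rank(self, x):
        """Number of elements smaller than x if x is present, else -1 (as in the abstract)."""
        i = bisect_left(self.items, x)
        return i if i < len(self.items) and self.items[i] == x else -1

    def select(self, i):
        """The i-th smallest element (1-indexed here)."""
        return self.items[i - 1]

d = SortedDictionary([3, 9, 14, 27])
print(d.rank(14), d.select(2))     # -> 2 9
print(information_bound(4, 32))    # -> 16 bits suffice for a 4-element subset of {0,...,31}
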
@Article{Janson:2007:PFS,
author = "Svante Janson and Wojciech Szpankowski",
title = "Partial fillup and search time in {LC} tries",
journal = j-TALG,
volume = "3",
number = "4",
pages = "44:1--44:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290681",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Andersson and Nilsson introduced in 1993 a
level-compressed trie (for short, LC trie) in which a
full subtree of a node is compressed to a single node
whose degree is the size of that subtree. Recent
experimental results indicated a ``dramatic
improvement'' when full subtrees are replaced by
``partially filled subtrees.'' In this article, we
provide a theoretical justification of these
experimental results, showing, among others, a rather
moderate improvement in search time over the original
LC tries. For such an analysis, we assume that $n$
strings are generated independently by a binary
memoryless source, with $p$ denoting the probability of
emitting a ``1'' (and $ q = 1 - p $ ). We first prove
that the so-called {$ \alpha $}-fillup level {$ F_n
(\alpha) $} (i.e., the largest level in a trie with {$
\alpha $} fraction of nodes present at this level) is
concentrated on two values with high probability:
either {$ F_n(\alpha) = k_n $} or {$ F_n ({\alpha }) =
k_n + 1 $}, where {$ k_n = \log_{1 / \sqrt {pq}} n - (|\ln
(p / q)| / (2 \ln^{3 / 2} (1 / \sqrt {pq}))) \Phi^{-1}
(\alpha) \sqrt {\ln n} + O(1) $} is an integer and {$
\Phi (x) $} denotes the normal distribution function.
This result directly yields the typical depth (search
time) {$ D_n (\alpha) $} in the {$ \alpha $}-LC tries,
namely, we show that with high probability {$
D_n(\alpha) \sim C_2 \log \log n $}, where {$ C_2 = 1 /
| \log (1 - h / \log (1 / \sqrt {pq}))| $} for {$ p
\neq q $} and {$ h = - p \log p - q \log q $} is the
Shannon entropy rate. This should be compared with
recently found typical depth in the original LC tries,
which is {$ C_1 \log \log n $}, where {$ C_1 = 1 / |
\log (1 - h / \log (1 / \min \{ p, 1 - p \}))| $}. In
conclusion, we observe that {$ \alpha $} affects only
the lower term of the {$ \alpha $}-fillup level {$
F_n(\alpha) $}, and the search time in {$ \alpha $}-LC
tries is of the same order as in the original LC
tries.",
acknowledgement = ack-nhfb,
articleno = "44",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Digital trees; level-compressed tries; partial fillup;
Poissonization; probabilistic analysis; strings;
trees",
}
@Article{Hershberger:2007:FSS,
author = "John Hershberger and Matthew Maxel and Subhash Suri",
title = "Finding the $k$ shortest simple paths: a new algorithm
and its implementation",
journal = j-TALG,
volume = "3",
number = "4",
pages = "45:1--45:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290682",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We describe a new algorithm to enumerate the $k$
shortest simple (loopless) paths in a directed graph
and report on its implementation. Our algorithm is
based on a replacement paths algorithm proposed by
Hershberger and Suri [2001], and can yield a factor {$
\Theta (n) $} improvement for this problem. But there
is a caveat: The fast replacement paths subroutine is
known to fail for some directed graphs. However, the
failure is easily detected, and so our {$k$} shortest
paths algorithm optimistically uses the fast
subroutine, then switches to a slower but correct
algorithm if a failure is detected. Thus, the algorithm
achieves its {$ \Theta (n) $} speed advantage only when
the optimism is justified. Our empirical results show
that the replacement paths failure is a rare
phenomenon, and the new algorithm outperforms the
current best algorithms; the improvement can be
substantial in large graphs. For instance, on GIS map
data with about 5,000 nodes and 12,000 edges, our
algorithm is 4--8 times faster. In synthetic graphs
modeling wireless ad hoc networks, our algorithm is
about 20 times faster.",
acknowledgement = ack-nhfb,
articleno = "45",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "directed paths; Loop-free paths; path equivalence
class; replacement paths",
}
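For readers who simply need $k$ shortest simple paths rather than the article's optimized algorithm, a generic Yen-style routine is available in NetworkX (assuming that library is installed). The snippet below uses it on a hypothetical weighted digraph; it is not an implementation of the replacement-paths approach described above.

# For comparison only: k shortest simple paths via NetworkX's Yen-style routine.
from itertools import islice
import networkx as nx

G = nx.DiGraph()
G.add_weighted_edges_from([
    ("a", "b", 1), ("b", "c", 1), ("a", "c", 3),
    ("b", "d", 2), ("c", "d", 1),
])

k = 3
paths = list(islice(nx.shortest_simple_paths(G, "a", "d", weight="weight"), k))
print(paths)   # three simple a-d paths in order of increasing total weight
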
@Article{Chekuri:2007:EDP,
author = "Chandra Chekuri and Sanjeev Khanna",
title = "Edge-disjoint paths revisited",
journal = j-TALG,
volume = "3",
number = "4",
pages = "46:1--46:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290683",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The approximability of the maximum edge-disjoint paths
problem (EDP) in directed graphs was seemingly settled
by an {$ \Omega (m^{1 / 2 - \epsilon}) $}-hardness
result of Guruswami et al. [2003], and an {$ O(\sqrt
{m}) $} approximation achievable via a natural
multicommodity-flow-based LP relaxation as well as a
greedy algorithm. Here {$m$} is the number of edges in
the graph. We observe that the {$ \Omega (m^{1 / 2 -
\epsilon}) $}-hardness of approximation applies to
sparse graphs, and hence when expressed as a function
of {$n$}, that is, the number of vertices, only an {$
\Omega (n^{1 / 2 - \epsilon}) $}-hardness follows. On
the other hand, {$ O(\sqrt {m}) $}-approximation
algorithms do not guarantee a sublinear (in terms of
{$n$} ) approximation algorithm for dense graphs. We
note that a similar gap exists in the known results on
the integrality gap of the flow-based LP relaxation: an
{$ \Omega (\sqrt {n}) $} lower bound and {$ O(\sqrt
{m}) $} upper bound. Motivated by this discrepancy in
the upper and lower bounds, we study algorithms for EDP
in directed and undirected graphs and obtain improved
approximation ratios. We show that the greedy algorithm
has an approximation ratio of {$ O(\min (n^{2 / 3},
\sqrt {m})) $} in undirected graphs and a ratio of {$
O(\min (n^{4 / 5}, \sqrt {m})) $} in directed graphs.
For acyclic graphs we give an {$ O(\sqrt {n} \ln n) $}
approximation via LP rounding. These are the first
sublinear approximation ratios for EDP. The results
also extend to EDP with weights and to the
uniform-capacity unsplittable flow problem (UCUFP).",
acknowledgement = ack-nhfb,
articleno = "46",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "approximation algorithm; Edge-disjoint paths; greedy
algorithm; multicommodity flow relaxation",
}
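The greedy algorithm referred to above is, in spirit, the shortest-path greedy: route a pair along a shortest path in the remaining graph and delete the edges it uses. The Python sketch below implements that basic flavor on a hypothetical instance; it omits the length thresholds and the analysis that yield the quoted ratios.

# Minimal sketch of a shortest-path greedy for edge-disjoint paths (EDP) in an
# undirected graph: route each terminal pair along a BFS shortest path in the
# remaining graph, then delete the edges of that path.
from collections import deque

def bfs_path(adj, s, t):
    parent = {s: None}
    queue = deque([s])
    while queue:
        u = queue.popleft()
        if u == t:
            path = [t]
            while parent[path[-1]] is not None:
                path.append(parent[path[-1]])
            return path[::-1]
        for v in adj[u]:
            if v not in parent:
                parent[v] = u
                queue.append(v)
    return None

def greedy_edp(edges, pairs):
    adj = {}
    for u, v in edges:
        adj.setdefault(u, set()).add(v)
        adj.setdefault(v, set()).add(u)
    routed = []
    for s, t in pairs:
        path = bfs_path(adj, s, t) if s in adj and t in adj else None
        if path is None:
            continue
        for u, v in zip(path, path[1:]):   # consume the edges of the chosen path
            adj[u].discard(v)
            adj[v].discard(u)
        routed.append((s, t, path))
    return routed

edges = [(1, 2), (2, 3), (1, 4), (4, 3), (2, 4)]
print(greedy_edp(edges, [(1, 3), (1, 3), (1, 3)]))   # routes two of the three copies
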
@Article{Cheriyan:2007:PED,
author = "Joseph Cheriyan and Mohammad R. Salavatipour",
title = "Packing element-disjoint {Steiner} trees",
journal = j-TALG,
volume = "3",
number = "4",
pages = "47:1--47:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290684",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given an undirected graph {$ G(V, E) $} with terminal
set {$ T \subseteq V $}, the problem of packing
element-disjoint Steiner trees is to find the maximum
number of Steiner trees that are disjoint on the
nonterminal nodes and on the edges. The problem is
known to be NP-hard to approximate within a factor of
{$ \Omega (\log n) $}, where {$n$} denotes {$ |V| $}.
We present a randomized {$ O(\log n) $}-approximation
algorithm for this problem, thus matching the hardness
lower bound. Moreover, we show a tight upper bound of
{$ O(\log n) $} on the integrality ratio of a natural
linear programming relaxation.",
acknowledgement = ack-nhfb,
articleno = "47",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "approximation algorithms; element-disjoint; hardness
of approximation; Packing; Steiner trees",
}
@Article{Krivelevich:2007:AAH,
author = "Michael Krivelevich and Zeev Nutov and Mohammad R.
Salavatipour and Jacques Verstraete and Raphael
Yuster",
title = "Approximation algorithms and hardness results for
cycle packing problems",
journal = j-TALG,
volume = "3",
number = "4",
pages = "48:1--48:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290685",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The cycle packing number {$ \nu e(G) $} of a graph
{$G$} is the maximum number of pairwise edge-disjoint
cycles in {$G$}. Computing {$ \nu e(G) $} is an NP-hard
problem. We present approximation algorithms for
computing {$ \nu e (G) $} in both undirected and
directed graphs. In the undirected case we analyze a
variant of the modified greedy algorithm suggested by
Caprara et al. [2003] and show that it has
approximation ratio {$ \Theta (\sqrt {\log n}) $},
where {$ n = |V(G)| $}. This improves upon the previous
{$ O(\log n) $} upper bound for the approximation ratio
of this algorithm. In the directed case we present a {$
\sqrt {n} $}-approximation algorithm. Finally, we give
an {$ O(n^{2 / 3}) $}-approximation algorithm for the
problem of finding a maximum number of edge-disjoint
cycles that intersect a specified subset {$S$} of
vertices. We also study generalizations of these
problems. Our approximation ratios are the currently
best-known ones and, in addition, provide upper bounds
on the integrality gap of standard LP-relaxations of
these problems. In addition, we give lower bounds for
the integrality gap and approximability of {$ \nu_e(G)
$} in directed graphs. Specifically, we prove a lower
bound of {$ \Omega (\log n / \log \log n) $} for the
integrality gap of edge-disjoint cycle packing. We also
show that it is quasi-NP-hard to approximate {$
\nu_e(G) $} within a factor of {$ O(\log^{1 -
\varepsilon} n) $} for any constant {$ \varepsilon > 0
$}. This
improves upon the previously known APX-hardness result
for this problem.",
acknowledgement = ack-nhfb,
articleno = "48",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "approximation algorithms; Cycle packing;
edge-disjoint; hardness of approximation; integrality
gap",
}
@Article{Albers:2007:EEA,
author = "Susanne Albers and Hiroshi Fujiwara",
title = "Energy-efficient algorithms for flow time
minimization",
journal = j-TALG,
volume = "3",
number = "4",
pages = "49:1--49:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290686",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study scheduling problems in battery-operated
computing devices, aiming at schedules with low total
energy consumption. While most of the previous work has
focused on finding feasible schedules in deadline-based
settings, in this article we are interested in
schedules that guarantee good response times. More
specifically, our goal is to schedule a sequence of
jobs on a variable-speed processor so as to minimize
the total cost consisting of the energy consumption and
the total flow time of all jobs.\par
We first show that when the amount of work, for any
job, may take an arbitrary value, then no online
algorithm can achieve a constant competitive ratio.
Therefore, most of the article is concerned with
unit-size jobs. We devise a deterministic constant
competitive online algorithm and show that the offline
problem can be solved in polynomial time.",
acknowledgement = ack-nhfb,
articleno = "49",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "competitive analysis; dynamic programming; flow time;
offline algorithms; online algorithms; Variable-speed
processor",
}
@Article{Chrobak:2007:IOA,
author = "Marek Chrobak and Wojciech Jawor and Ji{\v{r}}{\'\i}
Sgall and Tom{\'a}{\v{s}} Tich{\'y}",
title = "Improved online algorithms for buffer management in
{QoS} switches",
journal = j-TALG,
volume = "3",
number = "4",
pages = "50:1--50:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290687",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the following buffer management problem
arising in QoS networks: Packets with specified weights
and deadlines arrive at a network switch and need to be
forwarded so that the total weight of forwarded packets
is maximized. Packets not forwarded before their
deadlines are lost. The main result of the article is
an online $ 64 / 33 \approx 1.939 $-competitive
algorithm, the first deterministic algorithm for this
problem with competitive ratio below 2. For the
2-uniform case we give an algorithm with ratio $
\approx 1.377 $ and a matching lower bound.",
acknowledgement = ack-nhfb,
articleno = "50",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Online algorithms; scheduling",
}
@Article{Hajiaghayi:2007:ORN,
author = "Mohammad Taghi Hajiaghayi and Robert D. Kleinberg and
Harald R{\"a}cke and Tom Leighton",
title = "Oblivious routing on node-capacitated and directed
graphs",
journal = j-TALG,
volume = "3",
number = "4",
pages = "51:1--51:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290688",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Oblivious routing algorithms for general undirected
networks were introduced by R{\"a}cke [2002], and this
work has led to many subsequent improvements and
applications. Comparatively little is known about
oblivious routing in general directed networks, or even
in undirected networks with node capacities.\par
We present the first nontrivial upper bounds for both
these cases, providing algorithms for $k$-commodity
oblivious routing problems with competitive ratio {$
O(\sqrt {k \log (n)}) $} for undirected
node-capacitated graphs and {$ O(\sqrt {k} n^{1 / 4}
\log (n)) $} for directed graphs. In the special case that
all commodities have a common source or sink, our upper
bound becomes {$ O(\sqrt {n} \log (n)) $} in both
cases, matching the lower bound up to a factor of {$
\log (n) $}. The lower bound (which first appeared in
Azar et al. [2003]) is obtained on a graph with very
high degree. We show that, in fact, the degree of a
graph is a crucial parameter for node-capacitated
oblivious routing in undirected graphs, by providing an
{$ O(\Delta \polylog (n)) $}-competitive oblivious
routing scheme for graphs of degree {$ \Delta $}. For
the directed case, however, we show that the lower
bound of {$ \Omega (\sqrt {n}) $} still holds in
low-degree graphs.\par
Finally, we settle an open question about routing
problems in which all commodities share a common source
or sink. We show that even in this simplified scenario
there are networks in which no oblivious routing
algorithm can achieve a competitive ratio better than
{$ \Omega (\log n) $}.",
acknowledgement = ack-nhfb,
articleno = "51",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "communication networks; directed graphs;
node-capacitated graphs; Oblivious routing",
}
@Article{Auletta:2007:RSU,
author = "Vincenzo Auletta and Roberto {De Prisco} and Paolo
Penna and Giuseppe Persiano",
title = "Routing selfish unsplittable traffic",
journal = j-TALG,
volume = "3",
number = "4",
pages = "52:1--52:??",
month = nov,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1290672.1290689",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:55:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider general resource assignment games
involving selfish users/agents in which users compete
for resources and try to be assigned to those which
maximize their own benefits (e.g., try to route their
traffic through links which minimize the latency of
their own traffic). We propose and study a mechanism
design approach in which an allocation mechanism
assigns users to resources and charges the users for
using the resources so as to induce each user to
truthfully report a private piece of information he/she
holds (e.g., how much traffic he/she needs to
transmit). This information is crucial for computing
optimal (or close to optimal) allocations and an agent
could misreport his/her information to induce the
underlying allocation algorithm to output a solution
which he/she likes more (e.g., which assigns better
resources to him/her).\par
For our resource allocation problems, we give an
algorithmic characterization of the solutions for which
truth-telling is a Nash equilibrium. A natural
application of these results is to a scheduling/routing
problem which is the mechanism design counterpart of
the selfish routing game of Koutsoupias and
Papadimitriou [1999]: Each selfish user wants to route
a piece of unsplittable traffic using one of $m$ links
of different speeds so as to minimize his/her own
latency. Our mechanism design counterpart can be seen
as the problem of scheduling selfish jobs on parallel
related machines and is the dual of the problem of
scheduling (unselfish) jobs on parallel selfish
machines studied by Archer and Tardos
[2001].\par
Koutsoupias and Papadimitriou studied an ``anarchic''
scenario in which each user chooses his/her own link,
and this may produce Nash equilibria of cost {$ \Omega
(\log m / \log \log m) $} times the optimum. Our
mechanism design counterpart is a possible way of
reducing the effect of selfish behavior via suitable
incentives to the agents (i.e., taxes for using the
links). We indeed show that in the resulting game, it
is possible to guarantee an approximation factor of 8
for any number of links/machines (this solution also
works for online settings). However, it remains
impossible to guarantee arbitrarily good approximate
solutions, even for 2 links/machines and even if the
allocation algorithm is allowed superpolynomial time.
This result shows that our scheduling problem with
selfish jobs is more difficult than the scheduling
problem with selfish machines by Archer and Tardos
(which admits exact solutions).\par
We also study some generalizations of this basic
problem.",
acknowledgement = ack-nhfb,
articleno = "52",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Algorithmic mechanism design; Nash equilibrium;
scheduling; selfish routing",
}
@Article{Ruzic:2008:UDD,
author = "Milan Ru{\v{z}}i{\'c}",
title = "Uniform deterministic dictionaries",
journal = j-TALG,
volume = "4",
number = "1",
pages = "1:1--1:??",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328912",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present a new analysis of the well-known family of
multiplicative hash functions, and improved
deterministic algorithms for selecting ``good'' hash
functions. The main motivation is realization of
deterministic dictionaries with fast lookups and
reasonably fast updates. The model of computation is
the Word RAM, and it is assumed that the machine
word-size matches the size of keys in bits. Many of the
modern solutions to the dictionary problem are weakly
nonuniform, that is, they require a number of constants
to be computed at ``compile time'' for the stated time
bounds to hold. The currently fastest deterministic
dictionary uses constants not known to be computable in
polynomial time. In contrast, our dictionaries do not
require any special constants or instructions, and
running times are independent of word (and key) length.
Our family of dynamic dictionaries achieves a
performance of the following type: lookups in time {$
O(t) $} and updates in amortized time {$ O(n^{1 / t})
$}, for an appropriate parameter function {$t$}. Update
procedures require division, whereas searching uses
multiplication only.",
acknowledgement = ack-nhfb,
articleno = "1",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Deterministic algorithms; perfect hashing",
}
@Article{Franceschini:2008:NSB,
author = "Gianni Franceschini and Roberto Grossi",
title = "No sorting? better searching!",
journal = j-TALG,
volume = "4",
number = "1",
pages = "2:1--2:??",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328913",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Questions about order versus disorder in systems and
models have been fascinating scientists over the years.
In computer science, order is intimately related to
sorting, commonly meant as the task of arranging keys
in increasing or decreasing order with respect to an
underlying total order relation. The sorted
organization is amenable for searching a set of $n$
keys, since each search requires {$ \Theta (\log n) $}
comparisons in the worst case, which is optimal if the
cost of a single comparison can be considered a
constant. Nevertheless, we prove that disorder
implicitly provides more information than order does.
For the general case of searching an array of
multidimensional keys whose comparison cost is
proportional to their length (and hence which cannot be
considered a constant), we demonstrate that
``suitable'' disorder gives better bounds than those
derivable by using the natural lexicographic
order.\par
We start from previous work done by Andersson et al.
[2001], who proved that {$ \Theta (k \log \log n / \log
\log (4 + k \log \log n / \log n) + k + \log n) $}
character comparisons (or probes) comprise the tight
complexity for searching a plain sorted array of {$n$}
keys, each of length {$k$}, arranged in lexicographic
order. We describe a novel permutation of the {$n$}
keys that is different from the sorted order. When keys
are kept ``unsorted'' in the array according to this
permutation, the complexity of searching drops to {$
\Theta (k + \log n) $} character comparisons (or
probes) in the worst case, which is optimal among all
possible permutations, up to a constant factor.
Consequently, disorder carries more information than
does order; this fact was not observable before, since
the latter two bounds are {$ \Theta (\log n) $} when {$
k = O(1) $}. More implications are discussed in the
article, including searching in the bit-probe model.",
acknowledgement = ack-nhfb,
articleno = "2",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Implicit data structures; in-place algorithms;
searching; sorting",
}
@Article{Kaplan:2008:THT,
author = "Haim Kaplan and Robert Endre Tarjan",
title = "Thin heaps, thick heaps",
journal = j-TALG,
volume = "4",
number = "1",
pages = "3:1--3:??",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328914",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The Fibonacci heap was devised to provide an
especially efficient implementation of Dijkstra's
shortest path algorithm. Although asymptotically
efficient, it is not as fast in practice as other heap
implementations. Expanding on ideas of H{\o}yer [1995],
we describe three heap implementations (two versions of
thin heaps and one of thick heaps) that have the same
amortized efficiency as Fibonacci heaps, but need less
space and promise better practical performance. As part
of our development, we fill in a gap in H{\o}yer's
analysis.",
acknowledgement = ack-nhfb,
articleno = "3",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "binomial queue; Data structure; decrease key
operation; Fibonacci heap; heap; melding; priority
queue; thick heap; thin heap",
}
@Article{Barbay:2008:ARA,
author = "J{\'e}r{\'e}my Barbay and Claire Kenyon",
title = "Alternation and redundancy analysis of the
intersection problem",
journal = j-TALG,
volume = "4",
number = "1",
pages = "4:1--4:??",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328915",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The intersection of sorted arrays problem has
applications in search engines such as Google. Previous
work has proposed and compared deterministic algorithms
for this problem, in an adaptive analysis based on the
encoding size of a certificate of the result (cost
analysis). We define the alternation analysis, based on
the nondeterministic complexity of an instance. In this
analysis we prove that there is a deterministic
algorithm asymptotically performing as well as any
randomized algorithm in the comparison model. We define
the redundancy analysis, based on a measure of the
internal redundancy of the instance. In this analysis
we prove that any algorithm optimal in the redundancy
analysis is optimal in the alternation analysis, but
that there is a randomized algorithm which performs
strictly better than any deterministic algorithm in the
comparison model. Finally, we describe how these
results can be extended beyond the comparison model.",
acknowledgement = ack-nhfb,
articleno = "4",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Adaptive analysis; alternation analysis; intersection;
intersection of sorted arrays; randomized algorithm;
redundancy analysis",
}
@Article{Pettie:2008:RMS,
author = "Seth Pettie and Vijaya Ramachandran",
title = "Randomized minimum spanning tree algorithms using
exponentially fewer random bits",
journal = j-TALG,
volume = "4",
number = "1",
pages = "5:1--5:??",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328916",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "For many fundamental problems there exist randomized
algorithms that are asymptotically optimal and are
superior to the best-known deterministic algorithm.
Among these are the minimum spanning tree (MST)
problem, the MST sensitivity analysis problem, the
parallel connected components and parallel minimum
spanning tree problems, and the local sorting and set
maxima problems. (For the first two problems there are
provably optimal deterministic algorithms with unknown,
and possibly superlinear, running times.) One downside
of the randomized methods for solving these problems is
that they use a number of random bits linear in the
size of input. In this article we develop some general
methods for reducing exponentially the consumption of
random bits in comparison-based algorithms. In some
cases we are able to reduce the number of random bits
from linear to nearly constant, without affecting the
expected running time.\par
Most of our results are obtained by adjusting or
reorganizing existing randomized algorithms to work
well with a pairwise or {$ O(1) $}-wise independent
sampler. The prominent exception, and the main focus of
this article, is a linear-time randomized minimum
spanning tree algorithm that is not derived from the
well-known Karger-Klein-Tarjan algorithm. In many ways
it resembles more closely the deterministic minimum
spanning tree algorithms based on soft heaps. Further,
using our algorithm as a guide, we present a unified
view of the existing ``nongreedy'' minimum spanning
tree algorithms. Concepts from the Karger-Klein-Tarjan
algorithm, such as F-lightness, MST verification, and
sampled graphs, are related to the concepts of edge
corruption, subgraph contractibility, and soft heaps,
which are the basis of the deterministic MST algorithms
of Chazelle and Pettie-Ramachandran.",
acknowledgement = ack-nhfb,
articleno = "5",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Graph algorithms; minimum spanning trees; random
sampling",
}
@Article{Roditty:2008:FSF,
author = "Liam Roditty",
title = "A faster and simpler fully dynamic transitive
closure",
journal = j-TALG,
volume = "4",
number = "1",
pages = "6:1--6:??",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328917",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We obtain a new fully dynamic algorithm for
maintaining the transitive closure of a directed graph.
Our algorithm maintains the transitive closure matrix
in a total running time of {$ O(m n + ({\rm ins} + {\rm
del}) {\cdot } n^2) $}, where ins(del) is the number of
insert (delete) operations performed. Here {$n$} is the
number of vertices in the graph and {$m$} is the
initial number of edges in the graph. Obviously,
reachability queries can be answered in constant time.
The algorithm uses only {$ O(n^2) $} space, which is
essentially optimal for maintaining the transitive
closure matrix. Our algorithm can also support path
queries. If {$v$} is reachable from {$u$}, the
algorithm can produce a path from {$u$} to $v$ in time
proportional to the length of the path. The best
previously known algorithm for the problem is due to
Demetrescu and Italiano [2000]. Their algorithm has a
total running time of {$ O(n^3 + ({\rm ins} + {\rm
del}) {\cdot } n^2) $}. The query time is also
constant. In addition, we also present a simple
algorithm for directed acyclic graphs (DAGs) with a
total running time of {$ O(m n + {\rm ins} {\cdot } n^2
+ {\rm del}) $}. Our algorithms are obtained by
combining some new ideas with techniques of Italiano
[1986, 1988], King [1999], King and Thorup [2001] and
Frigioni et al. [2001]. We also note that our
algorithms are extremely simple and can be easily
implemented.",
acknowledgement = ack-nhfb,
articleno = "6",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "directed graph; Dynamic graph algorithms;
reachability",
}
@Article{Gabow:2008:FLD,
author = "Harold N. Gabow and Shuxin Nie",
title = "Finding a long directed cycle",
journal = j-TALG,
volume = "4",
number = "1",
pages = "7:1--7:??",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328918",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Consider a digraph with $n$ vertices. For any fixed
value $k$, we present linear- and almost-linear-time
algorithms to find a cycle of length $ \geq k $, if one
exists. We also find a cycle that has length $ \geq
\log n / \log \log n $ in polynomial time, if one
exists. Under an appropriate complexity assumption it
is known to be impossible to improve this guarantee by
more than a $ \log \log n $ factor. Our approach is
based on depth-first search.",
acknowledgement = ack-nhfb,
articleno = "7",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithms; circumference; cycles;
Hamiltonian cycles; long cycles",
}
@Article{Buchsbaum:2008:RLC,
author = "Adam L. Buchsbaum and Emden R. Gansner and Cecilia M.
Procopiuc and Suresh Venkatasubramanian",
title = "Rectangular layouts and contact graphs",
journal = j-TALG,
volume = "4",
number = "1",
pages = "8:1--8:??",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328919",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Contact graphs of isothetic rectangles unify many
concepts from applications including VLSI and
architectural design, computational geometry, and GIS.
Minimizing the area of their corresponding rectangular
layouts is a key problem. We study the
area-optimization problem and show that it is NP-hard
to find a minimum-area rectangular layout of a given
contact graph. We present {$ O(n) $}-time algorithms
that construct {$ O(n^2) $}-area rectangular layouts
for general contact graphs and {$ O(n \log n) $}-area
rectangular layouts for trees. (For trees, this is an
{$ O(\log n) $}-approximation algorithm.) We also
present an infinite family of graphs (respectively,
trees) that require {$ \Omega (n^2) $} (respectively,
{$ \Omega (n \log n) $}) area.\par
We derive these results by presenting a new
characterization of graphs that admit rectangular
layouts, using the related concept of rectangular
duals. A corollary to our results relates the class of
graphs that admit rectangular layouts to
rectangle-of-influence drawings.",
acknowledgement = ack-nhfb,
articleno = "8",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Contact graphs; rectangular duals; rectangular
layouts",
}
@Article{Arge:2008:PRT,
author = "Lars Arge and Mark {De Berg} and Herman Haverkort and
Ke Yi",
title = "The priority {R}-tree: a practically efficient and
worst-case optimal {R}-tree",
journal = j-TALG,
volume = "4",
number = "1",
pages = "9:1--9:??",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328920",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present the priority R-tree, or PR-tree, which is
the first R-tree variant that always answers a window
query using {$ O((N / B)^{1 - 1 / d} + T / B) $} I/Os,
where {$N$} is the number of {$d$}-dimensional (hyper-)
rectangles stored in the R-tree, {$B$} is the disk
block size, and {$T$} is the output size. This is
provably asymptotically optimal and significantly
better than other R-tree variants, where a query may
visit all {$ N / B $} leaves in the tree even when {$ T
= 0 $}. We also present an extensive experimental study
of the practical performance of the PR-tree using both
real-life and synthetic data. This study shows that the
PR-tree performs similarly to the best-known R-tree
variants on real-life and relatively nicely distributed
data, but outperforms them significantly on more
extreme data.",
acknowledgement = ack-nhfb,
articleno = "9",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "R-trees",
}
@Article{Gudmundsson:2008:ADO,
author = "Joachim Gudmundsson and Christos Levcopoulos and Giri
Narasimhan and Michiel Smid",
title = "Approximate distance oracles for geometric spanners",
journal = j-TALG,
volume = "4",
number = "1",
pages = "10:1--10:??",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328921",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given an arbitrary real constant $ \varepsilon > 0 $,
and a geometric graph {$G$} in {$d$}-dimensional
Euclidean space with {$n$} points, {$ O(n) $} edges,
and constant dilation, our main result is a data
structure that answers {$ (1 + \varepsilon)
$}-approximate shortest-path-length queries in constant
time. The data structure can be constructed in {$ O(n
\log n) $} time using {$ O(n \log n) $} space. This
represents the first data structure that answers {$ (1
+ \varepsilon) $}-approximate shortest-path queries in
constant time, and hence functions as an approximate
distance oracle. The data structure is also applied to
several other problems. In particular, we also show
that approximate shortest-path queries between vertices
in a planar polygonal domain with ``rounded'' obstacles
can be answered in constant time. Other applications
include query versions of closest-pair problems, and
the efficient computation of the approximate dilations
of geometric graphs. Finally, we show how to extend the
main result to answer {$ (1 + \varepsilon)
$}-approximate shortest-path-length queries in constant
time for geometric spanner graphs with {$ m = \omega
(n) $} edges. The resulting data structure can be
constructed in {$ O(m + n \log n) $} time using {$ O(n
\log n) $} space.",
acknowledgement = ack-nhfb,
articleno = "10",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "approximation algorithm; computational geometry;
geometric graphs; Shortest paths; spanners",
}
@Article{Gandhi:2008:IBS,
author = "Rajiv Gandhi and Magn{\'u}s M. Halld{\'o}rsson and Guy
Kortsarz and Hadas Shachnai",
title = "Improved bounds for scheduling conflicting jobs with
minsum criteria",
journal = j-TALG,
volume = "4",
number = "1",
pages = "11:1--11:??",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328922",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider a general class of scheduling problems
where a set of conflicting jobs needs to be scheduled
(preemptively or nonpreemptively) on a set of machines
so as to minimize the weighted sum of completion times.
The conflicts among jobs are formed as an arbitrary
conflict graph.\par
Building on the framework of Queyranne and Sviridenko
[2002b], we present a general technique for reducing
the weighted sum of completion-times problem to the
classical makespan minimization problem. Using this
technique, we improve the best-known results for
scheduling conflicting jobs with the min-sum objective,
on several fundamental classes of graphs, including
line graphs, $ (k + 1) $-claw-free graphs, and perfect
graphs. In particular, we obtain the first
constant-factor approximation ratio for nonpreemptive
scheduling on interval graphs. We also improve the
results of Kim [2003] for scheduling jobs on line
graphs and for resource-constrained scheduling.",
acknowledgement = ack-nhfb,
articleno = "11",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithms; coloring; linear
programming; LP rounding; scheduling; sum
multicoloring",
}
@Article{Guerraoui:2008:CMA,
author = "Rachid Guerraoui and Ron R. Levy and Bastian Pochon
and Jim Pugh",
title = "The collective memory of amnesic processes",
journal = j-TALG,
volume = "4",
number = "1",
pages = "12:1--12:??",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328923",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "This article considers the problem of robustly
emulating a shared atomic memory over a distributed
message-passing system where processes can fail by
crashing and possibly recover. We revisit the notion of
atomicity in the crash-recovery context and introduce a
generic algorithm that emulates an atomic memory. The
algorithm is instantiated for various settings
according to whether processes have access to local
stable storage, and whether, in every execution of the
algorithm, a sufficient number of processes are assumed
not to crash. We establish the optimality of specific
instances of our algorithm in terms of resilience, log
complexity (number of stable storage accesses needed in
every read or write operation), as well as time
complexity (number of communication steps needed in
every read or write operation). The article also
discusses the impact of considering a multiwriter
versus a single-writer memory, as well as the impact of
weakening the consistency of the memory by providing
safe or regular semantics instead of atomicity.",
acknowledgement = ack-nhfb,
articleno = "12",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "\log complexity; Atomic registers; crash recovery;
shared-memory emulation",
}
@Article{Karakostas:2008:FAS,
author = "George Karakostas",
title = "Faster approximation schemes for fractional
multicommodity flow problems",
journal = j-TALG,
volume = "4",
number = "1",
pages = "13:1--13:17",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328924",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present fully polynomial approximation schemes for
concurrent multicommodity flow problems that run in
time with the minimum possible dependency on the number
of commodities $k$. We show that by modifying the
algorithms by Garg and K{\"o}nemann [1998] and
Fleischer [2000], we can reduce their running time on a
graph with $n$ vertices and $m$ edges from {$ \tilde
{O}(\varepsilon^{ - 2}(m^2 + k m)) $} to {$ \tilde
{O}(\varepsilon^{ - 2} m^2) $} for an {\em implicit\/}
representation of the output, or {$ \tilde
{O}(\varepsilon^{ - 2}(m^2 + k n)) $} for an {\em
explicit\/} representation, where {$ \tilde {O}(f) $}
denotes a quantity that is {$ O(f \log^{O(1)} m)$}. The
implicit representation consists of a set of trees
rooted at sources (there can be more than one tree per
source), and with sinks as their leaves, together with
flow values for the flow directed from the source to
the sinks in a particular tree. Given this implicit
representation, the approximate value of the concurrent
flow is known, but if we want the explicit flow per
commodity per edge, we would have to combine all these
trees together, and the cost of doing so may be
prohibitive. In case we want to calculate explicitly
the solution flow, we modify our schemes so that they
run in time polylogarithmic in {$ n k $} ({$n$} is the
number of nodes in the network). This is within a
polylogarithmic factor of the trivial lower bound of
time {$ \Omega (n k) $} needed to explicitly write down
a multicommodity flow of {$k$} commodities in a network
of {$n$} nodes. Therefore our schemes are within a
polylogarithmic factor of the minimum possible
dependencies of the running time on the number of
commodities {$k$}.",
acknowledgement = ack-nhfb,
articleno = "13",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "fully-polynomial time approximation schemes;
Multicommodity flows",
}
@Article{Lemire:2008:HBO,
author = "Daniel Lemire and Owen Kaser",
title = "Hierarchical bin buffering: {Online} local moments for
dynamic external memory arrays",
journal = j-TALG,
volume = "4",
number = "1",
pages = "14:1--14:??",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328925",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "For a massive I/O array of size $n$, we want to
compute the first {$N$} local moments, for some
constant {$N$}. Our simpler algorithms partition the
array into consecutive ranges called bins, and apply
not only to local-moment queries, but also to algebraic
queries. With {$N$} buffers of size {$ \sqrt {n} $},
time complexity drops to {$ O(\sqrt {n}) $}. A more
sophisticated approach uses hierarchical buffering and
has a logarithmic time complexity ({$ O(b \log_b n)
$}), when using {$N$} hierarchical buffers of size {$ n
/ b $}. Using overlapped bin buffering, we show that
only one buffer is needed, as with wavelet-based
algorithms, but using much less storage.",
acknowledgement = ack-nhfb,
articleno = "14",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "hierarchical buffers; polynomial fitting; statistical
queries; Very large arrays",
}
@Article{Anshelevich:2008:PDU,
author = "Elliot Anshelevich and Lisa Zhang",
title = "Path decomposition under a new cost measure with
applications to optical network design",
journal = j-TALG,
volume = "4",
number = "1",
pages = "15:1--15:??",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328911.1328926",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We introduce a problem directly inspired by its
application to DWDM (dense wavelength division
multiplexing) network design. We are given a set of
demands to be carried over a network. Our goal is to
choose a route for each demand and to decompose the
network into a collection of edge-disjoint simple
paths. These paths are called optical line systems. The
cost of routing one unit of demand is the number of
line systems with which the demand route overlaps; our
design objective is to minimize the total cost over all
demands. This cost metric is motivated by the need to
minimize O-E-O (optical-electrical-optical) conversions
in optical transmission.\par
For given line systems, it is easy to find the optimal
demand routes. On the other hand, for given demand
routes designing the optimal line systems can be
NP-hard. We first present a 2-approximation for general
network topologies. As optical networks often have low
node degrees, we offer an algorithm that finds the
optimal solution for the special case in which the node
degree is at most 3. Our solution is based on a local
greedy approach.\par
If neither demand routes nor line systems are fixed,
the situation becomes much harder. Even for a
restricted scenario on a 3-regular Hamiltonian network,
no efficient algorithm can guarantee a constant
approximation better than 2. For general topologies, we
offer a simple algorithm with an {$ O(\log K) $}- and
an {$ O(\log n) $}-approximation, where {$K$} is the
number of demands and {$n$} the number of nodes. This
approximation ratio is almost tight. For rings, a
common special topology, we offer a more complex
3/2-approximation algorithm.",
acknowledgement = ack-nhfb,
articleno = "15",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "approximation algorithms; Optical network design; path
decomposition",
}
@Article{Buchsbaum:2008:GE,
author = "Adam L. Buchsbaum",
title = "Guest editorial",
journal = j-TALG,
volume = "4",
number = "2",
pages = "16:1--16:??",
month = may,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1361192.1361193",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:51 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "16",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Blandford:2008:CDV,
author = "Daniel K. Blandford and Guy E. Blelloch",
title = "Compact dictionaries for variable-length keys and data
with applications",
journal = j-TALG,
volume = "4",
number = "2",
pages = "17:1--17:??",
month = may,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1361192.1361194",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:51 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the problem of maintaining a dynamic
dictionary {$T$} of keys and associated data for which
both the keys and data are bit strings that can vary in
length from zero up to the length {$w$} of a machine
word. We present a data structure for this
variable-bit-length dictionary problem that supports
constant time lookup and expected amortized
constant-time insertion and deletion. It uses {$ O(m +
3 n - n \log_2 n) $} bits, where {$n$} is the number of
elements in {$T$}, and {$m$} is the total number of
bits across all strings in {$T$} (keys and data). Our
dictionary uses an array {$ A[1 \ldots n] $} in which
locations store variable-bit-length strings. We present
a data structure for this variable-bit-length array
problem that supports worst-case constant-time lookups
and updates and uses {$ O(m + n) $} bits, where {$m$}
is the total number of bits across all strings stored
in {$A$}.\par
The motivation for these structures is to support
applications for which it is helpful to efficiently
store short varying-length bit strings. We present
several applications, including representations for
semidynamic graphs, order queries on integer sets,
cardinal trees with varying cardinality, and simplicial
meshes of {$d$} dimensions. These results either
generalize or simplify previous results.",
acknowledgement = ack-nhfb,
articleno = "17",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Compression",
}
@Article{Kolluri:2008:PGM,
author = "Ravikrishna Kolluri",
title = "Provably good moving least squares",
journal = j-TALG,
volume = "4",
number = "2",
pages = "18:1--18:??",
month = may,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1361192.1361195",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:51 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We analyze a moving least squares (MLS) interpolation
scheme for reconstructing a surface from point cloud
data. The input is a sufficiently dense set of sample
points that lie near a closed surface {$F$} with
approximate surface normals. The output is a
reconstructed surface passing near the sample points.
For each sample point $s$ in the input, we define a
linear point function that represents the local shape
of the surface near $s$. These point functions are
combined by a weighted average, yielding a
three-dimensional function {$I$}. The reconstructed
surface is implicitly defined as the zero set of
{$I$}.\par
We prove that the function {$I$} is a good
approximation to the signed distance function of the
sampled surface {$F$} and that the reconstructed
surface is geometrically close to and isotopic to
{$F$}. Our sampling requirements are derived from the
local feature size function used in Delaunay-based
surface reconstruction algorithms. Our analysis can
handle noisy data provided the amount of noise in the
input dataset is small compared to the feature size of
{$F$}.",
acknowledgement = ack-nhfb,
articleno = "18",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "implicit surfaces; interpolation; Reconstruction",
}
@Article{Fusy:2008:DOT,
author = "{\'E}ric Fusy and Gilles Schaeffer and Dominique
Poulalhon",
title = "Dissections, orientations, and trees with applications
to optimal mesh encoding and random sampling",
journal = j-TALG,
volume = "4",
number = "2",
pages = "19:1--19:??",
month = may,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1361192.1361196",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:51 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present a bijection between some quadrangular
dissections of an hexagon and unrooted binary trees
with interesting consequences for enumeration, mesh
compression, and graph sampling. Our bijection yields
an efficient uniform random sampler for 3-connected
planar graphs, which turns out to be determinant for
the quadratic complexity of the current best-known
uniform random sampler for labelled planar graphs. It
also provides an encoding for the set {$ P(n) $} of
{$n$}-edge 3-connected planar graphs that matches the
entropy bound {$ 1 / n \log_2 |P(n)| = 2 + o(1) $}
bits per edge (bpe). This solves a theoretical problem
recently raised in mesh compression as these graphs
abstract the combinatorial part of meshes with
spherical topology. We also achieve the optimal
parametric rate {$ 1 / n \log_2 |P(n, i, j)| $} bpe
for graphs of {$ P(n) $} with {$i$} vertices and {$j$}
faces, matching in particular the optimal rate for
triangulations. Our encoding relies on a linear time
algorithm to compute an orientation associated with the
minimal Schnyder wood of a 3-connected planar map. This
algorithm is of independent interest, and it is, for
instance, a key ingredient in a recent straight line
drawing algorithm for 3-connected planar graphs.",
acknowledgement = ack-nhfb,
articleno = "19",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Bijection; coding; counting; random generation",
}
@Article{Vegh:2008:PDA,
author = "L{\'a}szl{\'o} A. V{\'e}gh and Andr{\'a}s A.
Bencz{\'u}r",
title = "Primal-dual approach for directed vertex connectivity
augmentation and generalizations",
journal = j-TALG,
volume = "4",
number = "2",
pages = "20:1--20:??",
month = may,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1361192.1361197",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:51 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In their seminal paper, Frank and Jord{\'a}n [1995]
show that a large class of optimization problems,
including certain directed graph augmentation, fall
into the class of covering supermodular functions over
pairs of sets. They also give an algorithm for such
problems, however, it relies on the ellipsoid method.
Prior to our result, combinatorial algorithms existed
only for the 0--1 valued problem. Our key result is a
combinatorial algorithm for the general problem that
includes directed vertex or S-T connectivity
augmentation. The algorithm is based on Bencz{\'u}r's
previous algorithm for the 0--1 valued case
[Bencz{\'u}r 2003].\par
Our algorithm uses a primal-dual scheme for finding
covers of partially ordered sets that satisfy natural
abstract properties as in Frank and Jord{\'a}n. For an
initial (possibly greedy) cover, the algorithm searches
for witnesses for the necessity of each element in the
cover. If no two (weighted) witnesses have a common
cover, the solution is optimal. As long as this is not
the case, the witnesses are gradually exchanged for
smaller ones. Each witness change defines an
appropriate change in the solution; these changes are
finally unwound in a shortest-path manner to obtain a
solution of size one less.",
acknowledgement = ack-nhfb,
articleno = "20",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "combinatorial algorithm; Vertex connectivity
augmentation",
}
@Article{Sanders:2008:AAS,
author = "Peter Sanders and David Steurer",
title = "An asymptotic approximation scheme for multigraph edge
coloring",
journal = j-TALG,
volume = "4",
number = "2",
pages = "21:1--21:??",
month = may,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1361192.1361198",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:51 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The edge coloring problem considers the assignment of
colors from a minimum number of colors to edges of a
graph such that no two edges with the same color are
incident to the same node. We give polynomial time
algorithms for approximate edge coloring of
multigraphs, that is, parallel edges are allowed. The
best previous algorithms achieve a fixed constant
approximation factor plus a small additive offset. One
of our algorithms achieves solution quality $ {\rm opt}
+ \sqrt {9 {\rm opt} / 2} $ and has execution time
polynomial in the number of nodes and the logarithm of
the maximum edge multiplicity.",
acknowledgement = ack-nhfb,
articleno = "21",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "chromatic index; data migration; Edge coloring;
multigraphs",
}
@Article{Chawla:2008:ENT,
author = "Shuchi Chawla and Anupam Gupta and Harald R{\"a}cke",
title = "Embeddings of negative-type metrics and an improved
approximation to generalized sparsest cut",
journal = j-TALG,
volume = "4",
number = "2",
pages = "22:1--22:??",
month = may,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1361192.1361199",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:51 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article, we study metrics of negative type,
which are metrics {$ (V, d) $} such that {$ \sqrt {d}
$} is an Euclidean metric; these metrics are thus also
known as {$ \ell_2 $}-squared metrics. We show how to
embed {$n$}-point negative-type metrics into Euclidean
space $ \ell_2 $ with distortion {$ D = O(\log^{3 / 4} n)
$}. This embedding result, in turn, implies an {$
O(\log^{3 / 4} k) $}-approximation algorithm for the
Sparsest Cut problem with nonuniform demands. Another
corollary we obtain is that {$n$}-point subsets of {$
\ell_1 $} embed into {$ \ell_2 $} with distortion {$
O(\log^{3 / 4} n) $}.",
acknowledgement = ack-nhfb,
articleno = "22",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithm; embedding; metrics;
negative-type metric; sparsest cut",
}
@Article{Chuzhoy:2008:ASN,
author = "Julia Chuzhoy and Anupam Gupta and Joseph (Seffi) Naor
and Amitabh Sinha",
title = "On the approximability of some network design
problems",
journal = j-TALG,
volume = "4",
number = "2",
pages = "23:1--23:??",
month = may,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1361192.1361200",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:51 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Consider the following classical network design
problem: a set of terminals {$ T = \{ t_i \} $} wishes
to send traffic to a root {$r$} in an {$n$}-node graph
{$ G = (V, E) $}. Each terminal {$ t_i $} sends {$ d_i
$} units of traffic and enough bandwidth has to be
allocated on the edges to permit this. However,
bandwidth on an edge {$e$} can only be allocated in
integral multiples of some base capacity $ u_e $ and
hence provisioning $ k {\times } u_e $ bandwidth on
edge $e$ incurs a cost of $ \lceil k \rceil $ times the
cost of that edge. The objective is a minimum-cost
feasible solution.\par
This is one of many network design problems widely
studied where the bandwidth allocation is governed by
side constraints: edges can only allow a subset of
cables to be purchased on them or certain
quality-of-service requirements may have to be
met.\par
In this work, we show that this problem and, in fact,
several basic problems in this general network design
framework cannot be approximated better than {$ \Omega
(\log \log n) $} unless {$ {\rm NP} \subseteq {\rm
DTIME}(n^{O(\log \log \log n)}) $}, where {$ |V| = n $}.
In particular, we show that this inapproximability
threshold holds for (i) the Priority-Steiner Tree
problem, (ii) the (single-sink) Cost-Distance problem,
and (iii) the single-sink version of an even more
fundamental problem, Fixed Charge Network Flow. Our
results provide a further breakthrough in the
understanding of the level of complexity of network
design problems. These are the first nonconstant
hardness results known for all these problems.",
acknowledgement = ack-nhfb,
articleno = "23",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "cost-distance; fixed charge network flow; Hardness of
approximation; network design; priority Steiner tree",
}
@Article{Immorlica:2008:LCM,
author = "Nicole Immorlica and Mohammad Mahdian and Vahab S.
Mirrokni",
title = "Limitations of cross-monotonic cost-sharing schemes",
journal = j-TALG,
volume = "4",
number = "2",
pages = "24:1--24:??",
month = may,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1361192.1361201",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Mon Jun 16 11:56:51 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A cost-sharing scheme is a set of rules defining how
to share the cost of a service (often computed by
solving a combinatorial optimization problem) among
serviced customers. A cost-sharing scheme is
cross-monotonic if it satisfies the property that
everyone is better off when the set of people who
receive the service expands. In this article, we
develop a novel technique for proving upper bounds on
the budget-balance factor of cross-monotonic
cost-sharing schemes, that is, the worst-case ratio of
recovered cost to total cost. We apply this technique
to games defined, based on several combinatorial
optimization problems, including the problems of edge
cover, vertex cover, set cover, and metric facility
location and, in each case, derive tight or
nearly-tight bounds. In particular, we show that for
the facility location game, there is no cross-monotonic
cost-sharing scheme that recovers more than a third of
the total cost. This result, together with a recent
1/3-budget-balanced cross-monotonic cost-sharing scheme
of P{\'a}l and Tardos [2003] closes the gap for the
facility location game. For the vertex cover and set
cover games, we show that no cross-monotonic
cost-sharing scheme can recover more than a {$ O(n^{-1
/ 3}) $} and {$ O(1 / n) $} fraction of the total cost,
respectively. Finally, we study the implications of our
results on the existence of group-strategyproof
mechanisms. We show that every group-strategyproof
mechanism corresponds to a cost-sharing scheme that
satisfies a condition weaker than cross-monotonicity.
Using this, we prove that group-strategyproof
mechanisms satisfying additional properties give rise
to cross-monotonic cost-sharing schemes and therefore
our upper bounds hold.",
acknowledgement = ack-nhfb,
articleno = "24",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Cross-monotonic cost-sharing schemes;
group-strategyproof mechanism design; probabilistic
method",
}
@Article{Dinitz:2008:OAS,
author = "Yefim Dinitz and Shay Solomon",
title = "Optimality of an algorithm solving the {Bottleneck
Tower of Hanoi} problem",
journal = j-TALG,
volume = "4",
number = "3",
pages = "25:1--25:??",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1367064.1367065",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:06 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study the Bottleneck Tower of Hanoi puzzle posed by
D. Wood in 1981. There, a relaxed placement rule allows
a larger disk to be placed {\em higher\/} than a
smaller one if their size difference is less than a
pregiven value $k$. A shortest sequence of moves
(optimal algorithm) transferring all the disks placed
on some peg in decreasing order of size, to another peg
in the same order is in question. In 1992, D. Poole
suggested a natural disk-moving strategy for this
problem, and computed the length of the shortest move
sequence under its framework. However, other strategies
were overlooked, so the lower bound/optimality question
remained open. In 1998, Benditkis, Berend, and Safro
proved the optimality of Poole's algorithm for the
first nontrivial case $ k = 2 $. We prove Poole's
algorithm to be optimal in the general case.",
acknowledgement = ack-nhfb,
articleno = "25",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Optimality proofs; Tower of Hanoi",
}
@Article{Alonso:2008:DP,
author = "Laurent Alonso and Edward M. Reingold",
title = "Determining plurality",
journal = j-TALG,
volume = "4",
number = "3",
pages = "26:1--26:??",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1367064.1367066",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:06 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given a set of $n$ elements, each of which is colored
one of $c$ colors, we must determine an element of the
plurality (most frequently occurring) color by pairwise
equal/unequal color comparisons of elements. We prove
that $ (c - 1)(n - c) / 2 $ color comparisons are
necessary in the worst case to determine the plurality
color and give an algorithm requiring {$ (0.775 c +
5.9) n + O(c^2) $} color comparisons for {$ c \geq 9
$}.",
acknowledgement = ack-nhfb,
articleno = "26",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Algorithm analysis; majority problem; plurality
problem",
}
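For contrast with the comparison bounds in the entry above, the
following Python sketch implements only the naive strategy: keep one
representative per color class seen so far and classify each new
element against those representatives, which can cost roughly c n
comparisons in total, well above the bounds quoted above. The
predicate name same_color and its signature are assumptions made for
illustration; the paper's algorithm is considerably more refined.

    def plurality_element(elements, same_color):
        # same_color(x, y) is the only allowed query: an equal/unequal
        # color comparison between elements x and y. Assumes a
        # nonempty input.
        groups = []  # one [representative, count] pair per color seen
        for x in elements:
            for g in groups:
                if same_color(x, g[0]):
                    g[1] += 1
                    break
            else:
                groups.append([x, 1])
        # Return a representative of the most frequently seen color.
        return max(groups, key=lambda g: g[1])[0]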
@Article{Alonso:2008:ACL,
author = "Laurent Alonso and Edward M. Reingold",
title = "Average-case lower bounds for the plurality problem",
journal = j-TALG,
volume = "4",
number = "3",
pages = "27:1--27:??",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1367064.1367067",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:06 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given a set of $n$ elements, each of which is colored
one of $ c \geq 2 $ colors, we have to determine an
element of the plurality (most frequently occurring)
color by pairwise equal/unequal color comparisons of
elements. We derive lower bounds for the expected
number of color comparisons when the $ c^n $ colorings
are equally probable. We prove a general lower bound of
{$ c / 3 n - O(\sqrt n) $} for {$ c \geq 2 $}; we prove
the stronger particular bounds of {$ 7 / 6 n - O(\sqrt
n) $} for {$ c = 3 $}, {$ 54 / 35 n - O(\sqrt n) $} for
{$ c = 4 $}, {$ 607 / 315 n - O(\sqrt n) $} for {$ c = 5
$}, {$ 1592 / 693 n - O(\sqrt n) $} for {$ c = 6 $}, {$
7985 / 3003 n - O(\sqrt n) $} for {$ c = 7 $}, and {$
19402 / 6435 n - O(\sqrt n) $} for {$ c = 8 $}.",
acknowledgement = ack-nhfb,
articleno = "27",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Algorithm analysis; majority problem; plurality
problem",
}
@Article{Lu:2008:BPS,
author = "Hsueh-I Lu and Chia-Chi Yeh",
title = "Balanced parentheses strike back",
journal = j-TALG,
volume = "4",
number = "3",
pages = "28:1--28:??",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1367064.1367068",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:06 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "An {\em ordinal tree\/} is an arbitrary rooted tree
where the children of each node are ordered. Succinct
representations for ordinal trees with efficient query
support have been extensively studied. The best
previously known result is due to Geary et al. [2004b,
pages 1--10]. The number of bits required by their
representation for an $n$-node ordinal tree {$T$} is {$
2 n + o(n) $}, whose first-order term is
information-theoretically optimal. Their representation
supports a large set of {$ O(1) $}-time queries on
{$T$}. Based upon a balanced string of {$ 2 n $}
parentheses, we give an improved {$ 2 n + o(n) $}-bit
representation for {$T$}. Our improvement is two-fold:
First, the set of {$ O(1) $}-time queries supported by
our representation is a proper superset of that
supported by the representation of Geary, Raman, and
Raman. Second, it is also much easier for our
representation to support new queries by simply adding
new auxiliary strings.",
acknowledgement = ack-nhfb,
articleno = "28",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Succinct data structures; XML document
representation",
}
@Article{Roditty:2008:RSR,
author = "Iam Roditty and Mikkel Thorup and Uri Zwick",
title = "Roundtrip spanners and roundtrip routing in directed
graphs",
journal = j-TALG,
volume = "4",
number = "3",
pages = "29:1--29:??",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1367064.1367069",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:06 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We introduce the notion of {\em roundtrip-spanners\/}
of weighted {\em directed\/} graphs and describe
efficient algorithms for their construction. We show
that for every integer $ k \geq 1 $ and any $ \epsilon
> 0 $, any directed graph on $n$ vertices with edge
weights in the range {$ [1, W] $} has a {$ (2 k +
\epsilon) $}-roundtrip-spanner with {$ O(\min ((k^2 /
\epsilon) n^{1 + 1 / k} \log (n W), (k / \epsilon)^2
n^{1 + 1 / k} (\log n)^{2 - 1 / k})) $} edges. We then
extend these constructions and obtain compact roundtrip
routing schemes. For every integer {$ k \geq 1 $} and
every {$ \epsilon > 0 $}, we describe a roundtrip
routing scheme that has stretch {$ 4 k + \epsilon $},
and uses at each vertex a routing table of size {$
\tilde {O}((k^2 / \epsilon) n^{1 / k} \log (n W)) $}.
We also show that any weighted directed graph with {\em
arbitrary\/} positive edge weights has a
3-roundtrip-spanner with {$ O(n^{3 / 2}) $} edges. This
result is optimal. Finally, we present a stretch 3
roundtrip routing scheme that uses local routing tables
of size {$ \tilde {O}(n^{1 / 2}) $}. This routing
scheme is essentially optimal. The roundtrip-spanner
constructions and the roundtrip routing schemes for
directed graphs that we describe are only slightly
worse than the best available spanners and routing
schemes for undirected graphs. Our roundtrip routing
schemes substantially improve previous results of Cowen
and Wagner. Our results are obtained by combining ideas
of Cohen, Cowen and Wagner, Thorup and Zwick, with some
new ideas.",
acknowledgement = ack-nhfb,
articleno = "29",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "distances; roundtrip; Routing; shortest paths;
spanners",
}
@Article{Gu:2008:OBD,
author = "Qian-Ping Gu and Hisao Tamaki",
title = "Optimal branch-decomposition of planar graphs in {$
O(n^3) $} time",
journal = j-TALG,
volume = "4",
number = "3",
pages = "30:1--30:??",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1367064.1367070",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:06 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We give an {$ O(n^3) $} time algorithm for
constructing a minimum-width branch-decomposition of a
given planar graph with {$n$} vertices. This is
achieved through a refinement to the previously best
known algorithm of Seymour and Thomas, which runs in {$
O(n^4) $} time.",
acknowledgement = ack-nhfb,
articleno = "30",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Branch-decompositions; planar graphs",
}
@Article{Czumaj:2008:TEM,
author = "Artur Czumaj and Christian Sohler",
title = "Testing {Euclidean} minimum spanning trees in the
plane",
journal = j-TALG,
volume = "4",
number = "3",
pages = "31:1--31:??",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1367064.1367071",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:06 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given a Euclidean graph {$G$} over a set {$P$} of
{$n$} points in the plane, we are interested in
verifying whether {$G$} is a Euclidean minimum spanning
tree (EMST) of {$P$} or {$G$} differs from it in more
than {$ \epsilon n $} edges. We assume that {$G$} is
given in adjacency list representation and the
point/vertex set {$P$} is given in an array. We present
a property testing algorithm that accepts graph {$G$}
if it is an EMST of {$P$} and that rejects with
probability at least {$ 2 / 3 $} if {$G$} differs from
every EMST of {$P$} in more than {$ \epsilon n $}
edges. Our algorithm runs in {$ O(\sqrt n / \epsilon
\cdot \log^2 (n / \epsilon)) $} time and has a query
complexity of {$ O(\sqrt n / \epsilon \cdot \log (n /
\epsilon)) $}.",
acknowledgement = ack-nhfb,
articleno = "31",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Euclidean minimum spanning tree; property testing;
randomized algorithms",
}
@Article{Makinen:2008:DEC,
author = "Veli M{\"a}kinen and Gonzalo Navarro",
title = "Dynamic entropy-compressed sequences and full-text
indexes",
journal = j-TALG,
volume = "4",
number = "3",
pages = "32:1--32:??",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1367064.1367072",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:06 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We give new solutions to the Searchable Partial Sums
with Indels problem. Given a sequence of $n$ $k$-bit
numbers, we present a structure taking $ k n + o(k n) $
bits of space, capable of performing the operations {\em sum},
{\em search}, {\em insert}, and {\em delete}, all in {$
O(\log n) $} worst-case time, for any {$ k = O(\log n)
$}. This extends previous results by Hon et al. [2003c]
achieving the same space and {$ O(\log n / \log \log n)
$} time complexities for the queries, yet offering
complexities for {\em insert\/} and {\em delete\/} that
are amortized and worse than ours, and supported only
for {$ k = O(1) $}. Our result matches an existing
lower bound for large values of {$k$}.\par
We also give new solutions to the Dynamic Sequence
problem. Given a sequence of {$n$} symbols in the range
{$ [1, \sigma] $} with binary zero-order entropy {$ H_0
$}, we present a dynamic data structure that requires
{$ n H_0 + o(n \log \sigma) $} bits of space, which is
capable of performing {\em rank\/} and {\em select}, as
well as inserting and deleting symbols at arbitrary
positions, in {$ O(\log n \log \sigma) $} time. Our
result is the {\em first\/} entropy-bound dynamic data
structure for {\em rank\/} and {\em select\/} over
general sequences.\par
In the case {$ \sigma = 2 $}, where both previous
problems coincide, we improve the dynamic solution of
Hon et al. [2003c] in that we compress the sequence.
The only previous result with entropy-bound space for
dynamic binary sequences is by Blandford and Blelloch
[2004], which has the same complexities as our
structure, but does not achieve a constant of 1 multiplying
the entropy term in the space complexity.\par
Finally, we present a new dynamic compressed full-text
self-index, for a collection of texts over an alphabet
of size {$ \sigma $}, of overall length {$n$} and $h$
th order empirical entropy {$ H_h $}. The index
requires {$ n H_h + o(n \log \sigma) $} bits of space,
for any {$ h \leq \alpha \log_\sigma n $} and any constant
{$ 0 < \alpha < 1 $}.\par
An important result we prove in this paper is that the
wavelet tree of the Burrows--Wheeler transform of a
text, if compressed with a technique that achieves
zero-order compression locally (e.g., Raman et al.
[2002]), automatically achieves $h$ th order entropy
space for any $h$. This unforeseen relation is
essential for the results of the previous paragraph,
but it also derives into significant simplifications on
many existing static compressed full-text self-indexes
that build on wavelet trees.",
acknowledgement = ack-nhfb,
articleno = "32",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Compressed dynamic data structures; compressed text
databases; entropy; partial sums; sequences",
}
@Article{Kowalski:2008:WAD,
author = "Dariusz R. Kowalski and Alexander A. Shvartsman",
title = "Writing-all deterministically and optimally using a
nontrivial number of asynchronous processors",
journal = j-TALG,
volume = "4",
number = "3",
pages = "33:1--33:??",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1367064.1367073",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:06 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The problem of performing $n$ tasks on $p$
asynchronous or undependable processors is a basic
problem in distributed computing. This article
considers an abstraction of this problem called {\em
Write-All: using $p$ processors write 1's into all
locations of an array of size $n$}. In this problem
writing 1 abstracts the notion of performing a simple
task. Despite substantial research, there is a dearth
of efficient deterministic asynchronous algorithms for
{\em Write-All\/}. Efficiency of algorithms is measured
in terms of {\em work\/} that accounts for all local
steps performed by the processors in solving the
problem. Thus, an optimal algorithm would have work {$
\Theta (n) $}; however, it is known that optimality
cannot be achieved when {$ p = \Omega (n) $}. The quest
then is to obtain work-optimal solutions for this
problem using a nontrivial, compared to {$n$}, number
of processors {$p$}. The algorithm presented in this
article has work complexity of {$ O(n + p^{2 + \epsilon
}) $}, and it achieves work optimality for {$ p =
O(n^{1 / (2 + \epsilon)}) $} for any {$ \epsilon > 0
$}, while the previous best result achieved optimality
for {$ p \leq 4 \sqrt n / \log n $}. Additionally, the
new result uses {\em only\/} the atomic read/write
memory, without resorting to using the test-and-set
primitive that was necessary in the previous
solution.",
acknowledgement = ack-nhfb,
articleno = "33",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Asynchrony; distributed algorithms; shared memory;
work; Write-All",
}
@Article{Even:2008:ACR,
author = "Guy Even and Retsef Levi and Dror Rawitz and Baruch
Schieber and Shimon (Moni) Shahar and Maxim
Sviridenko",
title = "Algorithms for capacitated rectangle stabbing and lot
sizing with joint set-up costs",
journal = j-TALG,
volume = "4",
number = "3",
pages = "34:1--34:??",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1367064.1367074",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:06 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In the rectangle stabbing problem, we are given a set
of axis parallel rectangles and a set of horizontal and
vertical lines, and our goal is to find a minimum size
subset of lines that intersect all the rectangles. In
this article, we study the capacitated version of this
problem in which the input includes an integral
capacity for each line. The capacity of a line bounds
the number of rectangles that the line can cover. We
consider two versions of this problem. In the first,
one is allowed to use only a single copy of each line
({\em hard capacities\/}), and in the second, one is
allowed to use multiple copies of every line, but the
multiplicities are counted in the size (or weight) of
the solution ({\em soft capacities\/}).\par
We present an exact polynomial-time algorithm for the
weighted one-dimensional case with hard capacities that
can be extended to the one-dimensional weighted case
with soft capacities. This algorithm is also extended
to solve a certain capacitated multi-item {\em
lot-sizing\/} inventory problem with joint set-up
costs. For the case of $d$-dimensional rectangle
stabbing with soft capacities, we present a $ 3 d
$-approximation algorithm for the unweighted case. For
the $d$-dimensional rectangle stabbing problem with hard
capacities, we present a bi-criteria algorithm that
computes $ 4 d $-approximate solutions that use at most
two copies of every line. Finally, we present hardness
results for rectangle stabbing when the dimension is
part of the input and for a two-dimensional weighted
version with hard capacities.",
acknowledgement = ack-nhfb,
articleno = "34",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithms; capacitated covering; lot
sizing; rectangle stabbing",
}
@Article{Zhang:2008:CCP,
author = "Cun-Quan Zhang and Yongbin Ou",
title = "Clustering, community partition and disjoint spanning
trees",
journal = j-TALG,
volume = "4",
number = "3",
pages = "35:1--35:??",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1367064.1367075",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:06 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Clustering method is one of the most important tools
in statistics. In a graph theory model, clustering is
the process of finding all dense subgraphs. A
mathematically well-defined measure for graph density
is introduced in this article as follows. Let {$ G =
(V, E) $} be a graph (or multi-graph) and {$H$} be a
subgraph of {$G$}. The dynamic density of {$H$} is the
greatest integer {$k$} such that {$ \min_P \{ |E(H /
P)| / (|V(H / P)| - 1) \} > k $}, where the
minimum is taken over all possible partitions {$P$} of
the vertex set of {$H$}, and {$ H / P $} is the graph
obtained from {$H$} by contracting each part of {$P$}
into a single vertex. A subgraph {$H$} of {$G$} is a
level-{$k$} community if {$H$} is a maximal subgraph of
{$G$} with dynamic density at least {$k$}. An algorithm
is designed in this paper to detect all level-{$h$}
communities of an input multi-graph {$G$}. The
worst-case complexity of this algorithm is upper
bounded by {$ O(|V(G)|^2 h^2) $}. This new method is
one of the few available clustering methods that are
mathematically well-defined, supported by rigorous
mathematical proof, and able to achieve the optimization
goal with polynomial complexity. As a byproduct, the
algorithm can also be applied to find edge-disjoint
spanning trees of a multi-graph; its worst-case
complexity is lower than that of all known algorithms
for multi-graphs.",
acknowledgement = ack-nhfb,
articleno = "35",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "clustering; community; dense subgraph; dynamic
density; hierarchical clustering; polynomial algorithm;
Spanning trees",
}
@Article{Yu:2008:IAM,
author = "Hung-I. Yu and Tzu-Chin Lin and Biing-Feng Wang",
title = "Improved algorithms for the minmax-regret 1-center and
1-median problems",
journal = j-TALG,
volume = "4",
number = "3",
pages = "36:1--36:??",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1367064.1367076",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:06 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article, efficient algorithms are presented
for the minmax-regret 1-center and 1-median problems on
a general graph and a tree with uncertain vertex
weights. For the minmax-regret 1-center problem on a
general graph, we improve the previous upper bound from
{$ O(m n^2 \log n) $} to {$ O(m n \log n) $}. For the
problem on a tree, we improve the upper bound from {$
O(n^2) $} to {$ O(n \log^2 n) $}. For the minmax-regret
1-median problem on a general graph, we improve the
upper bound from {$ O(m n^2 \log n) $} to {$ O(m n^2 +
n^3 \log n) $}. For the problem on a tree, we improve
the upper bound from {$ O(n \log^2 n) $} to {$ O(n \log
n) $}.",
acknowledgement = ack-nhfb,
articleno = "36",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "centers; general graphs; Location theory; medians;
minmax-regret optimization; trees",
}
@Article{Abraham:2008:CNI,
author = "Ittai Abraham and Cyril Gavoille and Dahlia Malkhi and
Noam Nisan and Mikkel Thorup",
title = "Compact name-independent routing with minimum
stretch",
journal = j-TALG,
volume = "4",
number = "3",
pages = "37:1--37:??",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1367064.1367077",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:06 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given a weighted undirected network with arbitrary
node names, we present a compact routing scheme, using
a {$ \tilde {O}(\sqrt n) $} space routing table at each
node, and routing along paths of stretch 3, that is, at
most thrice as long as the minimum cost paths. This is
optimal in a very strong sense. It is known that no
compact routing using {$ o(n) $} space per node can
route with stretch below 3. Also, it is known that any
stretch below 5 requires {$ \Omega (\sqrt n) $} space
per node.",
acknowledgement = ack-nhfb,
articleno = "37",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Compact routing",
}
@Article{Pruhs:2008:GBR,
author = "Kirk Pruhs and Patchrawat Uthaisombut and Gerhard
Woeginger",
title = "Getting the best response for your erg",
journal = j-TALG,
volume = "4",
number = "3",
pages = "38:1--38:??",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1367064.1367078",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:06 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the speed scaling problem of minimizing
the average response time of a collection of
dynamically released jobs subject to a constraint {$A$}
on energy used. We propose an algorithmic approach in
which an energy optimal schedule is computed for a huge
{$A$}, and then the energy optimal schedule is
maintained as {$A$} decreases. We show that this
approach yields an efficient algorithm for equi-work
jobs. We note that the energy optimal schedule has the
surprising feature that the job speeds are not monotone
functions of the available energy. We then explain why
this algorithmic approach is problematic for arbitrary
work jobs. Finally, we explain how to use the algorithm
for equi-work jobs to obtain an algorithm for arbitrary
work jobs that is {$ O(1) $}-approximate with respect
to average response time, given an additional factor of
{$ (1 + \epsilon) $} energy.",
acknowledgement = ack-nhfb,
articleno = "38",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "frequency scaling; power management; scheduling; Speed
scaling; voltage scaling",
}
@Article{Ajwani:2008:AIT,
author = "Deepak Ajwani and Tobias Friedrich and Ulrich Meyer",
title = "An {$ O(n^{2.75}) $} algorithm for incremental
topological ordering",
journal = j-TALG,
volume = "4",
number = "4",
pages = "39:1--39:??",
month = aug,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1383369.1383370",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:43 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present a simple algorithm which maintains the
topological order of a directed acyclic graph (DAG)
with $n$ nodes, under an online edge insertion
sequence, in {$ O(n^{2.75}) $} time, independent of the
number {$m$} of edges inserted. For dense DAGs, this is
an improvement over the previous best result of {$
O(\min (m^{3 / 2} \log n, m^{3 / 2} + n^2 \log n)) $} by
Katriel and Bodlaender [2006]. We also provide an
empirical comparison of our algorithm with other
algorithms for incremental topological sorting.",
acknowledgement = ack-nhfb,
articleno = "39",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Dynamic algorithms; graphs; online algorithms;
topological order",
}
@Article{Ibarra:2008:FDA,
author = "Louis Ibarra",
title = "Fully dynamic algorithms for chordal graphs and split
graphs",
journal = j-TALG,
volume = "4",
number = "4",
pages = "40:1--40:??",
month = aug,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1383369.1383371",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:43 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present the first dynamic algorithm that maintains
a clique tree representation of a chordal graph and
supports the following operations: (1) query whether
deleting or inserting an arbitrary edge preserves
chordality; and (2) delete or insert an arbitrary edge,
provided it preserves chordality. We give two
implementations. In the first, each operation runs in
{$ O(n) $} time, where {$n$} is the number of vertices.
In the second, an insertion query runs in {$ O(\log^2
n) $} time, an insertion in {$ O(n) $} time, a deletion
query in {$ O(n) $} time, and a deletion in {$ O(n \log
n) $} time. We also present a data structure that
allows a deletion query to run in {$ O(\sqrt m) $} time
in either implementation, where {$m$} is the current
number of edges. Updating this data structure after a
deletion or insertion requires {$ O(m) $} time.\par
We also present a very simple dynamic algorithm that
supports each of the following operations in {$ O(1) $}
time on a general graph: (1) query whether the graph is
split, and (2) delete or insert an arbitrary edge.",
acknowledgement = ack-nhfb,
articleno = "40",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "chordal graphs; clique trees; Dynamic graph
algorithms; split graphs",
}
@Article{Korman:2008:DRS,
author = "Amos Korman and David Peleg",
title = "Dynamic routing schemes for graphs with low local
density",
journal = j-TALG,
volume = "4",
number = "4",
pages = "41:1--41:??",
month = aug,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1383369.1383372",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:43 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "This article studies approximate distributed routing
schemes on dynamic communication networks. The work
focuses on dynamic weighted general graphs where the
vertices of the graph are fixed, but the weights of the
edges may change. Our main contribution concerns
bounding the cost of adapting to dynamic changes. The
update efficiency of a routing scheme is measured by
the time needed in order to update the routing scheme
following a weight change. A naive dynamic routing
scheme, which updates all vertices following a weight
change, requires {$ \Omega (\hbox {\em Diam \/ }) $}
time in order to perform the updates after every weight
change, where {\em Diam\/} is the diameter of the
underlying graph. In contrast, this article presents
approximate dynamic routing schemes with average time
complexity {$ \tilde {\Theta }(d) $} per topological
change, where {$d$} is the local density parameter of
the underlying graph. Following a weight change, our
scheme never incurs more than {\em Diam\/} time; thus,
our scheme is particularly efficient on graphs which
have low local density and large diameter. The article
also establishes upper and lower bounds on the size of
the databases required by the scheme at each site.",
acknowledgement = ack-nhfb,
articleno = "41",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "distributed algorithms; dynamic networks; Routing
schemes",
}
@Article{Cohen:2008:LGG,
author = "Reuven Cohen and Pierre Fraigniaud and David Ilcinkas
and Amos Korman and David Peleg",
title = "Label-guided graph exploration by a finite automaton",
journal = j-TALG,
volume = "4",
number = "4",
pages = "42:1--42:??",
month = aug,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1383369.1383373",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:43 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A finite automaton, simply referred to as a {\em
robot}, has to explore a graph, that is, visit all the
nodes of the graph. The robot has no a priori knowledge
of the topology of the graph, nor of its size. It is
known that for any $k$-state robot, there exists a
graph of maximum degree 3 that the robot cannot
explore. This article considers the effects of allowing
the system designer to add short labels to the graph
nodes in a preprocessing stage, for helping the
exploration by the robot. We describe an exploration
algorithm that, given appropriate 2-bit labels (in
fact, only 3-valued labels), allows a robot to explore
all graphs. Furthermore, we describe a suitable
labeling algorithm for generating the required labels
in linear time. We also show how to modify our labeling
scheme so that a robot can explore all graphs of
bounded degree, given appropriate 1-bit labels. In
other words, although there is no robot able to explore
all graphs of maximum degree 3, there is a robot {$R$},
and a way to color in black or white the nodes of any
bounded-degree graph {$G$}, so that {$R$} can explore
the colored graph {$G$}. Finally, we give impossibility
results regarding graph exploration by a robot with no
internal memory (i.e., a single-state automaton).",
acknowledgement = ack-nhfb,
articleno = "42",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Distributed algorithms; graph exploration; labeling
schemes",
}
@Article{Suzuki:2008:DSP,
author = "Akiko Suzuki and Takeshi Tokuyama",
title = "Dense subgraph problems with output-density
conditions",
journal = j-TALG,
volume = "4",
number = "4",
pages = "43:1--43:??",
month = aug,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1383369.1383374",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:43 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the dense subgraph problem that extracts a
subgraph, with a prescribed number of vertices, having
the maximum number of edges (or total edge weight, in
the weighted case) in a given graph. We give
approximation algorithms with improved theoretical
approximation ratios assuming that the density of the
optimal output subgraph is high, where density is the
ratio of number of edges (or sum of edge weights) to
the number of edges in the clique on the same number of
vertices. Moreover, we investigate the case where the
input graph is bipartite and design a randomized
pseudopolynomial time approximation scheme that can
become a randomized PTAS, even if the size of the
optimal output graph is comparatively small. This is a
significant improvement in a theoretical sense, since
no constant-ratio approximation algorithm was known
previously if the output graph has {$ o(n) $} vertices.",
acknowledgement = ack-nhfb,
articleno = "43",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "approximation algorithms; Combinatorial optimization;
dense subgraph; randomized algorithms",
}
@Article{Bar-Noy:2008:DCF,
author = "Amotz Bar-Noy and Panagiotis Cheilaris and Shakhar
Smorodinsky",
title = "Deterministic conflict-free coloring for intervals:
{From} offline to online",
journal = j-TALG,
volume = "4",
number = "4",
pages = "44:1--44:18",
month = aug,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1383369.1383375",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:43 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We investigate deterministic algorithms for a
frequency assignment problem in cellular networks. The
problem can be modeled as a special vertex coloring
problem for hypergraphs: In every hyperedge there must
exist a vertex with a color that occurs exactly once in
the hyperedge (the conflict-free property). We
concentrate on a special case of the problem, called
conflict-free coloring for intervals. We introduce a
hierarchy of four models for the aforesaid problem: (i)
static, (ii) dynamic offline, (iii) dynamic online with
absolute positions, and (iv) dynamic online with
relative positions. In the dynamic offline model, we
give a deterministic algorithm that uses at most $
\log_{3 / 2} n + 1 \approx 1.71 \log_2 n $ colors and
show inputs that force any algorithm to use at least $
3 \log_5 n + 1 \approx 1.29 \log_2 n $ colors. For the
online absolute-positions model, we give a
deterministic algorithm that uses at most $ 3 \lceil
\log_3 n \rceil \approx 1.89 \log_2 n $ colors. To the
best of our knowledge, this is the first deterministic
online algorithm using {$ O(\log n) $} colors in a
nontrivial online model. In the online
relative-positions model, we resolve an open problem by
showing a tight analysis on the number of colors used
by the first-fit greedy online algorithm. We also
consider conflict-free coloring only with respect to
intervals that contain at least one of the two extreme
points.",
acknowledgement = ack-nhfb,
articleno = "44",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "cellular networks; coloring; conflict free; frequency
assignment; Online algorithms",
}
@Article{Chandran:2008:IAO,
author = "Nishanth Chandran and Ryan Moriarty and Rafail
Ostrovsky and Omkant Pandey and Mohammad Ali Safari and
Amit Sahai",
title = "Improved algorithms for optimal embeddings",
journal = j-TALG,
volume = "4",
number = "4",
pages = "45:1--45:14",
month = aug,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1383369.1383376",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:43 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In the last decade, the notion of metric embeddings
with small distortion has received wide attention in
the literature, with applications in combinatorial
optimization, discrete mathematics, and
bio-informatics. The notion of embedding is, given two
metric spaces on the same number of points, to find a
bijection that minimizes maximum Lipschitz and
bi-Lipschitz constants. One reason for the popularity
of the notion is that algorithms designed for one
metric space can be applied to a different one, given
an embedding with small distortion. The better
distortion, the better the effectiveness of the
original algorithm applied to a new metric space.\par
The goal recently studied by Kenyon et al. [2004] is to
consider all possible embeddings between two {\em
finite\/} metric spaces and to find the best possible
one; that is, consider a single objective function over
the space of all possible embeddings that minimizes the
distortion. In this article we continue this important
direction. In particular, using a theorem of Albert and
Atkinson [2005], we are able to provide an algorithm to
find the optimal bijection between two line metrics,
provided that the optimal distortion is smaller than
13.602. This improves the previous bound of $ 3 + 2
\sqrt {2} $, solving an open question posed by Kenyon
et al. [2004]. Further, we show an inherent limitation
of algorithms using the ``forbidden pattern'' based
dynamic programming approach, in that they cannot find
optimal mapping if the optimal distortion is more than
$ 7 + 4 \sqrt {3} (\simeq 13.928) $. Thus, our results
are almost optimal for this method. We also show that
previous techniques for general embeddings apply to a
(slightly) more general class of metrics.",
acknowledgement = ack-nhfb,
articleno = "45",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "dynamic programming; forbidden patterns; line
embeddings; metric spaces; Optimal metric embeddings;
shape matching",
}
@Article{Alon:2008:OEM,
author = "Noga Alon and Mihai B{\~a}doiu and Erik D. Demaine and
Martin Farach-Colton and Mohammadtaghi Hajiaghayi and
Anastasios Sidiropoulos",
title = "Ordinal embeddings of minimum relaxation: {General}
properties, trees, and ultrametrics",
journal = j-TALG,
volume = "4",
number = "4",
pages = "46:1--46:??",
month = aug,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1383369.1383377",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:43 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We introduce a new notion of embedding, called {\em
minimum-relaxation ordinal embedding}, parallel to the
standard notion of minimum-distortion (metric)
embedding. In an ordinal embedding, it is the relative
order between pairs of distances, and not the distances
themselves, that must be preserved as much as possible.
The (multiplicative) relaxation of an ordinal embedding
is the maximum ratio between two distances whose
relative order is inverted by the embedding. We develop
several worst-case bounds and approximation algorithms
on ordinal embedding. In particular, we establish that
ordinal embedding has many qualitative differences from
metric embedding, and we capture the ordinal behavior
of ultrametrics and shortest-path metrics of unweighted
trees.",
acknowledgement = ack-nhfb,
articleno = "46",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "distortion; Metrics; ordinal embedding; relaxation",
}
@Article{Blaser:2008:NAA,
author = "Markus Bl{\"a}ser",
title = "A new approximation algorithm for the asymmetric {TSP}
with triangle inequality",
journal = j-TALG,
volume = "4",
number = "4",
pages = "47:1--47:??",
month = aug,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1383369.1383378",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:43 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present a polynomial time factor $ 0.999 \cdot \log
n $ approximation algorithm for the asymmetric
traveling salesperson problem with triangle
inequality.",
acknowledgement = ack-nhfb,
articleno = "47",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithm; cycle cover; traveling
salesman problem; TSP",
}
@Article{Boyar:2008:RWO,
author = "Joan Boyar and Paul Medvedev",
title = "The relative worst order ratio applied to seat
reservation",
journal = j-TALG,
volume = "4",
number = "4",
pages = "48:1--48:??",
month = aug,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1383369.1383379",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:43 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The seat reservation problem is the problem of
assigning passengers to seats on a train with $n$ seats
and $k$ stations enroute in an online manner. The
performance of algorithms for this problem is studied
using the relative worst order ratio, a fairly new
measure for the quality of online algorithms, which
allows for direct comparisons between algorithms. This
study has yielded new separations between algorithms.
For example, for both variants of the problem
considered, using the relative worst order ratio,
First-Fit and Best-Fit are shown to be better than
Worst-Fit.",
acknowledgement = ack-nhfb,
articleno = "48",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Online; quality measure; relative worst order ratio;
seat reservation",
}
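To make the policies compared in the entry above concrete, here is a
minimal Python sketch of First-Fit for the seat reservation setting:
each request is a station interval and is placed on the
lowest-numbered seat whose existing bookings it does not overlap.
The half-open interval representation and the per-seat booking lists
are assumptions for illustration, not the paper's formulation.

    def first_fit_assign(requests, num_seats):
        # requests: online sequence of (start_station, end_station)
        # half-open intervals; returns, for each request, the seat
        # index chosen or None if no seat can accommodate it.
        seats = [[] for _ in range(num_seats)]
        assignment = []
        for start, end in requests:
            for s in range(num_seats):
                if all(end <= a or b <= start for (a, b) in seats[s]):
                    seats[s].append((start, end))
                    assignment.append(s)
                    break
            else:
                assignment.append(None)
        return assignment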
@Article{Nieberg:2008:ASW,
author = "Tim Nieberg and Johann Hurink and Walter Kern",
title = "Approximation schemes for wireless networks",
journal = j-TALG,
volume = "4",
number = "4",
pages = "49:1--49:??",
month = aug,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1383369.1383380",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:43 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Wireless networks are created by the communication
links between a collection of radio transceivers. The
nature of wireless transmissions does not lead to
arbitrary undirected graphs but to structured graphs
which we characterize by the polynomially bounded
growth property. In contrast to many existing graph
models for wireless networks, the property of
polynomially bounded growth is defined independently of
geometric data such as positional information.\par
On such wireless networks, we present an approach that
can be used to create polynomial-time approximation
schemes for several optimization problems called the
local neighborhood-based scheme. We apply this approach
to the problems of seeking maximum (weight) independent
sets and minimum dominating sets. These are two
important problems in the area of wireless
communication networks and are also used in many
applications ranging from clustering to routing
strategies. However, the approach is presented in a
general fashion since it can be applied to other
problems as well.\par
The approach for the approximation schemes is robust in
the sense that it accepts any undirected graph as input
and either outputs a solution of desired quality or
correctly asserts that the graph presented as input
does not satisfy the structural assumption of a
wireless network (an NP-hard problem).",
acknowledgement = ack-nhfb,
articleno = "49",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "bounded growth; maximum independent set; minimum
dominating set; PTAS; Wireless ad-hoc networks",
}
@Article{Massberg:2008:AAF,
author = "Jens Ma{\ss}berg and Jens Vygen",
title = "Approximation algorithms for a facility location
problem with service capacities",
journal = j-TALG,
volume = "4",
number = "4",
pages = "50:1--50:15",
month = aug,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1383369.1383381",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:43 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present the first constant-factor approximation
algorithms for the following problem. Given a metric
space {$ (V, c) $}, a finite set {$ D \subseteq V $} of
terminals\slash customers with demands {$ d : D
\rightarrow \mathbb {R}_+ $}, a facility opening cost
{$ f \in \mathbb {R}_+ $} and a capacity {$ u \in
\mathbb {R}_+ $}, find a partition {$ D = D_1 \dot
{\cup } \cdots {} \dot {\cup } D_k $} and Steiner trees
{$ T_i $} for {$ D_i (i = 1, \ldots {}, k) $} with {$
c(E(T_i)) + d(D_i) \leq u $} for {$ i = 1, \ldots {}, k
$} such that {$ \sum_{i = 1}^k c(E(T_i)) + k f $} is
minimum. This problem arises in VLSI design. It
generalizes the bin-packing problem and the Steiner
tree problem. In contrast to other network design and
facility location problems, it has the additional
feature of upper bounds on the service cost that each
facility can handle. Among other results, we obtain a
4.1-approximation in polynomial time, a
4.5-approximation in cubic time, and a 5-approximation
as fast as computing a minimum spanning tree on {$ (D,
c) $}.",
acknowledgement = ack-nhfb,
articleno = "50",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithm; facility location; network
design; VLSI design",
}
@Article{Swamy:2008:FTF,
author = "Chaitanya Swamy and David B. Shmoys",
title = "Fault-tolerant facility location",
journal = j-TALG,
volume = "4",
number = "4",
pages = "51:1--51:??",
month = aug,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1383369.1383382",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:43 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider a fault-tolerant generalization of the
classical uncapacitated facility location problem,
where each client $j$ has a requirement that $ r_j $
{\em distinct\/} facilities serve it, instead of just
one. We give a 2.076-approximation algorithm for this
problem using LP rounding, which is currently the
best-known performance guarantee. Our algorithm
exploits primal and dual complementary slackness
conditions and is based on {\em clustered randomized
rounding}. A technical difficulty that we overcome is
the presence of terms with negative coefficients in the
dual objective function, which makes it difficult to
bound the cost in terms of dual variables. For the case
where all requirements are the same, we give a
primal-dual 1.52-approximation algorithm.\par
We also consider a fault-tolerant version of the
$k$-median problem. In the metric $k$-median problem,
we are given $n$ points in a metric space. We must
select $k$ of these to be centers, and then assign each
input point $j$ to the selected center that is closest
to it. In the fault-tolerant version we want $j$ to be
assigned to $ r_j $ distinct centers. The goal is to
select the $k$ centers so as to minimize the sum of
assignment costs. The primal-dual algorithm for
fault-tolerant facility location with uniform
requirements also yields a 4-approximation algorithm
for the fault-tolerant $k$-median problem for this
case. This is the first constant-factor approximation
algorithm for the uniform requirements case.",
acknowledgement = ack-nhfb,
articleno = "51",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithms; facility location; k-median
problem",
}
@Article{Fotakis:2008:ACG,
author = "Dimitris Fotakis and Spyros Kontogiannis and Paul
Spirakis",
title = "Atomic congestion games among coalitions",
journal = j-TALG,
volume = "4",
number = "4",
pages = "52:1--52:??",
month = aug,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1383369.1383383",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:03:43 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider algorithmic questions concerning the
existence, tractability, and quality of Nash
equilibria, in atomic congestion games among users
participating in selfish coalitions.\par
We introduce a coalitional congestion model among
atomic players and demonstrate many interesting
similarities with the noncooperative case. For example,
there exists a potential function proving the existence
of pure Nash equilibria (PNE) in the unrelated parallel
links setting; in the network setting, the finite
improvement property collapses as soon as we depart
from linear delays, but there is an exact potential
(and thus PNE) for linear delays. The price of anarchy
on identical parallel links demonstrates a quite
surprising threshold behavior: It remains asymptotically
equal to that in the case of the noncooperative
KP-model, unless the number of
coalitions is {\em sublogarithmic}.\par
We also show crucial differences, mainly concerning the
hardness of algorithmic problems that are solved
efficiently in the noncooperative case. Although we
demonstrate convergence to robust PNE, we also prove
the hardness of computing them. On the other hand, we
propose a generalized fully mixed Nash equilibrium that
can be efficiently constructed in most cases. Finally,
we propose a natural improvement policy and prove its
convergence in pseudopolynomial time to PNE which are
robust against (even dynamically forming) coalitions of
small size.",
acknowledgement = ack-nhfb,
articleno = "52",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Algorithmic game theory; congestion games; convergence
to equilibria; price of anarchy",
}
@Article{Torng:2008:SOU,
author = "Eric Torng and Jason McCullough",
title = "{SRPT} optimally utilizes faster machines to minimize
flow time",
journal = j-TALG,
volume = "5",
number = "1",
pages = "1:1--1:??",
month = nov,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1435375.1435376",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:04:20 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We analyze the shortest remaining processing time
(SRPT) algorithm with respect to the problem of
scheduling $n$ jobs with release times on $m$ identical
machines to minimize total flow time. It is known that
SRPT is optimal if $ m = 1$ but that SRPT has a
worst-case approximation ratio of $ \Theta (\min (\log
n / m, \log \Delta)) $ for this problem, where $ \Delta
$ is the ratio of the length of the longest job divided
by the length of the shortest job. It has previously
been shown that SRPT is able to use faster machines to
produce a schedule {\em as good as\/} an optimal
algorithm using slower machines. We now show that SRPT
{\em optimally\/} uses these faster machines with
respect to the worst-case approximation ratio. That is,
if SRPT is given machines that are $ s \geq 2 - 1 / m $
times as fast as those used by an optimal algorithm,
SRPT's flow time is at most a {$ 1 / s $} fraction of
the flow time incurred by the optimal algorithm.
Clearly, no algorithm can offer a better worst-case
guarantee, and we show that existing algorithms with
similar performance guarantees to SRPT without resource
augmentation do not optimally use extra resources.",
acknowledgement = ack-nhfb,
articleno = "1",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "flow time; parallel machines; resource augmentation;
scheduling; SRPT",
}
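For readers who want to experiment with the guarantee stated in the
entry above, here is a minimal single-machine SRPT simulation in
Python (integer release times, positive integer processing times,
unit time steps). It is an illustration only; the paper's setting
has m identical machines, where SRPT runs the m jobs of shortest
remaining work at each step.

    import heapq

    def srpt_total_flow_time(jobs):
        # jobs: list of (release_time, processing_time) pairs with
        # integer release times and positive integer processing times.
        # Returns the total flow time (completion minus release, summed).
        jobs = sorted(jobs)
        pending = []                 # heap of (remaining, release)
        i, t, total_flow = 0, 0, 0
        while i < len(jobs) or pending:
            while i < len(jobs) and jobs[i][0] <= t:
                heapq.heappush(pending, (jobs[i][1], jobs[i][0]))
                i += 1
            if not pending:
                t = jobs[i][0]       # idle until the next release
                continue
            rem, rel = heapq.heappop(pending)
            rem -= 1                 # run the shortest pending job one unit
            t += 1
            if rem == 0:
                total_flow += t - rel
            else:
                heapq.heappush(pending, (rem, rel))
        return total_flow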
@Article{Goldwasser:2008:ONS,
author = "Michael H. Goldwasser and Mark Pedigo",
title = "Online nonpreemptive scheduling of equal-length jobs
on two identical machines",
journal = j-TALG,
volume = "5",
number = "1",
pages = "2:1--2:18",
month = nov,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1435375.1435377",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:04:20 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the nonpreemptive scheduling of two
identical machines for jobs with equal processing times
yet arbitrary release dates and deadlines. Our
objective is to maximize the number of jobs completed
by their deadlines. Using standard nomenclature, this
problem is denoted as {$ P 2 \mid p_j = p, r_j \mid
\sum {\bar {U}}_j $}. The problem is known to be
polynomially solvable in an offline setting.\par
In an online variant of the problem, a job's existence
and parameters are revealed to the scheduler only upon
that job's release date. We present an online
deterministic algorithm for the problem and prove that
it is {$ 3 / 2 $}-competitive. A simple lower bound
shows that this is the optimal deterministic
competitiveness.",
acknowledgement = ack-nhfb,
articleno = "2",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Admission control; competitive analysis; scheduling",
}
@Article{Aiello:2008:CBM,
author = "William Aiello and Alex Kesselman and Yishay Mansour",
title = "Competitive buffer management for shared-memory
switches",
journal = j-TALG,
volume = "5",
number = "1",
pages = "3:1--3:??",
month = nov,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1435375.1435378",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:04:20 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider buffer management policies for shared
memory switches. We study the case of overloads
resulting in packet loss, where the constraint is the
limited shared memory capacity. The goal of the buffer
management policy is that of maximizing the number of
packets transmitted. The problem is online in nature,
and thus we use competitive analysis to measure the
performance of the buffer management policies. Our main
result is to show that the well-known preemptive
Longest Queue Drop ({\em LQD\/}) policy is at most
2-competitive and at least $ \sqrt 2 $-competitive. We
also demonstrate a general lower bound of $ 4 / 3 $ on
the performance of any deterministic online policy.
Finally, we consider some other popular non-preemptive
policies including Complete Partition, Complete
Sharing, Static Threshold and Dynamic Threshold and
derive almost tight bounds on their performance.",
acknowledgement = ack-nhfb,
articleno = "3",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Buffer management; competitive analysis; shared
memory",
}
@Article{Agarwal:2008:KDD,
author = "Pankaj K. Agarwal and Haim Kaplan and Micha Sharir",
title = "Kinetic and dynamic data structures for closest pair
and all nearest neighbors",
journal = j-TALG,
volume = "5",
number = "1",
pages = "4:1--4:??",
month = nov,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1435375.1435379",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:04:20 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present simple, fully dynamic and kinetic data
structures, which are variants of a dynamic
two-dimensional range tree, for maintaining the closest
pair and all nearest neighbors for a set of $n$ moving
points in the plane; insertions and deletions of points
are also allowed. If no insertions or deletions take
place, the structure for the closest pair uses {$ O(n
\log n) $} space, and processes {$ O(n^2 \beta_{s + 2} (n)
\log n) $} critical events, each in {$ O(\log^2 n) $}
time. Here {$s$} is the maximum number of times where
the distances between any two specific pairs of points
can become equal, {$ \beta_s(q) = \lambda_s(q) / q $},
and {$ \lambda_s(q) $} is the maximum length of
Davenport--Schinzel sequences of order $s$ on $q$
symbols. The dynamic version of the problem incurs a
slight degradation in performance: If $ m \geq n $
insertions and deletions are performed, the structure
still uses {$ O(n \log n) $} space, and processes {$
O(m n \beta_{s + 2} (n) \log^3 n) $} events, each in {$
O(\log^3 n) $} time.\par
Our kinetic data structure for all nearest neighbors
uses {$ O(n \log^2 n) $} space, and processes {$ O(n^2
\beta^2_{s + 2}(n) \log^3 n) $} critical events. The
expected time to process all events is {$ O(n^2
\beta_{s + 2}^2 (n) \log^4 n) $}, though processing a
single event may take {$ \Theta (n) $} expected time in
the worst case. If {$ m \geq n $} insertions and
deletions are performed, then the expected number of
events is {$ O(m n \beta^2_{s + 2}(n) \log^3 n) $} and
processing them all takes {$ O(m n \beta^2_{s + 2} (n)
\log^4 n) $}. An insertion or deletion takes {$ O(n) $}
expected time.",
acknowledgement = ack-nhfb,
articleno = "4",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "closest pair; computational geometry; Kinetic data
structures; nearest neighbors",
}
@Article{Agarwal:2008:ACT,
author = "Pankaj K. Agarwal and Micha Sharir and Emo Welzl",
title = "Algorithms for center and {Tverberg} points",
journal = j-TALG,
volume = "5",
number = "1",
pages = "5:1--5:??",
month = nov,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1435375.1435380",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:04:20 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given a set $s$ of $n$ points in {$ R^3 $}, a point
{$x$} in {$ R^3 $} is called {\em center point of $S$
\/} if every closed halfspace whose bounding hyperplane
passes through {$x$} contains at least {$ \lceil n / 4
\rceil $} points from {$S$}. We present a
near-quadratic algorithm for computing the {\em center
region}, that is the set of all center points, of a set
of {$n$} points in {$ R^3 $}. This is nearly tight in
the worst case since the center region can have {$
\Omega (n^2) $} complexity.\par
We then consider sets {$S$} of {$ 3 n $} points in the
plane which are the union of three disjoint sets
consisting respectively of {$n$} red, $n$ blue, and $n$
green points. A point $x$ in {$ R^2 $} is called a {\em
colored Tverberg point of $S$ \/} if there is a
partition of {$S$} into {$n$} triples with one point of
each color, so that {$x$} lies in all triangles spanned
by these triples. We present the first polynomial-time
algorithm for recognizing whether a given point is a
colored Tverberg point of such a 3-colored set {$S$}.",
acknowledgement = ack-nhfb,
articleno = "5",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Arrangements; center point; Tverberg point",
}
@Article{Grandoni:2008:DWV,
author = "Fabrizio Grandoni and Jochen K{\"o}nemann and
Alessandro Panconesi",
title = "Distributed weighted vertex cover via maximal
matchings",
journal = j-TALG,
volume = "5",
number = "1",
pages = "6:1--6:12",
month = nov,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1435375.1435381",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:04:20 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article, we consider the problem of computing
a minimum-weight vertex-cover in an $n$-node, weighted,
undirected graph {$ G = (V, E) $}. We present a fully
distributed algorithm for computing vertex covers of
weight at most twice the optimum, in the case of
integer weights. Our algorithm runs in an expected
number of {$ O(\log n + \log \hat {W}) $} communication
rounds, where {$ \hat {W} $} is the average
vertex-weight. The previous best algorithm for this
problem requires {$ O(\log n (\log n + \log \hat {W}))
$} rounds and it is not fully distributed.\par
For a maximal matching {$M$} in {$G$}, it is a
well-known fact that any vertex-cover in {$G$} needs to
have at least {$ |M| $} vertices. Our algorithm is
based on a generalization of this combinatorial
lower-bound to the weighted setting.",
acknowledgement = ack-nhfb,
articleno = "6",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithms; distributed algorithms;
maximal matching; vertex cover",
}
@Article{Vishwanathan:2008:HIA,
author = "Sundar Vishwanathan",
title = "On hard instances of approximate vertex cover",
journal = j-TALG,
volume = "5",
number = "1",
pages = "7:1--7:??",
month = nov,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1435375.1435382",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:04:20 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We show that if there is a $ 2 - \epsilon $
approximation algorithm for vertex cover on graphs with
vector chromatic number at most $ 2 + \delta $, then
there is a $ 2 - f(\epsilon, \delta) $ approximation
algorithm for vertex cover for all graphs.",
acknowledgement = ack-nhfb,
articleno = "7",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithms; vertex cover",
}
@Article{Berend:2008:CDG,
author = "Daniel Berend and Steven S. Skiena and Yochai Twitto",
title = "Combinatorial dominance guarantees for problems with
infeasible solutions",
journal = j-TALG,
volume = "5",
number = "1",
pages = "8:1--8:??",
month = nov,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1435375.1435383",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:04:20 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The design and analysis of approximation algorithms
for {\em NP\/}-hard problems is perhaps the most active
research area in the theory of combinatorial
algorithms. In this article, we study the notion of a
{\em combinatorial dominance guarantee\/} as a way for
assessing the performance of a given approximation
algorithm. An $ f(n) $ dominance bound is a guarantee
that the heuristic always returns a solution not worse
than at least $ f(n) $ solutions. We give tight
analysis of many heuristics, and establish novel and
interesting dominance guarantees even for certain
inapproximable problems and heuristic search
algorithms. For example, we show that the maximal
matching heuristic of VERTEX COVER offers a
combinatorial dominance guarantee of $ 2^n - (1.839 +
o(1))^n $. We also give inapproximability results for
most of the problems we discuss.",
acknowledgement = ack-nhfb,
articleno = "8",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "algorithms analysis; approximation algorithms;
Computation complexity; dominance analysis",
}
@Article{Fomin:2008:CBM,
author = "Fedor V. Fomin and Fabrizio Grandoni and Artem V.
Pyatkin and Alexey A. Stepanov",
title = "Combinatorial bounds via measure and conquer:
{Bounding} minimal dominating sets and applications",
journal = j-TALG,
volume = "5",
number = "1",
pages = "9:1--9:??",
month = nov,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1435375.1435384",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:04:20 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We provide an algorithm listing all minimal dominating
sets of a graph on $n$ vertices in time {$ O(1.7159^n)
$}. This result can be seen as an algorithmic proof of
the fact that the number of minimal dominating sets in
a graph on {$n$} vertices is at most {$ 1.7159^n $},
thus improving on the trivial {$ O(2^n / \sqrt n) $}
bound. Our result makes use of the measure-and-conquer
technique which was recently developed in the area of
exact algorithms.\par
Based on this result, we derive an {$ O(2.8718^n) $}
algorithm for the domatic number problem.",
acknowledgement = ack-nhfb,
articleno = "9",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "domatic number; Exact exponential algorithms; listing
algorithms; measure and conquer; minimum dominating
set; minimum set cover",
}
@Article{Oum:2008:ARW,
author = "Sang-Il Oum",
title = "Approximating rank-width and clique-width quickly",
journal = j-TALG,
volume = "5",
number = "1",
pages = "10:1--10:??",
month = nov,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1435375.1435385",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:04:20 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Rank-width was defined by Oum and Seymour [2006] to
investigate clique-width. They constructed an algorithm
that either outputs a rank-decomposition of width at
most $ f(k) $ for some function $f$ or confirms that
rank-width is larger than $k$ in time {$ O(|V|^9 \log
|V|) $} for an input graph {$ G = (V, E) $} and a fixed
{$k$}. We develop three separate algorithms of this
kind with faster running time. We construct an {$
O(|V|^4) $}-time algorithm with {$ f(k) = 3 k + 1 $} by
constructing a subroutine for the previous algorithm;
we avoid generic algorithms minimizing submodular
functions used by Oum and Seymour. Another one is an {$
O(|V|^3) $}-time algorithm with {$ f(k) = 24 k $},
achieved by giving a reduction from graphs to binary
matroids; then we use an approximation algorithm for
matroid branch-width by Hlin{\v{e}}n{\'y} [2005]. Finally
we construct an {$ O(|V|^3) $}-time algorithm with {$
f(k) = 3 k - 1 $} by combining the ideas of the two
previously cited papers.",
acknowledgement = ack-nhfb,
articleno = "10",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithms; branch-width; clique-width;
matroids; rank-width",
}
@Article{Brandstadt:2008:SLT,
author = "Andreas Brandst{\"a}dt and Van Bang Le and R.
Sritharan",
title = "Structure and linear-time recognition of 4-leaf
powers",
journal = j-TALG,
volume = "5",
number = "1",
pages = "11:1--11:??",
month = nov,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1435375.1435386",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:04:20 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A graph {$G$} is the {$k$}-{\em leaf power\/} of a
tree {$T$} if its vertices are leaves of {$T$} such
that two vertices are adjacent in {$G$} if and only if
their distance in {$T$} is at most {$k$}. Then {$T$} is
a {$k$}-{\em leaf root\/} of {$G$}. This notion was
introduced and studied by Nishimura, Ragde, and
Thilikos [2002], motivated by the search for underlying
phylogenetic trees. Their results imply an {$ O(n^3)
$}-time recognition algorithm for 4-leaf powers.
Recently, Rautenbach [2006] as well as Dom et al.
[2005] characterized 4-leaf powers without true twins
in terms of forbidden subgraphs. We give new
characterizations for 4-leaf powers and squares of
trees by a complete structural analysis. As a
consequence, we obtain a conceptually simple
linear-time recognition of 4-leaf powers.",
acknowledgement = ack-nhfb,
articleno = "11",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Graph powers; leaf powers; phylogenetic trees; squares
of trees; trees",
}
@Article{Chen:2008:MCI,
author = "Xin Chen and Lan Liu and Zheng Liu and Tao Jiang",
title = "On the minimum common integer partition problem",
journal = j-TALG,
volume = "5",
number = "1",
pages = "12:1--12:??",
month = nov,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1435375.1435387",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:04:20 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We introduce a new combinatorial optimization problem
in this article, called the {\em minimum common integer
partition\/} (MCIP) problem, which was inspired by
computational biology applications including ortholog
assignment and DNA fingerprint assembly. A {\em
partition\/} of a positive integer $n$ is a multiset of
positive integers that add up to exactly $n$, and an
{\em integer partition\/} of a multiset {$S$} of integers
is defined as the multiset union of partitions of
integers in {$S$}. Given a sequence of multisets {$
S_1, S_2, \ldots, S_k $} of integers, where {$ k \geq 2
$}, we say that a multiset is a {\em common integer
partition\/} if it is an integer partition of every
multiset {$ S_i, 1 \leq i \leq k $}. The MCIP problem
is thus defined as to find a common integer partition
of {$ S_1, S_2, \ldots, S_k $} with the minimum
cardinality, denoted as MCIP({$ S_1 $}, {$ S_2 $},
\ldots {}, {$ S_k $}). It is easy to see that the MCIP
problem is NP-hard, since it generalizes the well-known
subset sum problem. We can in fact show that it is
APX-hard. We will also present a {$ 5 / 4
$}-approximation algorithm for the MCIP problem when {$
k = 2 $}, and a {$ 3 k (k - 1) / (3 k - 2)
$}-approximation algorithm for $ k \geq 3 $.",
acknowledgement = ack-nhfb,
articleno = "12",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "approximation algorithm; combinatorial optimization;
computational biology; integer partition; NP-hard;
Subset sum",
}
@Article{Azriel:2008:IFS,
author = "Dany Azriel and Noam Solomon and Shay Solomon",
title = "On an infinite family of solvable {Hanoi} graphs",
journal = j-TALG,
volume = "5",
number = "1",
pages = "13:1--13:??",
month = nov,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1435375.1435388",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:04:20 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The Tower of Hanoi problem is generalized by placing
pegs on the vertices of a given directed graph {$G$}
with two distinguished vertices, {$S$} and {$D$}, and
allowing moves only along arcs of this graph. An
optimal solution for such a graph {$G$} is an algorithm
that completes the task of moving a tower of any given
number of disks from {$S$} to {$D$} in a minimal number
of disk moves.\par
In this article we present an algorithm which solves
the problem for two infinite families of graphs, and
prove its optimality. To the best of our knowledge,
this is the first optimality proof for an {\em
infinite\/} family of graphs.\par
Furthermore, we present a unified algorithm that solves
the problem for a wider family of graphs and conjecture
its optimality.",
acknowledgement = ack-nhfb,
articleno = "13",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Optimality proofs; Tower of Hanoi",
}
@Article{Elmasry:2008:MPQ,
author = "Amr Elmasry and Claus Jensen and Jyrki Katajainen",
title = "Multipartite priority queues",
journal = j-TALG,
volume = "5",
number = "1",
pages = "14:1--14:??",
month = nov,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1435375.1435389",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:04:20 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We introduce a framework for reducing the number of
element comparisons performed in priority-queue
operations. In particular, we give a priority queue
which guarantees the worst-case cost of {$ O(1) $} per
minimum finding and insertion, and the worst-case cost
of {$ O(\log n) $} with at most {$ \log n + O(1) $}
element comparisons per deletion, improving the bound
of {$ 2 \log n + O(1) $} known for binomial queues.
Here, {$n$} denotes the number of elements stored in
the data structure prior to the operation in question,
and {$ \log n $} equals {$ \log_2 (\max \{ 2, n \}) $}.
As an immediate application of the priority queue
developed, we obtain a sorting algorithm that is
optimally adaptive with respect to the inversion
measure of disorder, and that sorts a sequence having
$n$ elements and {$I$} inversions with at most {$ n
\log (I / n) + O(n) $} element comparisons.",
acknowledgement = ack-nhfb,
articleno = "14",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "constant factors; heaps; meticulous analysis; Priority
queues",
}
@Article{Eppstein:2009:TBG,
author = "David Eppstein",
title = "Testing bipartiteness of geometric intersection
graphs",
journal = j-TALG,
volume = "5",
number = "2",
pages = "15:1--15:??",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1497290.1497291",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:05:00 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We show how to test the bipartiteness of an
intersection graph of $n$ line segments or simple
polygons in the plane, or of an intersection graph of
balls in $d$-dimensional Euclidean space, in time {$
O(n \log n) $}. More generally, we find subquadratic
algorithms for connectivity and bipartiteness testing
of intersection graphs of a broad class of geometric
objects. Our algorithms for these problems return
either a bipartition of the input or an odd cycle in
its intersection graph. We also consider lower bounds
for connectivity and {$k$}-colorability problems of
geometric intersection graphs. For unit balls in {$d$}
dimensions, connectivity testing has equivalent
randomized complexity to construction of Euclidean
minimum spanning trees, and for line segments in the
plane connectivity testing has the same lower bounds as
Hopcroft's point-line incidence testing problem;
therefore, for these problems, connectivity is unlikely
to be solved as efficiently as bipartiteness. For line
segments or planar disks, testing {$k$}-colorability of
intersection graphs for {$ k > 2 $} is NP-complete.",
acknowledgement = ack-nhfb,
articleno = "15",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Bipartite graph; coin graph; disks; geometric
thickness; graph coloring; Hopcroft's problem;
intersection graph; line segments; minimum spanning
tree",
}
@Article{Chen:2009:OCF,
author = "Ke Chen and Haim Kaplan and Micha Sharir",
title = "Online conflict-free coloring for halfplanes,
congruent disks, and axis-parallel rectangles",
journal = j-TALG,
volume = "5",
number = "2",
pages = "16:1--16:??",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1497290.1497292",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:05:00 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present randomized algorithms for online
conflict-free coloring (CF in short) of points in the
plane, with respect to halfplanes, congruent disks, and
nearly-equal axis-parallel rectangles. In all three
cases, the coloring algorithms use {$ O(\log n) $}
colors, with high probability.\par
We also present a deterministic algorithm for online CF
coloring of points in the plane with respect to
nearly-equal axis-parallel rectangles, using {$
O(\log^3 n) $} colors. This is the first efficient
(i.e., using {$ \polylog (n) $} colors) deterministic
online CF coloring algorithm for this problem.",
acknowledgement = ack-nhfb,
articleno = "16",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "coloring; Conflict free coloring; online algorithms",
}
@Article{Alonso:2009:ACA,
author = "Laurent Alonso and Edward M. Reingold",
title = "Average-case analysis of some plurality algorithms",
journal = j-TALG,
volume = "5",
number = "2",
pages = "17:1--17:??",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1497290.1497293",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:05:00 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given a set of $n$ elements, each of which is colored
one of $c$ colors, we must determine an element of the
plurality (most frequently occurring) color by pairwise
equal/unequal color comparisons of elements. We focus
on the expected number of color comparisons when the $
c^n $ colorings are equally probable. We analyze an
obvious algorithm, showing that its expected
performance is {$ (c^2 + c - 2) n / (2 c) - O(c^2) $}, with
variance {$ \Theta (c^2 n) $}. We present and analyze
an algorithm for the case {$ c = 3 $} colors whose
average complexity on the {$ 3^n $} equally probable
inputs is {$ 7083 / 5425 n + O(\sqrt n) = 1.3056 \ldots
{} n + O(\sqrt n) $}, substantially better than the
expected complexity {$ 5 / 3 n + O(1) = 1.6666 \ldots
{} n + O(1) $} of the obvious algorithm. We describe a
similar algorithm for {$ c = 4 $} colors whose average
complexity on the {$ 4^n $} equally probable inputs is
{$ 761311 / 402850 n + O(\log n) = 1.8898 \ldots {} n +
O(\log n) $}, substantially better than the expected
complexity {$ 9 / 4 n + O(1) = 2.25 n + O(1) $} of the
obvious algorithm.",
acknowledgement = ack-nhfb,
articleno = "17",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Algorithm analysis; majority problem; plurality
problem",
}
@Article{Bar-Noy:2009:TMR,
author = "Amotz Bar-Noy and Sudipto Guha and Yoav Katz and
Joseph (Seffi) Naor and Baruch Schieber and Hadas
Shachnai",
title = "Throughput maximization of real-time scheduling with
batching",
journal = j-TALG,
volume = "5",
number = "2",
pages = "18:1--18:??",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1497290.1497294",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:05:00 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the following scheduling with batching
problem that has many applications, for example, in
multimedia-on-demand and manufacturing of integrated
circuits. The input to the problem consists of $n$ jobs
and $k$ parallel machines. Each job is associated with
a set of time intervals in which it can be scheduled
(given either explicitly or nonexplicitly), a weight,
and a family. Each family is associated with a
processing time. Jobs that belong to the same family
can be batched and executed together on the same
machine. The processing time of each batch is the
processing time of the family of jobs it contains. The
goal is to find a nonpreemptive schedule with batching
that maximizes the weight of the scheduled jobs. We
give constant-factor ($4$ or $ 4 + \epsilon $)
approximation algorithms for two variants of the
problem, depending on the precise representation of the
input. When the batch size is unbounded and each job is
associated with a time window in which it can be
processed, these approximation ratios reduce to $2$ and
$ 2 + \epsilon $, respectively. We also give
approximation algorithms for two special cases when all
release times are the same.",
acknowledgement = ack-nhfb,
articleno = "18",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "batching; local ratio technique; Scheduling",
}
@Article{Rabani:2009:BAT,
author = "Yuval Rabani and Gabriel Scalosub",
title = "Bicriteria approximation tradeoff for the node-cost
budget problem",
journal = j-TALG,
volume = "5",
number = "2",
pages = "19:1--19:??",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1497290.1497295",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:05:00 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider an optimization problem consisting of an
undirected graph, with cost and profit functions
defined on all vertices. The goal is to find a
connected subset of vertices with maximum total profit,
whose total cost does not exceed a given budget. The
best result known prior to this work guaranteed a $ (2,
O(\log n)) $ bicriteria approximation, that is, the
solution's profit is at least a fraction of $ 1 /
O(\log n) $ of an optimum solution respecting the
budget, while its cost is at most twice the given
budget. We improve these results and present a
bicriteria tradeoff that, given any $ \epsilon \in (0,
1] $, guarantees a $ (1 + \epsilon, O((1 / \epsilon) \log
n)) $-approximation.",
acknowledgement = ack-nhfb,
articleno = "19",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithms; bicriteria approximation",
}
@Article{Li:2009:PTA,
author = "Guojun Li and Xiaotie Deng and Ying Xu",
title = "A polynomial-time approximation scheme for embedding
hypergraph in a cycle",
journal = j-TALG,
volume = "5",
number = "2",
pages = "20:1--20:??",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1497290.1497296",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:05:00 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the problem of embedding hyperedges of a
hypergraph as paths in a cycle such that the maximum
congestion, namely the maximum number of paths that use
any single edge in a cycle, is minimized.\par
The {\em minimum congestion hypergraph embedding in a
cycle\/} problem is known to be NP-hard and its graph
version, the {\em minimum congestion graph embedding in
a cycle}, is solvable in polynomial-time. Furthermore,
for the graph problem, a polynomial-time approximation
scheme for the weighted version is known. For the
hypergraph model, several approximation algorithms with
a ratio of two have been previously published. A recent
paper reduced the approximation ratio to 1.5. We
present a polynomial-time approximation scheme in this
article, settling the question of whether the
problem admits a polynomial-time approximation scheme.",
acknowledgement = ack-nhfb,
articleno = "20",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Hypergraph embedding; minimum congestion; NP-hard;
polynomial-time approximation scheme",
}
@Article{Even:2009:AAA,
author = "Guy Even and Jon Feldman and Guy Kortsarz and Zeev
Nutov",
title = "A 1.8 approximation algorithm for augmenting
edge-connectivity of a graph from 1 to 2",
journal = j-TALG,
volume = "5",
number = "2",
pages = "21:1--21:??",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1497290.1497297",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:05:00 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present a 1.8-approximation algorithm for the
following NP-hard problem: Given a connected graph {$ G
= (V, E) $} and an edge set {$ E' $} on {$V$} disjoint from
{$E$}, find a minimum-size subset of edges {$ F
\subseteq E' $} such that {$ (V, E \cup F) $} is
2-edge-connected. Our result improves and significantly
simplifies the approximation algorithm with ratio {$
1.875 + \epsilon $} of Nagamochi.",
acknowledgement = ack-nhfb,
articleno = "21",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithms; connectivity; graphs",
}
@Article{Marko:2009:ADP,
author = "Sharon Marko and Dana Ron",
title = "Approximating the distance to properties in
bounded-degree and general sparse graphs",
journal = j-TALG,
volume = "5",
number = "2",
pages = "22:1--22:??",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1497290.1497298",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:05:00 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We address the problem of approximating the distance
of bounded-degree and general sparse graphs from having
some predetermined graph property $p$. That is, we are
interested in sublinear algorithms for estimating the
fraction of edge modifications (additions or deletions)
that must be performed on a graph so that it obtains
$p$. This fraction is taken with respect to a given
upper bound $m$ on the number of edges. In particular,
for graphs with degree bound $d$ over $n$ vertices, $ m
= d n $. To perform such an approximation the algorithm
may ask for the degree of any vertex of its choice, and
may ask for the neighbors of any vertex.\par
The problem of estimating the distance to having a
property was first explicitly addressed by Parnas et
al. [2006]. In the context of graphs this problem was
studied by Fischer and Newman [2007] in the dense
graphs model. In this model the fraction of edge
modifications is taken with respect to $ n^2 $, and the
algorithm may ask for the existence of an edge between
any pair of vertices of its choice. Fischer and Newman
showed that every graph property that has a testing
algorithm in this model, with query complexity
independent of the size of the graph, also has a
distance approximation algorithm with query complexity
that is independent of the size of the graph.\par
In this work we focus on bounded-degree and general
sparse graphs, and give algorithms for all properties
shown to have efficient testing algorithms by Goldreich
and Ron [2002]. Specifically, these properties are
$k$-edge connectivity, subgraph freeness (for
constant-size subgraphs), being an Eulerian graph, and
cycle freeness. A variant of our subgraph-freeness
algorithm approximates the size of a minimum vertex
cover of a graph in sublinear time. This approximation
improves on a recent result of Parnas and Ron [2007].",
acknowledgement = ack-nhfb,
articleno = "22",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "distance approximation; graph properties; property
testing; Sublinear approximation algorithms",
}
@Article{Berry:2009:LTA,
author = "Vincent Berry and Christophe Paul and Sylvain
Guillemot and Fran{\c{c}}ois Nicolas",
title = "Linear time 3-approximation for the {MAST} problem",
journal = j-TALG,
volume = "5",
number = "2",
pages = "23:1--23:??",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1497290.1497299",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:05:00 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given a set of leaf-labeled trees with identical leaf
sets, the well-known Maximum Agreement SubTree (MAST)
problem consists in finding a subtree homeomorphically
included in all input trees and with the largest number
of leaves. MAST and its variant called Maximum
Compatible Tree (MCT) are of particular interest in
computational biology. This article presents a
linear-time approximation algorithm to solve the
complement version of MAST, namely identifying the
smallest set of leaves to remove from input trees to
obtain isomorphic trees. We also present an {$ O(n^2 +
k n) $} algorithm to solve the complement version of
MCT. For both problems, we thus achieve significantly
lower running times than previously known algorithms.
Fast running times are especially important in
phylogenetics where large collections of trees are
routinely produced by resampling procedures, such as
the nonparametric bootstrap or Bayesian MCMC methods.",
acknowledgement = ack-nhfb,
articleno = "23",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithm; maximum agreement subtree;
maximum compatible subtree; phylogenetic tree",
}
@Article{Condon:2009:ADA,
author = "Anne Condon and Amol Deshpande and Lisa Hellerstein
and Ning Wu",
title = "Algorithms for distributional and adversarial
pipelined filter ordering problems",
journal = j-TALG,
volume = "5",
number = "2",
pages = "24:1--24:??",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1497290.1497300",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Tue Jul 14 19:05:00 MDT 2009",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Pipelined filter ordering is a central problem in
database query optimization. The problem is to
determine the optimal order in which to apply a given
set of commutative filters (predicates) to a set of
elements (the tuples of a relation), so as to find, as
efficiently as possible, the tuples that satisfy all of
the filters. Optimization of pipelined filter ordering
has recently received renewed attention in the context
of environments such as the Web, continuous high-speed
data streams, and sensor networks. Pipelined filter
ordering problems are also studied in areas such as
fault detection and machine learning under names such
as learning with attribute costs, minimum-sum set
cover, and satisfying search. We present algorithms for
two natural extensions of the classical pipelined
filter ordering problem: (1) a {\em
distributional-type\/} problem where the filters run in
parallel and the goal is to maximize throughput, and
(2) an {\em adversarial-type\/} problem where the goal
is to minimize the expected value of {\em
multiplicative regret}. We present two related
algorithms for solving (1), both running in time {$
O(n^2) $}, which improve on the {$ O(n^3 \log n) $}
algorithm of Kodialam. We use techniques from our
algorithms for (1) to obtain an algorithm for (2).",
acknowledgement = ack-nhfb,
articleno = "24",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "flow algorithms; Pipelined filter ordering; query
optimization; selection ordering",
}
@Article{Gabow:2009:FSI,
author = "Harold Gabow",
title = "Foreword to special issue on {SODA 2007}",
journal = j-TALG,
volume = "5",
number = "3",
pages = "25:1--25:??",
month = jul,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1541885.1541886",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:27 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "25",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Ruzic:2009:MDS,
author = "Milan Ru{\v{z}}i{\'c}",
title = "Making deterministic signatures quickly",
journal = j-TALG,
volume = "5",
number = "3",
pages = "26:1--26:??",
month = jul,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1541885.1541887",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:27 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present a new technique of universe reduction.
Primary applications are the dictionary problem and the
predecessor problem. We give several new results on
static dictionaries in different computational models:
the word RAM, the practical RAM, and the
cache-oblivious model. All algorithms and data
structures are deterministic and use linear space.
Representative results are: a dictionary with a lookup
time of {$ O(\log \log n) $} and construction time of
{$ O(n) $} on sorted input on a word RAM, and a static
predecessor structure for variable- and unbounded-length
binary strings that in the cache-oblivious model
has a query performance of {$ O(| s | / B + \log | s |)
$} I/Os, for query argument {$s$}.",
acknowledgement = ack-nhfb,
articleno = "26",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Carr:2009:CCN,
author = "Robert D. Carr and Goran Konjevod and Greg Little and
Venkatesh Natarajan and Ojas Parekh",
title = "Compacting cuts: a new linear formulation for minimum
cut",
journal = j-TALG,
volume = "5",
number = "3",
pages = "27:1--27:??",
month = jul,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1541885.1541888",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:27 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "For a graph (V, E), existing compact linear
formulations for the minimum cut problem require {$
\Theta (|V| |E|) $} variables and constraints and can
be interpreted as a composition of {$ |V| - 1 $}
polyhedra for minimum {$s$}--{$t$} cuts in much the
same way as early approaches to finding globally
minimum cuts relied on {$ |V| - 1 $} calls to a minimum
{$s$}--{$t$} cut algorithm. We present the first
formulation to beat this bound, one that uses {$
O(|V|^2) $} variables and {$ O(|V|^3) $} constraints.
An immediate consequence of our result is a compact
linear relaxation with {$ O(|V|^2) $} constraints and
{$ O(|V|^3) $} variables for enforcing global
connectivity constraints. This relaxation is as strong
as standard cut-based relaxations and has applications
in solving traveling salesman problems by integer
programming as well as finding approximate solutions
for survivable network design problems using Jain's
iterative rounding method. Another application is a
polynomial-time verifiable certificate of size {$n$}
for the NP-complete problem of {$ l_1
$}-embeddability of a rational metric on an {$n$}-set
(as opposed to a certificate of size $ n^2 $ known
previously).",
acknowledgement = ack-nhfb,
articleno = "27",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Giora:2009:ODV,
author = "Yoav Giora and Haim Kaplan",
title = "{Optimal} dynamic vertical ray shooting in rectilinear
planar subdivisions",
journal = j-TALG,
volume = "5",
number = "3",
pages = "28:1--28:??",
month = jul,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1541885.1541889",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:27 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the dynamic vertical ray shooting problem
against horizontal disjoint segments, that is, the task
of maintaining a dynamic set {$S$} of {$n$}
nonintersecting horizontal line segments in the plane
under a query that reports the first segment in {$S$}
intersecting a vertical ray from a query point. We
develop a linear-size structure that supports queries,
insertions, and deletions in {$ O(\log n) $} worst-case
time. Our structure works in the comparison model on a
random access machine.",
acknowledgement = ack-nhfb,
articleno = "28",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Eppstein:2009:STS,
author = "David Eppstein",
title = "Squarepants in a tree: {Sum} of subtree clustering and
hyperbolic pants decomposition",
journal = j-TALG,
volume = "5",
number = "3",
pages = "29:1--29:??",
month = jul,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1541885.1541890",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:27 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We provide efficient constant-factor approximation
algorithms for the problems of finding a hierarchical
clustering of a point set in any metric space,
minimizing the sum of minimum spanning tree lengths
within each cluster, and in the hyperbolic or Euclidean
planes, minimizing the sum of cluster perimeters. Our
algorithms for the hyperbolic and Euclidean planes can
also be used to provide a pants decomposition, that is,
a set of disjoint simple closed curves partitioning the
plane minus the input points into subsets with exactly
three boundary components, with approximately minimum
total length. In the Euclidean case, these curves are
squares; in the hyperbolic case, they combine our
Euclidean square pants decomposition with our tree
clustering method for general metric spaces.",
acknowledgement = ack-nhfb,
articleno = "29",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Demaine:2009:MM,
author = "Erik D. Demaine and Mohammadtaghi Hajiaghayi and Hamid
Mahini and Amin S. Sayedi-Roshkhar and Shayan
Oveisgharan and Morteza Zadimoghaddam",
title = "Minimizing movement",
journal = j-TALG,
volume = "5",
number = "3",
pages = "30:1--30:??",
month = jul,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1541885.1541891",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:27 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We give approximation algorithms and inapproximability
results for a class of movement problems. In general,
these problems involve planning the coordinated motion
of a large collection of objects (representing anything
from a robot swarm or firefighter team to map labels or
network messages) to achieve a global property of the
network while minimizing the maximum or average
movement. In particular, we consider the goals of
achieving connectivity (undirected and directed),
achieving connectivity between a given pair of
vertices, achieving independence (a dispersion
problem), and achieving a perfect matching (with
applications to multicasting). This general family of
movement problems encompasses an intriguing range of
graph and geometric algorithms, with several real-world
applications and a surprising range of approximability.
In some cases, we obtain tight approximation and
inapproximability results using direct techniques
(without use of PCP), assuming just that P $ \neq $
NP.",
acknowledgement = ack-nhfb,
articleno = "30",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Borradaile:2009:LAS,
author = "Glencora Borradaile and Philip Klein and Claire
Mathieu",
title = "An {$ O(n \log n) $} approximation scheme for
{Steiner} tree in planar graphs",
journal = j-TALG,
volume = "5",
number = "3",
pages = "31:1--31:??",
month = jul,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1541885.1541892",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:27 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We give a Polynomial-Time Approximation Scheme (PTAS)
for the Steiner tree problem in planar graphs. The
running time is {$ O(n \log n) $}.",
acknowledgement = ack-nhfb,
articleno = "31",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Charikar:2009:NOA,
author = "Moses Charikar and Konstantin Makarychev and Yury
Makarychev",
title = "Near-optimal algorithms for maximum constraint
satisfaction problems",
journal = j-TALG,
volume = "5",
number = "3",
pages = "32:1--32:??",
month = jul,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1541885.1541893",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:27 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article, we present two approximation
algorithms for the maximum constraint satisfaction
problem with $k$ variables in each constraint (MAX
$k$-CSP). Given a $ (1 - \epsilon) $ satisfiable 2CSP,
our first algorithm finds an assignment of variables
satisfying a {$ 1 - O(\sqrt \epsilon) $} fraction of
all constraints. The best previously known result, due
to Zwick, was {$ 1 - O(\epsilon^{1 / 3}) $}. The second
algorithm finds a {$ c k / 2^k $} approximation for the
MAX {$k$}-CSP problem (where {$ c > 0.44 $} is an
absolute constant). This result improves the previously
best known algorithm by Hast, which had an
approximation guarantee of {$ \Omega (k / (2^k \log k))
$}. Both results are optimal assuming the unique games
conjecture and are based on rounding natural
semidefinite programming relaxations. We also believe
that our algorithms and their analysis are simpler than
those previously known.",
acknowledgement = ack-nhfb,
articleno = "32",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Andrews:2009:IFP,
author = "Matthew Andrews",
title = "Instability of {FIFO} in the permanent sessions model
at arbitrarily small network loads",
journal = j-TALG,
volume = "5",
number = "3",
pages = "33:1--33:??",
month = jul,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1541885.1541894",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:27 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We show that for any $ r > 0 $, there is a network of
First-In-First-Out servers and a fixed set of sessions
such that:\par --- The network load is $r$ with respect
to the permanent sessions model with bounded
arrivals.\par --- The network can be made unstable.",
acknowledgement = ack-nhfb,
articleno = "33",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Golubchik:2009:AAD,
author = "Leana Golubchik and Sanjeev Khanna and Samir Khuller
and Ramakrishna Thurimella and An Zhu",
title = "Approximation algorithms for data placement on
parallel disks",
journal = j-TALG,
volume = "5",
number = "4",
pages = "34:1--34:??",
month = oct,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1597036.1597037",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:29 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study an optimization problem that arises in the
context of data placement in a multimedia storage
system. We are given a collection of {$M$} multimedia
objects (data objects) that need to be assigned to a
storage system consisting of {$N$} disks {$ d_1 $}, {$
d_2 $}, \ldots {}, {$ d_N $}. We are also given sets {$
U_1 $}, {$ U_2 $}, \ldots {}, {$ U_M $} such that {$
U_i $} is the set of clients seeking the {$i$}th data
object. Each disk {$ d_j $} is characterized by two
parameters, namely, its storage capacity {$ C_j $}
which indicates the maximum number of data objects that
may be assigned to it, and a load capacity {$ L_j $}
which indicates the maximum number of clients that it
can serve. The goal is to find a placement of data
objects to disks and an assignment of clients to disks
so as to maximize the total number of clients served,
subject to the capacity constraints of the storage
system. We study this data placement problem for two
natural classes of storage systems, namely, homogeneous
and uniform ratio. We show that an algorithm developed
by Shachnai and Tamir [2000a] for data placement
achieves the best possible absolute bound regarding the
number of clients that can always be satisfied. We also
show how to implement the algorithm so that it has a
running time of {$ O((N + M) \log (N + M)) $}. In
addition, we design a polynomial-time approximation
scheme, solving an open problem posed in the same
paper.",
acknowledgement = ack-nhfb,
articleno = "34",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Guha:2009:SEE,
author = "Sudipto Guha and Andrew McGregor and Suresh
Venkatasubramanian",
title = "Sublinear estimation of entropy and information
distances",
journal = j-TALG,
volume = "5",
number = "4",
pages = "35:1--35:??",
month = oct,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1597036.1597038",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:29 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In many data mining and machine learning problems, the
data items that need to be clustered or classified are
not arbitrary points in a high-dimensional space, but
are distributions, that is, points on a
high-dimensional simplex. For distributions, natural
measures are not {$ l_p $} distances, but
information-theoretic measures such as the
Kullback--Leibler and Hellinger divergences. Similarly,
quantities such as the entropy of a distribution are
more natural than frequency moments. Efficient
estimation of these quantities is a key component in
algorithms for manipulating distributions. Since the
datasets involved are typically massive, these
algorithms need to have only sublinear complexity in
order to be feasible in practice. We present a range of
sublinear-time algorithms in various oracle models in
which the algorithm accesses the data via an oracle
that supports various queries. In particular, we answer
a question posed by Batu et al. on testing whether two
distributions are close in an information-theoretic
sense given independent samples. We then present
optimal algorithms for estimating various
information-divergences and entropy with a more
powerful oracle called the combined oracle that was
also considered by Batu et al. Finally, we consider
sublinear-space algorithms for these quantities in the
data-stream model. In the course of doing so, we
explore the relationship between the aforementioned
oracle models and the data-stream model. This continues
work initiated by Feigenbaum et al. An important
additional component to the study is considering data
streams that are ordered randomly rather than just
those which are ordered adversarially.",
acknowledgement = ack-nhfb,
articleno = "35",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Levin:2009:GMC,
author = "Asaf Levin",
title = "A generalized minimum cost $k$-clustering",
journal = j-TALG,
volume = "5",
number = "4",
pages = "36:1--36:??",
month = oct,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1597036.1597039",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:29 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the problems of set partitioning into $k$
clusters with minimum total cost and minimum of the
maximum cost of a cluster. The cost function is given
by an oracle, and we assume that it satisfies some
natural structural constraints. That is, we assume that
the cost function is monotone, the cost of a singleton
is zero, and we assume that for all {$ S \cap S' \neq
\emptyset $} the following holds: {$ c(S) + c(S') \geq c(S
\cup S') $}. For the problem of minimizing the maximum
cost of a cluster we present a {$ (2 k - 1)
$}-approximation algorithm for {$ k \geq 3 $}, a
2-approximation algorithm for {$ k = 2 $}, and we also
show a lower bound of $k$ on the performance guarantee
of any polynomial-time algorithm. For the problem of
minimizing the total cost of all the clusters, we
present a 2-approximation algorithm for the case where
$k$ is a fixed constant, a $ (4 k - 3) $-approximation
where $k$ is unbounded, and we show a lower bound of
$2$ on the approximation ratio of any polynomial-time
algorithm. Our lower bounds do not depend on the common
assumption that P $ \neq $ NP.",
acknowledgement = ack-nhfb,
articleno = "36",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Farach-Colton:2009:BHO,
author = "Martin Farach-Colton and Rohan J. Fernandes and Miguel
A. Mosteiro",
title = "Bootstrapping a hop-optimal network in the weak sensor
model",
journal = j-TALG,
volume = "5",
number = "4",
pages = "37:1--37:??",
month = oct,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1597036.1597040",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:29 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Sensor nodes are very weak computers that get
distributed at random on a surface. Once deployed, they
must wake up and form a radio network. Sensor network
bootstrapping research thus has three parts: One must
model the restrictions on sensor nodes; one must prove
that the connectivity graph of the sensors has a
subgraph that would make a good network; and one must
give a distributed protocol for finding such a network
subgraph that can be implemented on sensor nodes.
Although many particular restrictions on sensor nodes
are implicit or explicit in many papers, there remain
many inconsistencies and ambiguities from paper to
paper. The lack of a clear model means that solutions
to the network bootstrapping problem in both the theory
and systems literature all violate constraints on
sensor nodes. For example, random geometric graph
results on sensor networks predict the existence of
subgraphs on the connectivity graph with good
route-stretch, but these results do not address the
degree of such a graph, and sensor networks must have
constant degree. Furthermore, proposed protocols for
actually finding such graphs require that nodes have
too much memory, whereas others assume the existence of
a contention-resolution mechanism. We present a formal
Weak Sensor model that summarizes the literature on
sensor node restrictions, taking the most restrictive
choices when possible. We show that sensor connectivity
graphs have low-degree subgraphs with good hop-stretch,
as required by the Weak Sensor model. Finally, we give
a Weak Sensor model-compatible protocol for finding
such graphs. Ours is the first network initialization
algorithm that is implementable on sensor nodes.",
acknowledgement = ack-nhfb,
articleno = "37",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Eppstein:2009:AMI,
author = "David Eppstein",
title = "All maximal independent sets and dynamic dominance for
sparse graphs",
journal = j-TALG,
volume = "5",
number = "4",
pages = "38:1--38:??",
month = oct,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1597036.1597042",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:29 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We describe algorithms, based on Avis and Fukuda's
reverse search paradigm, for listing all maximal
independent sets in a sparse graph in polynomial time
and delay per output. For bounded degree graphs, our
algorithms take constant time per set generated; for
minor-closed graph families, the time is {$ O(n) $} per
set, and for more general sparse graph families we
achieve subquadratic time per set. We also describe new
data structures for maintaining a dynamic vertex set
{$S$} in a sparse or minor-closed graph family, and
querying the number of vertices not dominated by {$S$};
for minor-closed graph families the time per update is
constant, while it is sublinear for any sparse graph
family. We can also maintain a dynamic vertex set in an
arbitrary {$m$}-edge graph and test the independence of
the maintained set in time {$ O(\sqrt m) $} per update.
We use the domination data structures as part of our
enumeration algorithms.",
acknowledgement = ack-nhfb,
articleno = "38",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Reed:2009:LTA,
author = "Bruce Reed and David R. Wood",
title = "A linear-time algorithm to find a separator in a graph
excluding a minor",
journal = j-TALG,
volume = "5",
number = "4",
pages = "39:1--39:??",
month = oct,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1597036.1597043",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:29 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Let {$G$} be an {$n$}-vertex {$m$}-edge graph with
weighted vertices. A pair of vertex sets {$ A, B
\subseteq V(G) $} is a {$ 2 / 3 $}-separation of order
{$ |A \cap B| $} if {$ A \cup B = V(G) $}, there is no
edge between {$A$}--{$B$} and {$B$}--{$A$}, and both
{$A$}--{$B$} and {$B$}--{$A$} have weight at most {$ 2
/ 3 $} the total weight of {$G$}. Let {$ l \in Z^+ $}
be fixed. Alon et al. [1990] presented an algorithm
that in {$ O(n^{1 / 2} m) $} time, outputs either a {$
K_l $}-minor of {$G$}, or a separation of {$G$} of
order {$ O(n^{1 / 2}) $}. Whether there is an {$ O(n +
m) $}-time algorithm for this theorem was left as an
open problem. In this article, we obtain an {$ O(n + m)
$}-time algorithm at the expense of an {$ O(n^{2 / 3})
$} separator. Moreover, our algorithm exhibits a
trade-off between time complexity and the order of the
separator. In particular, for any given {$ \epsilon \in
[0, 1 / 2] $}, our algorithm outputs either a {$ K_l
$}-minor of {$G$}, or a separation of {$G$} with order
{$ O(n^{(2 - \epsilon) / 3}) $} in {$ O(n^{1 + \epsilon
} + m) $} time. As an application we give a fast
approximation algorithm for finding an independent set
in a graph with no {$ K_l $}-minor.",
acknowledgement = ack-nhfb,
articleno = "39",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Ito:2009:EIC,
author = "Hiro Ito and Kazuo Iwama",
title = "Enumeration of isolated cliques and pseudo-cliques",
journal = j-TALG,
volume = "5",
number = "4",
pages = "40:1--40:??",
month = oct,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1597036.1597044",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:29 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article, we consider isolated cliques and
isolated dense subgraphs. For a given graph {$G$}, a
vertex subset {$S$} of size {$k$} (and also its induced
subgraph {$ G(S) $}) is said to be {$c$}-isolated if
{$ G(S) $} is connected to its outside via fewer than {$
c k $} edges. The number {$c$} is sometimes called the
isolation factor. The subgraph appears more isolated if
the isolation factor is smaller. The main result in
this work shows that for a fixed constant {$c$}, we can
enumerate all $c$-isolated maximal cliques (including a
maximum one, if any) in linear time. In more detail, we
show that, for a given graph {$G$} of {$n$} vertices
and {$m$} edges, and a positive real number {$c$}, all
$c$-isolated maximal cliques can be enumerated in time
{$ O(c^4 2^{2c} m) $}. From this, we can see that: (1)
if {$c$} is a constant, all {$c$}-isolated maximal
cliques can be enumerated in linear time, and (2) if {$
c = O(\log n) $}, all {$c$}-isolated maximal cliques
can be enumerated in polynomial time. Moreover, we show
that these bounds are tight. That is, if {$ f(n) $} is
an increasing function not bounded by any constant,
then there is a graph of {$n$} vertices and $m$ edges
for which the number of $ f(n) $-isolated maximal
cliques is superlinear in $ n + m $. Furthermore, if $
f(n) = \omega (\log n) $, there is a graph of $n$
vertices and $m$ edges for which the number of $ f(n)
$-isolated maximal cliques is superpolynomial in $ n +
m $. We next introduce the idea of pseudo-cliques. A
pseudo-clique having an average degree $ \alpha $ and a
minimum degree $ \beta $, denoted by {$ {\rm
PC}(\alpha, \beta) $}, is a set {$ V' \subseteq V $}
such that the subgraph induced by {$ V' $} has an
average degree of at least {$ \alpha $} and a minimum
degree of at least {$ \beta $}. This article
investigates these, and obtains some cases that can be
solved in polynomial time and some other cases that
have a superpolynomial number of solutions. In particular,
we show the following results, where {$k$} is the
number of vertices of the isolated pseudo-cliques: (1)
For any $ \epsilon > 0 $ there is a graph of $n$
vertices for which the number of $1$-isolated {$ {\rm
PC}(k - (\log k)^{1 + \epsilon }, k / (\log k)^{1 +
\epsilon }) $} is superpolynomial, and (2) there is a
polynomial-time algorithm which enumerates all
{$c$}-isolated {$ {\rm PC}(k - \log k, k / \log k) $},
for any constant {$c$}.",
acknowledgement = ack-nhfb,
articleno = "40",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
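The isolation condition above is easy to state operationally. A minimal
Python sketch (the adjacency-dictionary representation is an assumption of
this illustration, not code from the article): a vertex set S of size k is
c-isolated when fewer than c*k edges leave S.

def is_c_isolated(S, adj, c):
    # adj[v] is the set of neighbours of v; S is a collection of vertices.
    S = set(S)
    outgoing = sum(1 for u in S for v in adj[u] if v not in S)
    return outgoing < c * len(S)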
@Article{Karakostas:2009:BAR,
author = "George Karakostas",
title = "A better approximation ratio for the vertex cover
problem",
journal = j-TALG,
volume = "5",
number = "4",
pages = "41:1--41:??",
month = oct,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1597036.1597045",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:29 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We reduce the approximation factor for the vertex
cover to {$ 2 - \Theta (1 / \sqrt {\log n}) $} (instead
of the previous {$ 2 - \Theta (\ln \ln n / 2 \ln n) $}
obtained by Bar-Yehuda and Even [1985] and Monien and
Speckenmeyer [1985]). The improvement of the vanishing
factor comes as an application of the recent results of
Arora et al. [2004] that improved the approximation
factor of the sparsest cut and balanced cut problems.
In particular, we use the existence of two big and
well-separated sets of nodes in the solution of the
semidefinite relaxation for balanced cut, proven by
Arora et al. [2004]. We observe that a solution of the
semidefinite relaxation for vertex cover, when
strengthened with the triangle inequalities, can be
transformed into a solution of a balanced cut problem,
and therefore the existence of big well-separated sets
in the sense of Arora et al. [2004] translates into the
existence of a big independent set.",
acknowledgement = ack-nhfb,
articleno = "41",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Berend:2009:LAC,
author = "Daniel Berend and Vladimir Braverman",
title = "A linear algorithm for computing convex hulls for
random lines",
journal = j-TALG,
volume = "5",
number = "4",
pages = "42:1--42:??",
month = oct,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1597036.1597046",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:29 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Finding the convex hull of $n$ points in the plane
takes {$ \Theta (n \log n) $} time in the worst case. In Devroye
and Toussaint [1993] and Golin et al. [2002] the
problem of computing the convex hull of the
intersection points of {$n$} lines was considered,
where the lines are chosen randomly according to two
different models. In both models, linear-time algorithms
were developed. Here we improve the results of Devroye
and Toussaint [1993] by giving a universal algorithm
for a wider range of distributions.",
acknowledgement = ack-nhfb,
articleno = "42",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Kao:2009:RFD,
author = "Ming-Yang Kao and Manan Sanghi and Robert Schweller",
title = "Randomized fast design of short {DNA} words",
journal = j-TALG,
volume = "5",
number = "4",
pages = "43:1--43:??",
month = oct,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1597036.1597047",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:29 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the problem of efficiently designing sets
(codes) of equal-length DNA strings (words) that
satisfy certain combinatorial constraints. This problem
has numerous motivations including DNA self-assembly
and DNA computing. Previous work has extended results
from coding theory to obtain bounds on code size for
new biologically motivated constraints and has applied
heuristic local search and genetic algorithm techniques
for code design. This article proposes a natural
optimization formulation of the DNA code design problem
in which the goal is to design $n$ strings that satisfy
a given set of constraints while minimizing the length
of the strings. For multiple sets of constraints, we
provide simple randomized algorithms that run in time
polynomial in $n$ and any given constraint parameters,
and output strings of length within a constant factor
of the optimal with high probability. To the best of
our knowledge, this work is the first to consider this
type of optimization problem in the context of DNA code
design.",
acknowledgement = ack-nhfb,
articleno = "43",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Fernandez-Baca:2009:PAU,
author = "David Fern{\'a}ndez-Baca and Balaji Venkatachalam",
title = "Parametric analysis for ungapped {Markov} models of
evolution",
journal = j-TALG,
volume = "5",
number = "4",
pages = "44:1--44:??",
month = oct,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1597036.1597048",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:29 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Efficient sensitivity analysis algorithms are
presented for two problems arising in the study of
Markov models of sequence evolution: ancestral
reconstruction in evolutionary trees and local ungapped
alignment under log-odds scoring. The algorithms
generate complete descriptions of the optimum solutions
for all possible values of the evolutionary distance.
The running time for the parametric ancestral
reconstruction problem under the Kimura 2-parameter
model is {$ O(k n + k n^{2 / 3} \log k) $}, where {$n$}
is the number of sequences and {$k$} is their length,
assuming all edges have the same length. For the
parametric gapless alignment problem under the
Jukes-Cantor model, the running time is {$ O(m n + m
n^{2 / 3} \log m) $}, where {$m$} and {$n$} are the
sequence lengths and {$ n \leq m $}.",
acknowledgement = ack-nhfb,
articleno = "44",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Scott:2009:PCS,
author = "Alexander D. Scott and Gregory B. Sorkin",
title = "{Polynomial} constraint satisfaction problems, graph
bisection, and the {Ising} partition function",
journal = j-TALG,
volume = "5",
number = "4",
pages = "45:1--45:??",
month = oct,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1597036.1597049",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:29 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We introduce a problem class we call Polynomial
Constraint Satisfaction Problems, or PCSP. Where the
usual CSPs from computer science and optimization have
real-valued score functions, and partition functions
from physics have monomials, PCSP has scores that are
arbitrary multivariate formal polynomials, or indeed
take values in an arbitrary ring. Although PCSP is much
more general than CSP, remarkably, all (exact,
exponential-time) algorithms we know of for 2-CSP
(where each score depends on at most 2 variables)
extend to 2-PCSP, at the expense of just a polynomial
factor in running time. Specifically, we extend the
reduction-based algorithm of Scott and Sorkin [2007];
the specialization of that approach to sparse random
instances, where the algorithm runs in polynomial
expected time; dynamic-programming algorithms based on
tree decompositions; and the split-and-list
matrix-multiplication algorithm of Williams [2004].
This gives the first polynomial-space exact algorithm
more efficient than exhaustive enumeration for the
well-studied problems of finding a maximum bisection of
a graph, and calculating the partition function of an
Ising model. It also yields the most efficient
algorithm known for certain instances of counting
and/or weighted Maximum Independent Set. Furthermore,
PCSP solves both optimization and counting versions of
a wide range of problems, including all CSPs, and thus
enables samplers including uniform sampling of optimal
solutions and Gibbs sampling of all solutions.",
acknowledgement = ack-nhfb,
articleno = "45",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
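One standard way to see how a partition function fits the polynomial-score
framework sketched above (an illustration, not the authors' exact formalism):
for the Ising model on a graph $G = (V, E)$ with couplings $J_{uv}$, write
\[
  Z(G) \;=\; \sum_{\sigma \in \{-1,+1\}^V} \; \prod_{uv \in E}
  x^{J_{uv} \sigma_u \sigma_v},
\]
so that each edge contributes a score that is a single monomial in the formal
variable $x$ (a 2-PCSP in the terminology above); evaluating the resulting
(Laurent) polynomial at $x = e^{\beta}$ recovers the usual partition function
at inverse temperature $\beta$.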
@Article{Nguyen:2009:LDL,
author = "Phong Q. Nguyen and Damien Stehl{\'e}",
title = "Low-dimensional lattice basis reduction revisited",
journal = j-TALG,
volume = "5",
number = "4",
pages = "46:1--46:??",
month = oct,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1597036.1597050",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:29 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Lattice reduction is a geometric generalization of the
problem of computing greatest common divisors. Most of
the interesting algorithmic problems related to lattice
reduction are NP-hard as the lattice dimension
increases. This article deals with the low-dimensional
case. We study a greedy lattice basis reduction
algorithm for the Euclidean norm, which is arguably the
most natural lattice basis reduction algorithm because
it is a straightforward generalization of an old
two-dimensional algorithm of Lagrange, usually known as
Gauss' algorithm, and which is very similar to Euclid's
gcd algorithm. Our results are twofold. From a
mathematical point of view, we show that up to
dimension four, the output of the greedy algorithm is
optimal: The output basis reaches all the successive
minima of the lattice. However, as soon as the lattice
dimension is strictly higher than four, the output
basis may be arbitrarily bad as it may not even reach
the first minimum. More importantly, from a
computational point of view, we show that up to
dimension four, the bit-complexity of the greedy
algorithm is quadratic without fast integer arithmetic,
just like Euclid's gcd algorithm. This was already
proved by Semaev up to dimension three using rather
technical means, but it was previously unknown whether
or not the algorithm was still polynomial in dimension
four. We propose two different analyses: a global
approach based on the geometry of the current basis
when the length decrease stalls, and a local approach
showing directly that a significant length decrease
must occur every {$ O(1) $} consecutive steps. Our
analyses simplify Semaev's analysis in dimensions two
and three, and unify the cases of dimensions two to
four. Although the global approach is much simpler, we
also present the local approach because it gives
further information on the behavior of the algorithm.",
acknowledgement = ack-nhfb,
articleno = "46",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
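For concreteness, the two-dimensional Lagrange (``Gauss'') reduction that the
greedy algorithm above generalizes can be written in a few lines. This is a
minimal Python sketch of the classical procedure only (not the article's
higher-dimensional greedy algorithm), assuming integer basis vectors given as
tuples.

from fractions import Fraction

def lagrange_reduce(u, v):
    # Two-dimensional lattice reduction, structured like Euclid's gcd loop:
    # repeatedly subtract the nearest-integer multiple of the shorter vector
    # from the longer one, and swap, until no further length decrease occurs.
    def norm2(w): return sum(x * x for x in w)
    def dot(a, b): return sum(x * y for x, y in zip(a, b))
    if norm2(u) > norm2(v):
        u, v = v, u
    while True:
        q = round(Fraction(dot(u, v), norm2(u)))   # nearest integer, exactly
        v = tuple(vi - q * ui for ui, vi in zip(u, v))
        if norm2(v) >= norm2(u):
            return u, v      # now |u| <= |v| and 2|<u,v>| <= |u|^2
        u, v = v, u

For example, lagrange_reduce((1, 0), (7, 1)) returns ((1, 0), (0, 1)).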
@Article{Finocchi:2009:RD,
author = "Irene Finocchi and Fabrizio Grandoni and Giuseppe F.
Italiano",
title = "Resilient dictionaries",
journal = j-TALG,
volume = "6",
number = "1",
pages = "1:1--1:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644016",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We address the problem of designing data structures in
the presence of faults that may arbitrarily corrupt
memory locations. More precisely, we assume that an
adaptive adversary can arbitrarily overwrite the
content of up to $ \delta $ memory locations, that
corrupted locations cannot be detected, and that only
{$ O(1) $} memory locations are safe. In this
framework, we call a data structure resilient if it is
able to operate correctly (at least) on the set of
uncorrupted values. We present a resilient dictionary,
implementing search, insert, and delete operations. Our
dictionary has {$ O(\log n + \delta) $} expected
amortized time per operation, and {$ O(n) $} space
complexity, where {$n$} denotes the current number of
keys in the dictionary. We also describe a
deterministic resilient dictionary, with the same
amortized cost per operation over a sequence of at
least {$ \delta^\epsilon $} operations, where {$
\epsilon > 0 $} is an arbitrary constant. Finally, we
show that any resilient comparison-based dictionary
must take {$ \Omega (\log n + \delta) $} expected time
per search. Our results are achieved by means of
simple, new techniques which might be of independent
interest for the design of other resilient
algorithms.",
acknowledgement = ack-nhfb,
articleno = "1",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Demaine:2009:ODA,
author = "Erik D. Demaine and Shay Mozes and Benjamin Rossman
and Oren Weimann",
title = "An optimal decomposition algorithm for tree edit
distance",
journal = j-TALG,
volume = "6",
number = "1",
pages = "2:1--2:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644017",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The edit distance between two ordered rooted trees
with vertex labels is the minimum cost of transforming
one tree into the other by a sequence of elementary
operations consisting of deleting and relabeling
existing nodes, as well as inserting new nodes. In this
article, we present a worst-case {$ O(n^3) $}-time
algorithm for the problem when the two trees have size
{$n$}, improving the previous best {$ O(n^3 \log n)
$}-time algorithm. Our result requires a novel adaptive
strategy for deciding how a dynamic program divides
into subproblems, together with a deeper understanding
of the previous algorithms for the problem. We prove
the optimality of our algorithm among the family of
decomposition strategy algorithms-which also includes
the previous fastest algorithms-by tightening the known
lower bound of {$ \Omega (n^2 \log^2 n) $} to {$ \Omega
(n^3) $}, matching our algorithm's running time.
Furthermore, we obtain matching upper and lower bounds
for decomposition strategy algorithms of {$ \Theta (n
m^2 (1 + \log (n / m))) $} when the two trees have sizes
{$m$} and {$n$} and {$ m < n $}.",
acknowledgement = ack-nhfb,
articleno = "2",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Bille:2009:IAS,
author = "Philip Bille and Rolf Fagerberg and Inge Li G{\o}rtz",
title = "Improved approximate string matching and regular
expression matching on {Ziv--Lempel} compressed texts",
journal = j-TALG,
volume = "6",
number = "1",
pages = "3:1--3:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644018",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study the approximate string matching and regular
expression matching problem for the case when the text
to be searched is compressed with the Ziv--Lempel
adaptive dictionary compression schemes. We present a
time-space trade-off that leads to algorithms improving
the previously known complexities for both problems. In
particular, we significantly improve the space bounds,
which in practical applications are likely to be a
bottleneck.",
acknowledgement = ack-nhfb,
articleno = "3",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Duch:2009:URK,
author = "Amalia Duch and Conrado Mart{\'\i}nez",
title = "Updating relaxed {$ {K} $}-d trees",
journal = j-TALG,
volume = "6",
number = "1",
pages = "4:1--4:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644019",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this work we present an in-depth study of
randomized relaxed $k$--$d$ trees. It covers two
fundamental aspects: the randomized algorithms that
allow to preserve the random properties of relaxed
$k$--$d$ trees and the mathematical analysis of the
expected performance of these algorithms. In
particular, we describe randomized update algorithms
for $k$--$d$ trees based on the split and join
algorithms of Duch et al. [1998]. We carry out an
analysis of the expected cost of all these algorithms,
using analytic combinatorics techniques. We show that
the average cost of split and join is of the form {$
\zeta (K) \cdot n^{\phi (K)} + o(n^{\phi (K)}) $}, with
{$ 1 \leq \phi (K) < 1.561552813 $}, and we give
explicit formul{\ae} for both {$ \zeta (K) $} and {$
\phi (K) $}. These results on the average performance
of split and join imply that the expected cost of an
insertion or a deletion is {$ \Theta (n^{\phi (K) - 1})
$} when {$ K > 2 $} and {$ \Theta (\log n) $} for {$ K
= 2 $}.",
acknowledgement = ack-nhfb,
articleno = "4",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Nutov:2009:ACA,
author = "Zeev Nutov",
title = "Approximating connectivity augmentation problems",
journal = j-TALG,
volume = "6",
number = "1",
pages = "5:1--5:19",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644020",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Let {$ G = (V, E) $} be an undirected graph and let {$
S \subseteq V $}. The {$S$}-connectivity {$
\lambda^S_G(u, v) $} of a node pair {$ (u, v) $} in
{$G$} is the maximum number of {$ u v $}-paths such that
no two of them share an edge or a node in {$ S - \{ u, v
\} $}. The corresponding Connectivity
Augmentation (CA) problem is: given a graph {$ G = (V,
E) $}, a node subset {$ S \subseteq V $}, and a
nonnegative integer requirement function {$ r(u, v) $}
on {$ V \times V $}, add a minimum size set F of new
edges to {$G$} so that {$ \lambda^S_{G + F}(u, v) \geq
r(u, v) $} for all {$ (u, v) \in V \times V $}. Three
extensively studied particular cases are: the Edge-CA
({$ S = \emptyset $}), the Node-CA ({$ S = V $}), and the
Element-CA {$ r(u, v) = 0 $} whenever {$ u \in S $} or
{$ v \in S $}. A polynomial-time algorithm for Edge-CA
was developed by Frank. In this article we consider the
Element-CA and the Node-CA, that are NP-hard even for
{$ r(u, v) \in \{ 0, 2 \} $}. The best known ratios for
these problems were: 2 for Element-CA and {$ O(r_{\rm
max} \cdot \ln n) $} for Node-CA, where {$ r_{\rm max}
= \max_{u, v \in V} r(u, v) $} and {$ n = |V| $}. Our
main result is a 7/4-approximation algorithm for the
Element-CA, improving the previously best known
2-approximation. For Element-CA with {$ r(u, v) \in \{
0, 1, 2 \} $} we give a {$ 3 / 2 $}-approximation
algorithm. These approximation ratios are based on a
new splitting-off theorem, which implies an improved
lower bound on the number of edges needed to cover a
skew-supermodular set function. For Node-CA we
establish the following approximation threshold:
Node-CA with {$ r(u, v) \in \{ 0, k \} $} cannot be
approximated within {$ O(2^{\log^{1 - \epsilon } n}) $}
for any fixed {$ \epsilon > 0 $}, unless NP {$
\subseteq $} DTIME({$ n^{\polylog (n)} $}).",
acknowledgement = ack-nhfb,
articleno = "5",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Demetrescu:2009:TSP,
author = "Camil Demetrescu and Irene Finocchi and Andrea
Ribichini",
title = "Trading off space for passes in graph streaming
problems",
journal = j-TALG,
volume = "6",
number = "1",
pages = "6:1--6:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644021",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Data stream processing has recently received
increasing attention as a computational paradigm for
dealing with massive data sets. Surprisingly, no
algorithm using both sublinear space and a sublinear number of passes is known
for natural graph problems in classical read-only
streaming. Motivated by technological factors of modern
storage systems, some authors have recently started to
investigate the computational power of less restrictive
models where writing streams is allowed. In this
article, we show that the use of intermediate temporary
streams is powerful enough to provide effective
space-passes tradeoffs for natural graph problems. In
particular, for any space restriction of $s$ bits, we
show that single-source shortest paths in directed
graphs with small positive integer edge weights can be
solved in {$ O((n \log^{3 / 2} n) / \sqrt s) $} passes.
The result can be generalized to deal with multiple
sources within the same bounds. This is the first known
streaming algorithm for shortest paths in directed
graphs. For undirected connectivity, we devise an {$
O((n \log n) / s) $}-pass algorithm. Both problems
require {$ \Omega (n / s) $} passes under the
restrictions we consider. We also show that the model
where intermediate temporary streams are allowed can be
strictly more powerful than classical streaming for
some problems, while maintaining all of its hardness
for others.",
acknowledgement = ack-nhfb,
articleno = "6",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Pettie:2009:LDS,
author = "Seth Pettie",
title = "{Low} distortion spanners",
journal = j-TALG,
volume = "6",
number = "1",
pages = "7:1--7:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644022",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A spanner of an undirected unweighted graph is a
subgraph that approximates the distance metric of the
original graph with some specified accuracy.
Specifically, we say {$ H \subseteq G $} is an
{$f$}-spanner of {$G$} if any two vertices {$ u, v $}
at distance {$d$} in {$G$} are at distance at most {$
f(d) $} in {$H$}. There is clearly some trade-off
between the sparsity of {$H$} and the distortion
function {$f$}, though the nature of the optimal
trade-off is still poorly understood. In this article
we present a simple, modular framework for constructing
sparse spanners that is based on interchangeable
components called connection schemes. By assembling
connection schemes in different ways we can recreate
the additive 2- and 6-spanners of Aingworth et al.
[1999] and Baswana et al. [2009], and give spanners
whose multiplicative distortion quickly tends toward 1.
Our results rival the simplicity of all previous
algorithms and provide substantial improvements (up to
a doubly exponential reduction in edge density) over
the comparable spanners of Elkin and Peleg [2004] and
Thorup and Zwick [2006].",
acknowledgement = ack-nhfb,
articleno = "7",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Mehlhorn:2009:MCB,
author = "Kurt Mehlhorn and Dimitrios Michail",
title = "Minimum cycle bases: {Faster} and simpler",
journal = j-TALG,
volume = "6",
number = "1",
pages = "8:1--8:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644023",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the problem of computing exact or
approximate minimum cycle bases of an undirected (or
directed) graph {$G$} with {$m$} edges, {$n$} vertices
and nonnegative edge weights. In this problem, a {$ \{
0, 1 \} $} (respectively, {$ \{ -1, 0, 1 \} $}) incidence vector is
associated with each cycle, and the vector space over {$ F_2 $}
(respectively, {$Q$}) generated by these vectors is the cycle space of {$G$}.
A set of cycles is called a cycle basis of {$G$} if it
forms a basis for its cycle space. A cycle basis where
the sum of the weights of the cycles is minimum is
called a minimum cycle basis of {$G$}. Cycle bases of
low weight are useful in a number of contexts, for
example, the analysis of electrical networks,
structural engineering, chemistry, and surface
reconstruction. There exists a set of {$ \Theta (m n)
$} cycles which is guaranteed to contain a minimum
cycle basis. A minimum basis can be extracted by
Gaussian elimination. The resulting algorithm [Horton
1987] was the first polynomial-time algorithm. Faster
and more complicated algorithms have been found since
then. We present a very simple method for extracting a
minimum cycle basis from the candidate set with running
time {$ O(m^2 n) $}, which improves the running time
for sparse graphs. Furthermore, in the undirected case
by using bit-packing we improve the running time also
in the case of dense graphs. For undirected graphs we
derive an {$ O(m^2 n / \log n + n^2 m) $} algorithm.
For directed graphs we get an {$ O(m^3 n) $}
deterministic and an {$ O(m^2 n) $} randomized
algorithm. Our results improve the running times of
both exact and approximate algorithms. Finally, we
derive a smaller candidate set with size in {$ \Omega
(m) \cap O(m n) $}.",
acknowledgement = ack-nhfb,
articleno = "8",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
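The extraction step described above (greedy selection from a weight-sorted
candidate set, with independence tested by Gaussian elimination over
{$ F_2 $}) is short to write down. A minimal Python sketch, under the
assumptions that each candidate cycle is given as a pair (weight, bitmask)
with bit e set iff edge e lies on the cycle, and that dim is the cycle-space
dimension {$ m - n + c $} for a graph with {$c$} connected components; it
illustrates the principle only, not the article's faster {$ O(m^2 n) $}
procedure.

def extract_min_cycle_basis(candidates, dim):
    # Greedy over GF(2): keep a candidate iff it is independent of those kept.
    pivot_at = {}                 # highest set bit -> reduced basis vector
    basis = []
    for weight, vec in sorted(candidates):
        v = vec
        while v:
            h = v.bit_length() - 1
            if h not in pivot_at:     # v is independent: extend the basis
                pivot_at[h] = v
                basis.append((weight, vec))
                break
            v ^= pivot_at[h]          # eliminate the leading bit and continue
        if len(basis) == dim:
            break
    return basis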
@Article{Gaspers:2009:ETA,
author = "Serge Gaspers and Dieter Kratsch and Mathieu Liedloff
and Ioan Todinca",
title = "Exponential time algorithms for the minimum dominating
set problem on some graph classes",
journal = j-TALG,
volume = "6",
number = "1",
pages = "9:1--9:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644024",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The minimum dominating set problem remains NP-hard
when restricted to any of the following graph classes:
$c$-dense graphs, chordal graphs, 4-chordal graphs,
weakly chordal graphs, and circle graphs. Developing
and using a general approach, for each of these graph
classes we present an exponential time algorithm
solving the minimum dominating set problem faster than
the best known algorithm for general graphs. Our
algorithms have the following running time: {$
O(1.4124^n) $} for chordal graphs, {$ O(1.4776^n) $}
for weakly chordal graphs, {$ O(1.4845^n) $} for
4-chordal graphs, {$ O(1.4887^n) $} for circle graphs,
and {$ O(1.2273^{(1 + \sqrt {1 - 2 c}) n}) $} for
{$c$}-dense graphs.",
acknowledgement = ack-nhfb,
articleno = "9",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Chan:2009:OTE,
author = "Ho-Leung Chan and Joseph Wun-Tat Chan and Tak-Wah Lam
and Lap-Kei Lee and Kin-Sum Mak and Prudence W. H.
Wong",
title = "Optimizing throughput and energy in online deadline
scheduling",
journal = j-TALG,
volume = "6",
number = "1",
pages = "10:1--10:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644025",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "This article extends the study of online algorithms
for energy-efficient deadline scheduling to the
overloaded setting. Specifically, we consider a
processor that can vary its speed between $0$ and a
maximum speed {$T$} to minimize its energy usage (the
rate of energy usage is believed to be a cubic function of the speed).
As the speed is upper bounded, the processor may be
overloaded with jobs and no scheduling algorithms can
guarantee to meet the deadlines of all jobs. An optimal
schedule is expected to maximize the throughput, and
furthermore, its energy usage should be the smallest
among all schedules that achieve the maximum
throughput. In designing a scheduling algorithm, one
has to face the dilemma of selecting more jobs and
being conservative in energy usage. If we ignore energy
usage, the best possible online algorithm is
4-competitive on throughput [Koren and Shasha 1995]. On
the other hand, existing work on energy-efficient
scheduling focuses on a setting where the processor
speed is unbounded and the concern is on minimizing the
energy to complete all jobs; {$ O(1) $}-competitive
online algorithms with respect to energy usage have
been known [Yao et al. 1995; Bansal et al. 2007a; Li et
al. 2006]. This article presents the first online
algorithm for the more realistic setting where
processor speed is bounded and the system may be
overloaded; the algorithm is {$ O(1) $}-competitive on
both throughput and energy usage. If the maximum speed
of the online scheduler is relaxed slightly to {$ (1 +
\epsilon) T $} for some {$ \epsilon > 0 $}, we can
improve the competitive ratio on throughput to
arbitrarily close to one, while maintaining {$ O(1)
$}-competitiveness on energy usage.",
acknowledgement = ack-nhfb,
articleno = "10",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Alon:2009:ACM,
author = "Noga Alon and Yossi Azar and Shai Gutner",
title = "Admission control to minimize rejections and online
set cover with repetitions",
journal = j-TALG,
volume = "6",
number = "1",
pages = "11:1--11:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644026",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study the admission control problem in general
networks. Communication requests arrive over time, and
the online algorithm accepts or rejects each request
while maintaining the capacity limitations of the
network. The admission control problem has been usually
analyzed as a benefit problem, where the goal is to
devise an online algorithm that accepts the maximum
number of requests possible. The problem with this
objective function is that even algorithms with optimal
competitive ratios may reject almost all of the
requests, when it would have been possible to reject
only a few. This could be inappropriate for settings in
which rejections are intended to be rare events. In
this article, we consider preemptive online algorithms
whose goal is to minimize the number of rejected
requests. Each request arrives together with the path
it should be routed on. We show an {$ O(\log^2 (m c))
$}-competitive randomized algorithm for the weighted
case, where {$m$} is the number of edges in the graph
and {$c$} is the maximum edge capacity. For the
unweighted case, we give an {$ O(\log m \log c)
$}-competitive randomized algorithm. This settles an
open question of Blum et al. [2001]. We note that
allowing preemption and handling requests with given
paths are essential for avoiding trivial lower bounds.
The admission control problem is a generalization of
the online set cover with repetitions problem, whose
input is a family of {$m$} subsets of a ground set of
{$n$} elements. Elements of the ground set are given to
the online algorithm one by one, possibly requesting
each element a multiple number of times. (If each
element arrives at most once, this corresponds to the
online set cover problem.) The algorithm must cover
each element by different subsets, according to the
number of times it has been requested. We give an {$
O(\log m \log n) $}-competitive randomized algorithm
for the online set cover with repetitions problem. This
matches a recent lower bound of {$ \Omega (\log m \log
n) $} given by Korman [2005] (based on Feige [1998])
for the competitive ratio of any randomized polynomial
time algorithm, under the BPP {$ \neq $} NP assumption. Given
any constant {$ \epsilon > 0 $}, an {$ O(\log m \log n)
$}-competitive deterministic bicriteria algorithm is
shown that covers each element by at least {$ (1 -
\epsilon) k $} sets, where {$k$} is the number of times
the element is covered by the optimal solution.",
acknowledgement = ack-nhfb,
articleno = "11",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Hay:2009:JRM,
author = "David Hay and Gabriel Scalosub",
title = "Jitter regulation for multiple streams",
journal = j-TALG,
volume = "6",
number = "1",
pages = "12:1--12:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644027",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "For widely used interactive communication, it is
essential that traffic is kept as smooth as possible;
the smoothness of the traffic is typically captured by
its delay jitter, that is, the difference between the
maximal and minimal end-to-end delays. The task of
minimizing the jitter is done by jitter regulators that
use a limited-size buffer in order to shape the
traffic. In many real-life situations regulators must
handle multiple streams simultaneously and provide low
jitter on each of them separately. Moreover,
communication links have limited capacity, and these
may pose further restrictions on the choices made by
the regulator. This article investigates the problem of
minimizing jitter in such an environment, using a
fixed-size buffer. We show that the offline version of
the problem can be solved in polynomial time, by
introducing an efficient offline algorithm that finds a
release schedule with optimal jitter. When regulating
{$M$} streams in the online setting, we take a
competitive analysis point of view and note that, in
the uncapacitated case, previous results in Mansour and
Patt-Shamir [2001] can be extended to an online
algorithm that uses a buffer of size {$ 2 \cdot M \cdot
B $} and obtains the optimal jitter possible with a
buffer of size {$B$} (and an offline algorithm). The
question arises whether such a resource augmentation is
essential. We answer this question in the affirmative,
by proving a lower bound that is tight up to a factor
of 2, thus showing that jitter regulation does not
scale well as the number of streams increases unless
the buffer is sized up proportionally.",
acknowledgement = ack-nhfb,
articleno = "12",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Becchetti:2009:LCA,
author = "Luca Becchetti and Alberto Marchetti-Spaccamela and
Andrea Vitaletti and Peter Korteweg and Martin Skutella
and Leen Stougie",
title = "Latency-constrained aggregation in sensor networks",
journal = j-TALG,
volume = "6",
number = "1",
pages = "13:1--13:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644028",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A sensor network consists of sensing devices which may
exchange data through wireless communication; sensor
networks are highly energy constrained since they are
usually battery operated. Data aggregation is a
possible way to save energy consumption: nodes may
delay data in order to aggregate them into a single
packet before forwarding them towards some central node
(sink). However, many applications impose constraints
on the maximum delay of data; this translates into
latency constraints for data arriving at the sink. We
study the problem of data aggregation to minimize
maximum energy consumption under latency constraints on
sensed data delivery, and we assume unique
communication paths that form an intree rooted at the
sink. We prove that the offline problem is strongly
NP-hard and we design a 2-approximation algorithm. The
latter uses a novel rounding technique. Almost all
real-life sensor networks are managed online by simple
distributed algorithms in the nodes. In this context we
consider both the case in which sensor nodes are
synchronized or not. We assess the performance of the
algorithm by competitive analysis. We also provide
lower bounds for the models we consider, in some cases
showing optimality of the algorithms we propose. Most
of our results also hold when minimizing the total
energy consumption of all nodes.",
acknowledgement = ack-nhfb,
articleno = "13",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Cohen:2009:TDM,
author = "Rami Cohen and Dror Rawitz and Danny Raz",
title = "Time-dependent multi-scheduling of multicast",
journal = j-TALG,
volume = "6",
number = "1",
pages = "14:1--14:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644029",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Many network applications that need to distribute
content and data to a large number of clients use a
hybrid scheme in which one (or more) multicast channel
is used in parallel to a unicast dissemination. This
way the application can distribute data using one of
its available multicast channels or by sending one or
more unicast transmissions. In such a model the
utilization of the multicast channels is critical for
the overall performance of the system. We study the
scheduling algorithm of the sender in such a model. We
describe this scheduling problem as an optimization
problem where the objective is to maximize the
utilization of the multicast channel. Our model
captures the fact that it may be beneficial to
multicast an object more than once (e.g., page update).
Thus, the benefit depends, among other things, on the
last time the object was sent, which makes the problem
much more complex than previous related scheduling
problems. We show that our problem is NP-hard. Then,
using the local ratio technique we obtain a
4-approximation algorithm for the case where the
objects are of fixed size and a 10-approximation
algorithm for the general case. We also consider a
special case which may be of practical interest, and
prove that a simple greedy algorithm is a
3-approximation algorithm in this case.",
acknowledgement = ack-nhfb,
articleno = "14",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Gamzu:2009:IOA,
author = "Iftah Gamzu and Danny Segev",
title = "Improved online algorithms for the sorting buffer
problem on line metrics",
journal = j-TALG,
volume = "6",
number = "1",
pages = "15:1--15:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644030",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "An instance of the sorting buffer problem consists of
a metric space and a server, equipped with a
finite-capacity buffer capable of holding a limited
number of requests. An additional ingredient of the
input is an online sequence of requests, each of which
is characterized by a destination in the given metric
space; whenever a request arrives, it must be stored in
the sorting buffer. At any point in time, a currently
pending request can be served by drawing it out of the
buffer and moving the server to its corresponding
destination. The objective is to serve all input
requests in a way that minimizes the total distance
traveled by the server. In this article, we focus our
attention on instances of the problem in which the
underlying metric is either an evenly-spaced line
metric or a continuous line metric. Our main findings
can be briefly summarized as follows. (1) We present a
deterministic {$ O(\log n) $}-competitive algorithm for
{$n$}-point evenly-spaced line metrics. This result
improves on a randomized {$ O(\log^2 n) $}-competitive
algorithm due to Khandekar and Pandit [2006b]. It also
refutes their conjecture that a deterministic
strategy is unlikely to obtain a nontrivial competitive
ratio. (2) We devise a deterministic {$ O(\log N \log
\log N) $}-competitive algorithm for continuous line
metrics, where {$N$} denotes the length of the input
sequence. In this context, we introduce a novel
discretization technique of independent interest. (3)
We establish the first nontrivial lower bound for the
evenly-spaced case, by proving that the competitive
ratio of any deterministic algorithm is at least {$ (2 +
\sqrt 3) / \sqrt 3 \approx 2.154 $}. This result
settles, to some extent, an open question due to
Khandekar and Pandit [2006b], who posed the task of
attaining lower bounds on the achievable competitive
ratio as a foundational objective for future
research.",
acknowledgement = ack-nhfb,
articleno = "15",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Andreev:2009:SSL,
author = "Konstantin Andreev and Charles Garrod and Daniel
Golovin and Bruce Maggs and Adam Meyerson",
title = "Simultaneous source location",
journal = j-TALG,
volume = "6",
number = "1",
pages = "16:1--16:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644031",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the problem of simultaneous source
location: selecting locations for sources in a
capacitated graph such that a given set of demands can
be satisfied simultaneously, with the goal of
minimizing the number of locations chosen. For general
directed and undirected graphs we give an {$ O(\log D)
$}-approximation algorithm, where {$D$} is the sum of
demands, and prove matching {$ \Omega (\log D) $}
hardness results assuming P {$ \neq $} NP. For
undirected trees, we give an exact algorithm and show
how this can be combined with a result of R{\"a}cke to
give a solution that exceeds edge capacities by at most
{$ O(\log^2 n \log \log n) $}, where {$n$} is the
number of nodes. For undirected graphs of bounded
treewidth we show that the problem is still NP-hard,
but we are able to give a PTAS with at most {$ (1 +
\epsilon) $} violation of the capacities for
arbitrarily small {$ \epsilon $}, or a $ (k + 1) $-
approximation with exact capacities, where $k$ is the
treewidth.",
acknowledgement = ack-nhfb,
articleno = "16",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Bein:2009:KYQ,
author = "Wolfgang Bein and Mordecai J. Golin and Lawrence L.
Larmore and Yan Zhang",
title = "The {Knuth--Yao} quadrangle-inequality speedup is a
consequence of total monotonicity",
journal = j-TALG,
volume = "6",
number = "1",
pages = "17:1--17:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644032",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "There exist several general techniques in the
literature for speeding up naive implementations of
dynamic programming. Two of the best known are the
Knuth--Yao quadrangle inequality speedup and the SMAWK
algorithm for finding the row-minima of totally
monotone matrices. Although both of these techniques
use a quadrangle inequality and seem similar, they are
actually quite different and have been used differently
in the literature. In this article we show that the
Knuth--Yao technique is actually a direct consequence
of total monotonicity. As well as providing new
derivations of the Knuth--Yao result, this also permits
to solve the Knuth--Yao problem directly using the
SMAWK algorithm. Another consequence of this approach
is a method for solving online versions of problems
with the Knuth--Yao property. The online algorithms
given here are asymptotically as fast as the best
previously known static ones. For example, the
Knuth--Yao technique speeds up the standard dynamic
program for finding the optimal binary search tree of
$n$ elements from {$ \Theta (n^3) $} down to {$ O(n^2)
$}, and the results in this article allow construction
of an optimal binary search tree in an online fashion
(adding a node to the left or the right of the current
nodes at each step) in {$ O(n) $} time per step.",
acknowledgement = ack-nhfb,
articleno = "17",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
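As a concrete instance of the classical speedup named above (the standard
Knuth--Yao dynamic program, not the article's SMAWK-based derivation or its
online variant), here is a minimal Python sketch computing the cost of an
optimal binary search tree in {$ O(n^2) $} time; the frequencies-only
formulation (no gap probabilities) is an assumption of this illustration.

def optimal_bst_cost(p):
    # p[i] is the access frequency of key i (0-indexed). e[i][j] is the
    # optimal weighted search cost for keys i..j-1; w[i][j] is their weight.
    n = len(p)
    e = [[0.0] * (n + 1) for _ in range(n + 1)]
    w = [[0.0] * (n + 1) for _ in range(n + 1)]
    root = [[0] * (n + 1) for _ in range(n + 1)]
    for i in range(n):
        e[i][i + 1] = w[i][i + 1] = p[i]
        root[i][i + 1] = i
    for length in range(2, n + 1):
        for i in range(n - length + 1):
            j = i + length
            w[i][j] = w[i][j - 1] + p[j - 1]
            best, best_r = float("inf"), i
            # Knuth--Yao speedup: the optimal root is monotone in i and j,
            # so only root[i][j-1] .. root[i+1][j] needs to be scanned,
            # which makes the whole double loop amortize to O(n^2).
            for r in range(root[i][j - 1], root[i + 1][j] + 1):
                cost = e[i][r] + e[r + 1][j] + w[i][j]
                if cost < best:
                    best, best_r = cost, r
            e[i][j], root[i][j] = best, best_r
    return e[0][n]

For example, optimal_bst_cost([1, 2, 4, 3]) returns 17.0 (root key 2, with
key 1 and key 3 as its children and key 0 below key 1).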
@Article{Hassin:2009:AMQ,
author = "Refael Hassin and Asaf Levin and Maxim Sviridenko",
title = "Approximating the minimum quadratic assignment
problems",
journal = j-TALG,
volume = "6",
number = "1",
pages = "18:1--18:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644033",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the well-known minimum quadratic
assignment problem. In this problem we are given two $
n \times n $ nonnegative symmetric matrices {$ A =
(a_{ij}) $} and {$ B = (b_{ij}) $}. The objective is to
compute a permutation {$ \pi $} of {$ V = \{ 1, \ldots
{}, n \} $} so that {$ \sum_{i, j \in V, i \neq j}
a_{\pi (i), \pi (j)} b_{i, j} $} is minimized. We
assume that {$A$} is a {$ 0 / 1 $} incidence matrix of
a graph, and that {$B$} satisfies the triangle
inequality. We analyze the approximability of this
class of problems by providing polynomial bounded
approximations for some special cases, and
inapproximability results for other cases.",
acknowledgement = ack-nhfb,
articleno = "18",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
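The objective above is simple enough to state directly in code. A minimal
Python sketch (0-indexed; the exhaustive search is shown only for very small
n and is not one of the approximation algorithms analyzed in the article):

from itertools import permutations

def qap_cost(A, B, pi):
    # Sum over ordered pairs i != j of A[pi[i]][pi[j]] * B[i][j].
    n = len(pi)
    return sum(A[pi[i]][pi[j]] * B[i][j]
               for i in range(n) for j in range(n) if i != j)

def qap_brute_force(A, B):
    # Exhaustive minimization over all n! permutations (tiny instances only);
    # returns the cheapest permutation as a tuple.
    n = len(B)
    return min(permutations(range(n)), key=lambda pi: qap_cost(A, B, pi))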
@Article{Alagic:2009:QAS,
author = "Gorjan Alagic and Cristopher Moore and Alexander
Russell",
title = "Quantum algorithms for {Simon}'s problem over
nonabelian groups",
journal = j-TALG,
volume = "6",
number = "1",
pages = "19:1--19:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644034",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Daniel Simon's 1994 discovery of an efficient quantum
algorithm for finding ``hidden shifts'' of {$ Z_2^n $}
provided the first algebraic problem for which quantum
computers are exponentially faster than their classical
counterparts. In this article, we study the
generalization of Simon's problem to arbitrary groups.
Fixing a finite group {$G$}, this is the problem of
recovering an involution {$ m = (m_1, \ldots {}, m_n)
\in G^n $} from an oracle {$f$} with the property that
{$ f(x \cdot y) = f(x) \Leftrightarrow y \in \{ 1, m \} $}. In the
current parlance, this is the hidden subgroup problem
(HSP) over groups of the form {$ G^n $}, where {$G$} is
a nonabelian group of constant size, and where the
hidden subgroup is either trivial or has order two.
Although groups of the form {$ G^n $} have a simple
product structure, they share important
representation--theoretic properties with the symmetric
groups {$ S_n $}, where a solution to the HSP would
yield a quantum algorithm for Graph Isomorphism. In
particular, solving their HSP with the so-called
``standard method'' requires highly entangled
measurements on the tensor product of many coset
states. In this article, we provide quantum algorithms
with time complexity {$ 2^{o(\sqrt n)} $} that recover
hidden involutions {$ m = (m_1, \ldots {}, m_n) \in G^n
$} where, as in Simon's problem, each {$ m_i $} is
either the identity or the conjugate of a known element
{$m$} which satisfies {$ \kappa (m) = - \kappa (1) $}
for some {$ \kappa \in G $}. Our approach combines the
general idea behind Kuperberg's sieve for dihedral
groups with the ``missing harmonic'' approach of Moore
and Russell. These are the first nontrivial HSP
algorithms for group families that require highly
entangled multiregister Fourier sampling.",
acknowledgement = ack-nhfb,
articleno = "19",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Babai:2009:CRC,
author = "L{\'a}szl{\'o} Babai and Pedro F. Felzenszwalb",
title = "Computing rank-convolutions with a mask",
journal = j-TALG,
volume = "6",
number = "1",
pages = "20:1--20:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644035",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Rank-convolutions have important applications in a
variety of areas such as signal processing and computer
vision. We define a mask as a function taking only
values zero and infinity. Rank-convolutions with masks
are of special interest to image processing. We show
how to compute the rank-$k$ convolution of a function
over an interval of length $n$ with an arbitrary mask
of length $m$ in {$ O(n \sqrt m \log m) $} time. The
result generalizes to the {$d$}-dimensional case.
Previously no algorithm performing significantly better
than the brute-force {$ O(n m) $} bound was known. Our
algorithm seems to perform well in practice. We
describe an implementation, illustrating its
application to a problem in image processing. Already
on relatively small images, our experiments show a
significant speedup compared to brute force.",
acknowledgement = ack-nhfb,
articleno = "20",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Bruss:2009:IAI,
author = "F. Thomas Bruss and Guy Louchard and Mark Daniel
Ward",
title = "Inverse auctions: {Injecting} unique minima into
random sets",
journal = j-TALG,
volume = "6",
number = "1",
pages = "21:1--21:??",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1644015.1644036",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:31 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider auctions in which the winning bid is the
smallest bid that is unique. Only the upper-price limit
is given. Neither the number of participants nor the
distribution of the offers are known, so that the
problem of placing a bid to win with maximum
probability looks, a priori, ill-posed. Indeed, the
essence of the problem is to inject a (final) minimum
into a random subset (of unique offers) of a larger
random set. We will see, however, that here no more
than two external (and almost compelling) arguments
make the problem meaningful. By appropriately modeling
the relationship between the number of participants and
the distribution of the bids, we can then maximize our
chances of winning the auction and propose a computable
algorithm for placing our bid.",
acknowledgement = ack-nhfb,
articleno = "21",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Albers:2010:EN,
author = "Susanne Albers",
title = "Editorial {Note}",
journal = j-TALG,
volume = "6",
number = "2",
pages = "22:1--22:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721838",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "22",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Mathieu:2010:FSI,
author = "Claire Mathieu",
title = "Foreword to special issue {SODA} 2009",
journal = j-TALG,
volume = "6",
number = "2",
pages = "23:1--23:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721839",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "23",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Cabello:2010:FSC,
author = "Sergio Cabello",
title = "Finding shortest contractible and shortest separating
cycles in embedded graphs",
journal = j-TALG,
volume = "6",
number = "2",
pages = "24:1--24:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721840",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We give a polynomial-time algorithm to find a shortest
contractible cycle (i.e., a closed walk without
repeated vertices) in a graph embedded in a surface.
This answers a question posed by Hutchinson. In
contrast, we show that finding a shortest contractible
cycle through a given vertex is NP-hard. We also show
that finding a shortest separating cycle in an embedded
graph is NP-hard. This answers a question posed by
Mohar and Thomassen.",
acknowledgement = ack-nhfb,
articleno = "24",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "3-path condition; forbidden pairs; graphs on surfaces;
topological graph theory",
}
@Article{Aspnes:2010:ASM,
author = "James Aspnes and Keren Censor",
title = "Approximate shared-memory counting despite a strong
adversary",
journal = j-TALG,
volume = "6",
number = "2",
pages = "25:1--25:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721841",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A new randomized asynchronous shared-memory data
structure is given for implementing an approximate
counter that can be incremented once by each of $n$
processes in a model that allows up to $ n - 1 $ crash
failures. For any fixed $ \epsilon $, the counter
achieves a relative error of $ \delta $ with high
probability, at the cost of {$ O(((1 / \delta) \log
n)^{O(1 / \epsilon)}) $} register operations per
increment and {$ O(n^{4 / 5 + \epsilon }((1 / \delta)
\log n)^{O(1 / \epsilon)}) $} register operations per
read. The counter combines randomized sampling for
estimating large values with an expander for estimating
small values. This is the first counter implementation
that is sublinear in the number of processes and works
despite a strong adversary scheduler that can observe
internal states of processes.\par
An application of the improved counter is an improved
protocol for solving randomized shared-memory
consensus, which reduces the best previously known
individual work complexity from {$ O(n \log n) $} to an
optimal {$ O(n) $}, resolving one of the last remaining
open problems concerning consensus in this model.",
acknowledgement = ack-nhfb,
articleno = "25",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "approximate counting; consensus; Distributed
computing; expanders; martingales",
}
@Article{Chan:2010:CBT,
author = "Timothy M. Chan",
title = "Comparison-based time-space lower bounds for
selection",
journal = j-TALG,
volume = "6",
number = "2",
pages = "26:1--26:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721842",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We establish the first nontrivial lower bounds on
time-space trade-offs for the selection problem. We
prove that any comparison-based randomized algorithm
for finding the median requires {$ \Omega (n \log
\log_S n) $} expected time in the RAM model (or more
generally in the comparison branching program model),
if we have {$S$} bits of extra space besides the
read-only input array. This bound is tight for all {$ S
> \log n $}, and remains true even if the array is
given in a random order. Our result thus answers a
16-year-old question of Munro and Raman [1996], and
also complements recent lower bounds that are
restricted to sequential access, as in the multipass
streaming model [Chakrabarti et al. 2008b].\par
We also prove that any comparison-based, deterministic,
multipass streaming algorithm for finding the median
requires {$ \Omega (n \log^*(n / s) + n \log_s n) $}
worst-case time (in scanning plus comparisons), if we
have {$s$} cells of space. This bound is also tight for
all {$ s > \log^2 n $}. We get deterministic lower
bounds for I/O-efficient algorithms as well.\par
The proofs in this article are self-contained and do
not rely on communication complexity techniques.",
acknowledgement = ack-nhfb,
articleno = "26",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Adversary arguments; lower bounds; median finding;
RAM; randomized algorithms; streaming algorithms;
time--space trade-offs",
}
@Article{Goel:2010:PMU,
author = "Ashish Goel and Michael Kapralov and Sanjeev Khanna",
title = "Perfect matchings via uniform sampling in regular
bipartite graphs",
journal = j-TALG,
volume = "6",
number = "2",
pages = "27:1--27:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721843",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article we further investigate the
well-studied problem of finding a perfect matching in a
regular bipartite graph. The first nontrivial
algorithm, with running time {$ O(m n) $}, dates back
to K{\"o}nig's work in 1916 (here {$ m = n d $} is the
number of edges in the graph, {$ 2 n $} is the number
of vertices, and {$d$} is the degree of each node). The
currently most efficient algorithm takes time {$ O(m)
$}, and is due to Cole et al. [2001]. We improve this
running time to {$ O(\min \{ m, n^{2.5} \ln n / d \})
$}; this minimum can never be larger than {$ O(n^{1.75}
\sqrt {\ln n}) $}. We obtain this improvement by
proving a uniform sampling theorem: if we sample each
edge in a {$d$}-regular bipartite graph independently
with a probability {$ p = O(n \ln n / d^2) $} then the
resulting graph has a perfect matching with high
probability. The proof involves a decomposition of the
graph into pieces which are guaranteed to have many
perfect matchings but do not have any small cuts. We
then establish a correspondence between potential
witnesses to nonexistence of a matching (after
sampling) in any piece and cuts of comparable size in
that same piece. Karger's sampling theorem [1994a,
1994b] for preserving cuts in a graph can now be
adapted to prove our uniform sampling theorem for
preserving perfect matchings. Using the {$ O(m \sqrt n)
$} algorithm (due to Hopcroft and Karp [1973]) for
finding maximum matchings in bipartite graphs on the
sampled graph then yields the stated running time. We
also provide an infinite family of instances to show
that our uniform sampling result is tight up to
polylogarithmic factors (in fact, up to {$ \ln^2 n $}).",
acknowledgement = ack-nhfb,
articleno = "27",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Perfect matching; regular bipartite graphs",
}
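
A minimal sketch (not the authors' algorithm) of the sampling idea in
Goel:2010:PMU, assuming a $d$-regular bipartite graph given as an
adjacency list: keep each edge independently with probability
$p = c n \ln n / d^2$ and run a standard augmenting-path matching on the
sampled graph, which the uniform sampling theorem says still contains a
perfect matching with high probability. The constant c = 3.0 and the
simple Kuhn-style matching (rather than Hopcroft--Karp) are illustrative
choices, not taken from the paper.

import math
import random

def sample_edges(adj, n, d, c=3.0):
    """Keep each edge of a d-regular bipartite graph independently
    with probability p = min(1, c * n * ln(n) / d^2)."""
    p = min(1.0, c * n * math.log(n) / (d * d))
    return [[v for v in nbrs if random.random() < p] for nbrs in adj]

def maximum_matching(adj, n_left, n_right):
    """Simple augmenting-path (Kuhn) matching; returns (size, match_right)."""
    match_right = [-1] * n_right

    def try_augment(u, seen):
        for v in adj[u]:
            if not seen[v]:
                seen[v] = True
                if match_right[v] == -1 or try_augment(match_right[v], seen):
                    match_right[v] = u
                    return True
        return False

    size = 0
    for u in range(n_left):
        if try_augment(u, [False] * n_right):
            size += 1
    return size, match_right

# Example: K_{n,n} viewed as an n-regular bipartite instance (d = n).
n = 6
adj = [list(range(n)) for _ in range(n)]
sampled = sample_edges(adj, n, d=n)
size, _ = maximum_matching(sampled, n, n)
print("matching size on sampled graph:", size)
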
@Article{Aminof:2010:RAO,
author = "Benjamin Aminof and Orna Kupferman and Robby Lampert",
title = "Reasoning about online algorithms with weighted
automata",
journal = j-TALG,
volume = "6",
number = "2",
pages = "28:1--28:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721844",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We describe an automata-theoretic approach for the
competitive analysis of {\em online algorithms}. Our
approach is based on {\em weighted automata}, which
assign to each input word a cost in {$ \mathbb {R}^{\geq 0} $}.
By relating the ``unbounded look ahead'' of optimal
offline algorithms with nondeterminism, and relating
the ``no look ahead'' of online algorithms with
determinism, we are able to solve problems about the
competitive ratio of online algorithms, and the memory
they require, by reducing them to questions about {\em
determinization\/} and {\em approximated
determinization\/} of weighted automata.",
acknowledgement = ack-nhfb,
articleno = "28",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Formal verification; online algorithms; weighted
automata",
}
@Article{Marx:2010:AFH,
author = "D{\'a}niel Marx",
title = "Approximating fractional hypertree width",
journal = j-TALG,
volume = "6",
number = "2",
pages = "29:1--29:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721845",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Fractional hypertree width is a hypergraph measure
similar to tree width and hypertree width. Its
algorithmic importance comes from the fact that, as
shown in previous work, Constraint Satisfaction
Problems (CSP) and various problems in database theory
are polynomial-time solvable if the input contains a
bounded-width fractional hypertree decomposition of the
hypergraph of the constraints. In this article, we show
that for every fixed $ w \geq 1 $, there is a
polynomial-time algorithm that, given a hypergraph
{$H$} with fractional hypertree width at most {$w$},
computes a fractional hypertree decomposition of width
{$ O(w^3) $} for {$H$}. This means that polynomial-time
algorithms relying on bounded-width fractional
hypertree decompositions no longer need to be given a
decomposition explicitly in the input, since an
appropriate decomposition can be computed in polynomial
time. Therefore, if {$H$} is a class of hypergraphs
with bounded fractional hypertree width, then a CSP
restricted to instances whose structure is in {$H$} is
polynomial-time solvable. This makes bounded fractional
hypertree width the most general known hypergraph
property that makes CSP, Boolean conjunctive queries,
and conjunctive query containment polynomial-time
solvable.",
acknowledgement = ack-nhfb,
articleno = "29",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "constraint satisfaction; fractional hypertree width;
Treewidth",
}
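
The width measure in Marx:2010:AFH is built from fractional edge covers:
the weight of a bag is the optimum of a small LP that assigns nonnegative
weights to hyperedges so every vertex of the bag is covered with total
weight at least 1, minimizing the sum. A sketch of that LP using scipy's
linprog, for a single vertex set only; constructing a good decomposition,
which is the subject of the paper, is the hard part and is not attempted
here.

from scipy.optimize import linprog

def fractional_edge_cover_number(vertices, hyperedges):
    """Minimum total weight of a fractional edge cover of `vertices`:
    min sum_e x_e  s.t.  sum_{e : v in e} x_e >= 1 for each v, x_e >= 0."""
    verts = list(vertices)
    c = [1.0] * len(hyperedges)                       # minimize sum of x_e
    A_ub = [[-1.0 if v in e else 0.0 for e in hyperedges] for v in verts]
    b_ub = [-1.0] * len(verts)                        # coverage >= 1, negated
    res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=(0, None), method="highs")
    return res.fun

# Triangle hypergraph {ab, bc, ca}: covering {a, b, c} needs weight 3/2.
edges = [{"a", "b"}, {"b", "c"}, {"c", "a"}]
print(fractional_edge_cover_number({"a", "b", "c"}, edges))   # 1.5
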
@Article{Klein:2010:SPD,
author = "Philip N. Klein and Shay Mozes and Oren Weimann",
title = "Shortest paths in directed planar graphs with negative
lengths: a linear-space {$ O(n \log^2 n) $}-time
algorithm",
journal = j-TALG,
volume = "6",
number = "2",
pages = "30:1--30:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721846",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We give an {$ O(n \log^2 n) $}-time, linear-space
algorithm that, given a directed planar graph with
positive and negative arc-lengths, and given a node
{$s$}, finds the distances from {$s$} to all nodes.",
acknowledgement = ack-nhfb,
articleno = "30",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Monge; Planar graphs; replacement paths; shortest
paths",
}
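
As a reference point for Klein:2010:SPD, the textbook Bellman--Ford
routine below solves the same single-source problem with negative arc
lengths (and no negative cycles) on general graphs in O(nm) time; it is
only a baseline to make the objective concrete, not the paper's
O(n log^2 n), linear-space planar algorithm.

def bellman_ford(n, arcs, s):
    """Distances from s in a directed graph with possibly negative arc
    lengths and no negative cycles.  arcs: list of (u, v, length).
    Runs in O(n * m); Klein-Mozes-Weimann achieve O(n log^2 n) on
    planar graphs."""
    INF = float("inf")
    dist = [INF] * n
    dist[s] = 0
    for _ in range(n - 1):
        changed = False
        for u, v, w in arcs:
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                changed = True
        if not changed:
            break
    return dist

arcs = [(0, 1, 4), (0, 2, 2), (2, 1, -3), (1, 3, 5)]
print(bellman_ford(4, arcs, 0))   # [0, -1, 2, 4]
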
@Article{Panagiotou:2010:MBS,
author = "Konstantinos Panagiotou and Angelika Steger",
title = "Maximal biconnected subgraphs of random planar
graphs",
journal = j-TALG,
volume = "6",
number = "2",
pages = "31:1--31:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721847",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Let {$C$} be a class of labeled connected graphs, and
let {$ C_n $} be a graph drawn uniformly at random from
graphs in {$C$} that contain exactly {$n$} vertices.
Denote by {$ b(\ell; C_n) $} the number of blocks
(i.e., maximal biconnected subgraphs) of {$ C_n $} that
contain exactly {$ \ell $} vertices, and let {$ l
b(C_n) $} be the number of vertices in a largest block
of {$ C_n $}. We show that under certain general
assumptions on {$C$}, {$ C_n $} belongs with high
probability to one of the following categories:\par
(1) {$ l b(C_n) \sim c n $}, for some explicitly given
{$ c = c(C) $}, and the second largest block is of
order {$ n^\alpha $}, where {$ 1 > \alpha = \alpha (C)
$}, or\par
(2) {$ l b(C_n) = O(\log n) $}, that is, all blocks
contain at most logarithmically many
vertices.\par
Moreover, in both cases we show that the quantity {$
b(\ell; C_n) $} is concentrated for all {$ \ell $} and
we determine its expected value. As a corollary we
obtain that the class of planar graphs belongs to
category {$1$}. In contrast to that, outerplanar and
series-parallel graphs belong to category {$2$}.",
acknowledgement = ack-nhfb,
articleno = "31",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Graphs with constraints; planar graphs; random
structures",
}
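
The block statistics b(l; C_n) and lb(C_n) of Panagiotou:2010:MBS can be
computed directly for any concrete connected graph; a small sketch using
networkx (whose biconnected_components generator is assumed available)
on a hand-built example, since sampling a uniformly random planar graph
is itself nontrivial and outside the scope of this snippet.

from collections import Counter
import networkx as nx

def block_statistics(G):
    """Return (b, lb): b[l] = number of blocks (maximal biconnected
    subgraphs) of G with exactly l vertices, lb = size of a largest block."""
    sizes = [len(block) for block in nx.biconnected_components(G)]
    return Counter(sizes), max(sizes)

# Example: two triangles sharing a cut vertex, plus a pendant edge.
G = nx.Graph([(0, 1), (1, 2), (2, 0), (2, 3), (3, 4), (4, 2), (4, 5)])
b, lb = block_statistics(G)
print(dict(b), lb)   # {3: 2, 2: 1} 3
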
@Article{Thomasse:2010:KFV,
author = "St{\'e}phan Thomass{\'e}",
title = "A $ 4 k^2 $ kernel for feedback vertex set",
journal = j-TALG,
volume = "6",
number = "2",
pages = "32:1--32:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721848",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:49:22 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We prove that given an undirected graph {$G$} on {$n$}
vertices and an integer {$k$}, one can compute, in
polynomial time in {$n$}, a graph {$ G' $} with at most
{$ 4 k^2 $} vertices and an integer {$ k' $} such that
{$G$} has a feedback vertex set of size at most {$k$}
iff {$ G' $} has a feedback vertex set of size at most
{$ k' $}. This result improves a
previous {$ O(k^{11}) $} kernel of Burrage et al., and
a more recent cubic kernel of Bodlaender. This problem
was communicated by Fellows.",
acknowledgement = ack-nhfb,
articleno = "32",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "feedback vertex set; fixed parameter tractability;
Kernelization; matching",
}
@Article{Madani:2010:DDM,
author = "Omid Madani and Mikkel Thorup and Uri Zwick",
title = "Discounted deterministic {Markov} decision processes
and discounted all-pairs shortest paths",
journal = j-TALG,
volume = "6",
number = "2",
pages = "33:1--33:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721849",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present algorithms for finding optimal strategies
for discounted, infinite-horizon, Deterministic Markov
Decision Processes (DMDPs). Our fastest algorithm has a
worst-case running time of {$ O(m n) $}, improving the
recent bound of {$ O(m n^2) $} obtained by Andersson
and Vorobyov [2006]. We also present a randomized {$
O(m^{1 / 2} n^2) $}-time algorithm for finding
Discounted All-Pairs Shortest Paths (DAPSP), improving
an {$ O(m n^2) $}-time algorithm that can be obtained
using ideas of Papadimitriou and Tsitsiklis [1987].",
acknowledgement = ack-nhfb,
articleno = "33",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Markov decision processes; minimum mean weight cycles;
shortest paths",
}
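
For Madani:2010:DDM, a discounted deterministic MDP is just an
arc-weighted directed graph in which each state chooses one outgoing arc;
the brief value-iteration sketch below is only meant to make the
objective concrete. It converges approximately and is much slower than
the paper's exact O(mn) algorithm; the discount factor and iteration
count are illustrative.

def value_iteration(n, arcs, gamma=0.9, iters=200):
    """Approximate optimal values of a discounted deterministic MDP.
    arcs: list of (u, v, reward) actions; every state has at least one
    outgoing arc and maximizes reward + gamma * value(next state).
    Plain value iteration, not the Madani-Thorup-Zwick algorithm."""
    value = [0.0] * n
    for _ in range(iters):
        value = [max(r + gamma * value[v] for (u2, v, r) in arcs if u2 == u)
                 for u in range(n)]
    return value

# Two states: stay at 0 for reward 1, or hop 0 -> 1 -> 0 for rewards 0 and 3.
arcs = [(0, 0, 1.0), (0, 1, 0.0), (1, 0, 3.0)]
print(value_iteration(2, arcs))
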
@Article{Shalita:2010:EAG,
author = "Alon Shalita and Uri Zwick",
title = "Efficient algorithms for the 2-gathering problem",
journal = j-TALG,
volume = "6",
number = "2",
pages = "34:1--34:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721850",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Pebbles are placed on some vertices of a directed
graph. Is it possible to move each pebble along at most
one edge of the graph so that in the final
configuration no pebble is left on its own? We give an
{$ O(m n) $}-time algorithm for solving this problem,
which we call the {\em 2-gathering\/} problem, where
{$n$} is the number of vertices and {$m$} is the number
of edges of the graph. If such a 2-gathering is not
possible, the algorithm finds a solution that minimizes
the number of solitary pebbles. The 2-gathering problem
forms a nontrivial generalization of the nonbipartite
matching problem and it is solved by extending the
augmenting paths technique used to solve matching
problems.",
acknowledgement = ack-nhfb,
articleno = "34",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "2-gatherings; augmenting paths; nonbipartite
matchings",
}
@Article{Bansal:2010:DPI,
author = "Nikhil Bansal and Ning Chen and Neva Cherniavsky and
Atri Rudra and Baruch Schieber and Maxim Sviridenko",
title = "Dynamic pricing for impatient bidders",
journal = j-TALG,
volume = "6",
number = "2",
pages = "35:1--35:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721851",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study the following problem related to pricing over
time. Assume there is a collection of bidders, each of
whom is interested in buying a copy of an item of which
there is an unlimited supply. Every bidder is
associated with a time interval over which the bidder
will consider buying a copy of the item, and a maximum
value the bidder is willing to pay for the item. On
every time unit, the seller sets a price for the item.
The seller's goal is to set the prices so as to
maximize revenue from the sale of copies of items over
the time period.\par
In the first model considered, we assume that all
bidders are {\em impatient}, that is, bidders buy the
item at the first time unit within their bid interval
that they can afford the price. To the best of our
knowledge, this is the first work that considers this
model. In the offline setting, we assume that the
seller knows the bids of all the bidders in advance. In
the online setting we assume that at each time unit the
seller only knows the values of the bids that have
arrived before or at that time unit. We give a
polynomial time offline algorithm and prove upper and
lower bounds on the competitiveness of deterministic
and randomized online algorithms, compared with the
optimal offline solution. The gap between the upper and
lower bounds is quadratic.\par
We also consider the {\em envy-free\/} model in which
bidders are sold the item at the minimum price during
their bid interval, as long as it is not over their
limit value. We prove tight bounds on the
competitiveness of deterministic online algorithms for
this model, and upper and lower bounds on the
competitiveness of randomized algorithms with quadratic
gap. The lower bounds for the randomized case in both
models use a novel general technique.",
acknowledgement = ack-nhfb,
articleno = "35",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Digital goods; online algorithms; pricing",
}
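
The impatient-bidder model of Bansal:2010:DPI fully determines the
seller's revenue once a price sequence is fixed: each bidder buys at the
first time unit in its interval whose price is at most its value, and
pays that price. The short evaluation helper below makes that rule
concrete; it is not the paper's offline or online pricing algorithm.

def revenue(prices, bidders):
    """prices[t] = item price at time t (0-indexed).
    bidders = list of (start, end, value) with inclusive interval [start, end].
    An impatient bidder buys at the first t in its interval with
    prices[t] <= value, and pays prices[t]."""
    total = 0.0
    for start, end, value in bidders:
        for t in range(start, end + 1):
            if prices[t] <= value:
                total += prices[t]
                break
    return total

prices = [5, 3, 4]
bidders = [(0, 2, 4), (0, 0, 2), (1, 2, 3)]
print(revenue(prices, bidders))   # 3 + 0 + 3 = 6.0
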
@Article{Azar:2010:TUF,
author = "Yossi Azar and Iftah Gamzu and Shai Gutner",
title = "Truthful unsplittable flow for large capacity
networks",
journal = j-TALG,
volume = "6",
number = "2",
pages = "36:1--36:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721852",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The {\em unsplittable flow problem\/} is one of the
most extensively studied optimization problems in the
field of networking. An instance of it consists of an
edge capacitated graph and a set of connection
requests, each of which is associated with source and
target vertices, a demand, and a value. The objective
is to route a maximum value subset of requests subject
to the edge capacities. It is a well known fact that as
the capacities of the edges are larger with respect to
the maximal demand among the requests, the problem can
be approximated better. In particular, it is known that
for sufficiently large capacities, the integrality gap
of the corresponding integer linear program becomes $ 1
+ \epsilon $, which can be matched by an algorithm that
utilizes the randomized rounding technique.\par
In this article, we focus our attention on the large
capacities unsplittable flow problem in a game
theoretic setting. In this setting, there are selfish
agents, which control some of the requests
characteristics, and may be dishonest about them. It is
worth noting that in game theoretic settings many
standard techniques, such as randomized rounding,
violate certain monotonicity properties, which are
imperative for truthfulness, and therefore cannot be
employed. In light of this state of affairs, we design
a monotone deterministic algorithm, based on primal-dual
machinery, that attains an approximation
ratio of $ e / (e - 1) $, up to a disparity of $
\epsilon $ away. This implies an improvement on the
current best truthful mechanism, as well as an
improvement on the current best combinatorial algorithm
for the problem under consideration. Surprisingly, we
demonstrate that any algorithm in the family of
reasonable iterative path minimizing algorithms, cannot
yield a better approximation ratio. Consequently, it
follows that in order to achieve a monotone PTAS, if
that exists, one would have to exert different
techniques. We also consider the large capacities {\em
single-minded multi-unit combinatorial auction
problem}. This problem is closely related to the
unsplittable flow problem since one can formulate it as
a special case of the integer linear program of the
unsplittable flow problem. Accordingly, we obtain a
comparable performance guarantee by refining the
algorithm suggested for the unsplittable flow
problem.",
acknowledgement = ack-nhfb,
articleno = "36",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "approximation algorithms; combinatorial and multi-unit
auctions; Mechanism design; primal-dual method",
}
@Article{Svitkina:2010:FLH,
author = "Zoya Svitkina and {\'E}va Tardos",
title = "Facility location with hierarchical facility costs",
journal = j-TALG,
volume = "6",
number = "2",
pages = "37:1--37:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721853",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We introduce a facility location problem with
submodular facility cost functions, and give an {$
O(\log n) $} approximation algorithm for it. Then we
focus on a special case of submodular costs, called
hierarchical facility costs, and give a {$ (4.237 +
\epsilon) $}-approximation algorithm using local
search. The hierarchical facility costs model
multilevel service installation. Shmoys et al. [2004]
gave a constant factor approximation algorithm for a
two-level version of the problem. Here we consider a
multilevel problem, and give a constant factor
approximation algorithm, independent of the number of
levels, for the case of identical costs on all
facilities.",
acknowledgement = ack-nhfb,
articleno = "37",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithm; facility location; local
search; submodular function",
}
@Article{Christodoulou:2010:MDF,
author = "George Christodoulou and Elias Koutsoupias and
Annam{\'a}ria Kov{\'a}cs",
title = "Mechanism design for fractional scheduling on
unrelated machines",
journal = j-TALG,
volume = "6",
number = "2",
pages = "38:1--38:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721854",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Scheduling on unrelated machines is one of the most
general and classical variants of the task scheduling
problem. Fractional scheduling is the LP-relaxation of
the problem, which is polynomially solvable in the
nonstrategic setting, and is a useful tool to design
deterministic and randomized approximation
algorithms.\par
The mechanism design version of the scheduling problem
was introduced by Nisan and Ronen. In this article, we
consider the mechanism design version of the fractional
variant of this problem. We give lower bounds for any
fractional truthful mechanism. Our lower bounds also
hold for any (randomized) mechanism for the integral
case. In the positive direction, we propose a truthful
mechanism that achieves approximation 3/2 for 2
machines, matching the lower bound. This is the first
new tight bound on the approximation ratio of this
problem, after the tight bound of 2, for 2 machines,
obtained by Nisan and Ronen. For $n$ machines, our
mechanism achieves an approximation ratio of $ (n + 1) / 2 $.\par
Motivated by the fact that all the known deterministic
and randomized mechanisms for the problem assign each
task independently from the others, we focus on an
interesting subclass of allocation algorithms, the {\em
task-independent\/} algorithms. We give a lower bound
of $ (n + 1) / 2 $, that holds for every (not only
monotone) allocation algorithm that takes independent
decisions. Under this consideration, our truthful
independent mechanism is the best that we can hope from
this family of algorithms.",
acknowledgement = ack-nhfb,
articleno = "38",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "scheduling; Truthful mechanisms; unrelated machines",
}
@Article{Korman:2010:LSV,
author = "Amos Korman",
title = "Labeling schemes for vertex connectivity",
journal = j-TALG,
volume = "6",
number = "2",
pages = "39:1--39:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721855",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "This article studies labeling schemes for the vertex
connectivity function on general graphs. We consider
the problem of assigning short labels to the nodes of
any $n$-node graph {$G$} in such a way that given the labels
of any two nodes $u$ and $v$, one can decide whether
$u$ and $v$ are $k$-vertex connected in {$G$}, that is,
whether there exist {$k$} vertex disjoint paths
connecting {$u$} and {$v$}. This article establishes an
upper bound of $ k^2 \log n $ on the number of bits
used in a label. The best previous upper bound for the
label size of such a labeling scheme is $ 2^k \log n
$.",
acknowledgement = ack-nhfb,
articleno = "39",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Graph algorithms; labeling schemes;
vertex-connectivity",
}
@Article{Butman:2010:OPM,
author = "Ayelet Butman and Danny Hermelin and Moshe Lewenstein
and Dror Rawitz",
title = "Optimization problems in multiple-interval graphs",
journal = j-TALG,
volume = "6",
number = "2",
pages = "40:1--40:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721856",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Multiple-interval graphs are a natural generalization
of interval graphs where each vertex may have more than
one interval associated with it. We initiate the study
of optimization problems in multiple-interval graphs by
considering three classical problems: Minimum Vertex
Cover, Minimum Dominating Set, and Maximum Clique. We
describe applications for each one of these problems,
and then proceed to discuss approximation algorithms
for them.\par
Our results can be summarized as follows: Let $t$ be
the number of intervals associated with each vertex in
a given multiple-interval graph. For Minimum Vertex
Cover, we give a $ (2 - 1 / t) $-approximation
algorithm which also works when a $t$-interval
representation of our given graph is absent. Following
this, we give a $ t^2 $-approximation algorithm for
Minimum Dominating Set which adapts well to more
general variants of the problem. We then proceed to
prove that Maximum Clique is NP-hard already for
3-interval graphs, and provide a $ (t^2 - t + 1) / 2
$-approximation algorithm for general values of $ t
\geq 2 $, using bounds proven for the so-called
transversal number of $t$-interval families.",
acknowledgement = ack-nhfb,
articleno = "40",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "$t$-interval graphs; Approximation algorithms; maximum
clique; minimum dominating set; minimum vertex cover;
multiple-interval graphs",
}
@Article{Gupta:2010:DRF,
author = "Anupam Gupta and Mohammadtaghi Hajiaghayi and
Viswanath Nagarajan and R. Ravi",
title = "Dial a {Ride} from $k$-forest",
journal = j-TALG,
volume = "6",
number = "2",
pages = "41:1--41:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721857",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:49:22 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The {\em $k$-forest problem\/} is a common
generalization of both the $k$-MST and the {\em
dense-$k$-subgraph\/} problems. Formally, given a
metric space on $n$ vertices {$V$}, with {$m$} demand
pairs {$ \subseteq V \times V $} and a ``target'' {$ k
\leq m $}, the goal is to find a minimum cost subgraph
that connects {\em at least\/} {$k$} pairs. In this
paper, we give an {$ O(\min \{ \sqrt n \cdot \log k,
\sqrt k \}) $}-approximation algorithm for
{$k$}-forest, improving on the previous best ratio of
{$ O(\min \{ n^{2 / 3}, \sqrt m \} \log n) $} by Segev
and Segev.\par
We then apply our algorithm for {$k$}-forest to obtain
approximation algorithms for several {\em
Dial-a-Ride\/} problems. The basic Dial-a-Ride problem
is the following: given an {$n$} point metric space
with {$m$} objects each with its own source and
destination, and a vehicle capable of carrying {\em at
most\/} $k$ objects at any time, find the minimum
length tour that uses this vehicle to move each object
from its source to destination. We want that the tour
be {\em non-preemptive\/}: that is, each object, once
picked up at its source, is dropped only at its
destination. We prove that an $ \alpha $-approximation
algorithm for the $k$-forest problem implies an {$
O(\alpha \cdot \log^2 n) $}-approximation algorithm for
Dial-a-Ride. Using our results for {$k$}-forest, we get
an {$ O(\min \{ \sqrt n, \sqrt k \} \cdot \log^2 n)
$}-approximation algorithm for Dial-a-Ride. The only
previous result known for Dial-a-Ride was an {$ O(\sqrt
k \log n) $}-approximation by Charikar and
Raghavachari; our results give a different proof of a
similar approximation guarantee --- in fact, when the
vehicle capacity {$k$} is large, we give a slight
improvement on their results. The reduction from
Dial-a-Ride to the {$k$}-forest problem is fairly
robust, and allows us to obtain approximation
algorithms (with the same guarantee) for some
interesting generalizations of Dial-a-Ride.",
acknowledgement = ack-nhfb,
articleno = "41",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithms; network design; vehicle
routing",
}
@Article{Bobier:2010:FAG,
author = "Bruce Bobier and Joe Sawada",
title = "A fast algorithm to generate open meandric systems and
meanders",
journal = j-TALG,
volume = "6",
number = "2",
pages = "42:1--42:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721858",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "An open meandric system is a planar configuration of
acyclic curves crossing an infinite horizontal line in
the plane such that the curves may extend in both
horizontal directions. We present a fast, recursive
algorithm to exhaustively generate open meandric
systems with $n$ crossings. We then illustrate how to
modify the algorithm to generate unidirectional open
meandric systems (the curves extend only to the right)
and nonisomorphic open meandric systems where
equivalence is taken under horizontal reflection. Each
algorithm can be modified to generate systems with
exactly $k$ curves. In the unidirectional case when $ k
= 1 $, we can apply a minor modification along with
some additional optimization steps to yield the first
fast and simple algorithm to generate open meanders.",
acknowledgement = ack-nhfb,
articleno = "42",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "CAT algorithm; meander; open meandric system",
}
@Article{Ergun:2010:PTS,
author = "Funda Ergun and S. Muthukrishnan and Cenk Sahinalp",
title = "Periodicity testing with sublinear samples and space",
journal = j-TALG,
volume = "6",
number = "2",
pages = "43:1--43:??",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1721837.1721859",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:34 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this work, we are interested in periodic trends in
long data streams in the presence of computational
constraints. To this end, we present algorithms for
discovering periodic trends in the combinatorial
property testing model in a data stream {$S$} of length
{$n$} using {$ o(n) $} samples and space.\par
In accordance with the property testing model, we first
explore the notion of being ``close'' to periodic by
defining three different notions of self-distance
through relaxing different notions of exact
periodicity. An input {$S$} is then called
approximately periodic if it exhibits a small
self-distance (with respect to any one self-distance
defined). We show that even though the different
definitions of exact periodicity are equivalent, the
resulting definitions of self-distance and approximate
periodicity are not; we also show that these
self-distances are constant approximations of each
other. Afterwards, we present algorithms which
distinguish between the two cases where {$S$} is
exactly periodic and {$S$} is far from periodic with
only a constant probability of error.\par
Our algorithms sample only {$ O(\sqrt n \log^2 n) $}
(or {$ O(\sqrt n \log^4 n) $}, depending on the
self-distance) positions and use as much space. They
can also find, using {$ o(n) $} samples and space, the
largest/smallest period, and/or all of the approximate
periods of {$S$}. These algorithms can also be viewed
as working on streaming inputs where each data item is
seen once and in order, storing only a sublinear ({$
O(\sqrt n \log^2 n) $} or {$ O(\sqrt n \log^4 n) $})
size sample from which periodicities are identified.",
acknowledgement = ack-nhfb,
articleno = "43",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Combinatorial property testing; periodicity",
}
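
One natural exact-periodicity notion behind Ergun:2010:PTS, together with
a brute-force self-distance (the smallest fraction of positions violating
the period condition over candidate periods), written out so the quantity
being approximated with sublinear samples is concrete. This is only one
of the self-distance variants the paper relaxes, and the sublinear
sampling algorithms themselves are not reproduced here.

def is_periodic(s, p):
    """Exact periodicity: s[i] == s[i + p] for all valid i."""
    return all(s[i] == s[i + p] for i in range(len(s) - p))

def self_distance(s):
    """Brute-force O(n^2) self-distance: the smallest fraction of
    positions violating s[i] == s[i + p] over candidate periods
    1 <= p <= n/2.  Zero iff s is exactly periodic with some such p."""
    n = len(s)
    best = 1.0
    for p in range(1, n // 2 + 1):
        mismatches = sum(s[i] != s[i + p] for i in range(n - p))
        best = min(best, mismatches / (n - p))
    return best

print(is_periodic("abcabcabc", 3))   # True
print(self_distance("abcabcabx"))    # small but nonzero
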
@Article{Vassilevska:2010:FHS,
author = "Virginia Vassilevska and Ryan Williams and Raphael
Yuster",
title = "Finding heaviest {$H$}-subgraphs in real weighted
graphs, with applications",
journal = j-TALG,
volume = "6",
number = "3",
pages = "44:1--44:??",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1798596.1798597",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:50:18 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "For a graph {$G$} with real weights assigned to the
vertices (edges), the MAX {$H$}-SUBGRAPH problem is to
find an {$H$}-subgraph of {$G$} with maximum total
weight, if one exists. Our main results are new
strongly polynomial algorithms for the MAX
{$H$}-SUBGRAPH problem. Some of our algorithms are
based, in part, on fast matrix multiplication.\par
For vertex-weighted graphs with {$n$} vertices we solve
a more general problem: the {\em all pairs\/} MAX
{$H$}-SUBGRAPH problem, where the task is to find for
every pair of vertices {$ u, v $}, a maximum
{$H$}-subgraph containing both {$u$} and {$v$}, if one
exists. We obtain an {$ O(n^{t(\omega, h)}) $}-time
algorithm for the {\em all pairs\/} MAX {$H$}-SUBGRAPH
problem in the case where {$H$} is a fixed graph with
{$h$} vertices and {$ \omega $} is the exponent of fast
matrix multiplication.\par
We also present improved algorithms for the MAX
{$H$}-SUBGRAPH problem in the edge-weighted case. In
particular, we obtain an {$ O(m^{2 - 1 / k} \log n)
$}-time algorithm for the heaviest cycle of length {$ 2
k $} or {$ 2 k - 1 $} in a graph with {$m$} edges and
an {$ O(n^3 / \log n) $}-time randomized algorithm for
finding the heaviest cycle of any fixed length.\par
Our methods also yield efficient algorithms for several
related problems that are faster than any previously
existing algorithms. For example, we show how to find
chromatic {$H$}-subgraphs in edge-colored graphs, and
how to compute the most significant bits of the
distance product of two real matrices, in truly
subcubic time.",
acknowledgement = ack-nhfb,
articleno = "44",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "H-subgraph; matrix multiplication; weighted graph",
}
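
A brute-force reference point for the vertex-weighted MAX H-SUBGRAPH
problem of Vassilevska:2010:FHS in its simplest instance, H a triangle:
enumerate all triangles and keep the heaviest, in O(n^3) time. The
paper's matrix-multiplication-based algorithms do substantially better;
the cubic loop below only states the problem concretely.

def max_weight_triangle(weights, adj):
    """weights[v] = real vertex weight; adj = list of neighbor sets.
    Returns (best total weight, triangle) or (None, None) if no
    triangle exists.  Brute force O(n^3)."""
    n = len(weights)
    best, best_tri = None, None
    for u in range(n):
        for v in range(u + 1, n):
            if v not in adj[u]:
                continue
            for w in range(v + 1, n):
                if w in adj[u] and w in adj[v]:
                    total = weights[u] + weights[v] + weights[w]
                    if best is None or total > best:
                        best, best_tri = total, (u, v, w)
    return best, best_tri

weights = [1.0, 5.0, 2.0, 4.0]
edges = [(0, 1), (1, 2), (0, 2), (1, 3), (2, 3)]
adj = [set() for _ in weights]
for a, b in edges:
    adj[a].add(b)
    adj[b].add(a)
print(max_weight_triangle(weights, adj))   # (11.0, (1, 2, 3))
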
@Article{Ruskey:2010:EUC,
author = "Frank Ruskey and Aaron Williams",
title = "An explicit universal cycle for the $ (n - 1)
$-permutations of an $n$-set",
journal = j-TALG,
volume = "6",
number = "3",
pages = "45:1--45:12",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1798596.1798598",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:50:18 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We show how to construct an {\em explicit\/} Hamilton
cycle in the directed Cayley graph {$ \vec {\rm
Cay}(\sigma_n, \sigma_{n - 1} : S_n) $}, where {$
\sigma_k $} is the rotation {$ (1 2 \cdots k) $}. The
existence of such cycles was shown by Jackson [1996]
but the proof only shows that a certain directed graph
is Eulerian, and Knuth [2005] asks for an explicit
construction. We show that a simple recursion describes
our Hamilton cycle and that the cycle can be generated
by an iterative algorithm that uses {$ O(n) $} space.
Moreover, the algorithm produces each successive edge
of the cycle in constant time; such algorithms are said
to be {\em loopless}. Finally, our Hamilton cycle can
be used to construct an explicit universal cycle for
the {$ (n - 1) $}-permutations of an {$n$}-set, or as
the basis of an efficient algorithm for generating
every {$n$}-permutation of an $n$-set within a circular
array or linked list.",
acknowledgement = ack-nhfb,
articleno = "45",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "loopless algorithm; Universal cycle",
}
@Article{Drescher:2010:AAM,
author = "Matthew Drescher and Adrian Vetta",
title = "An approximation algorithm for the maximum leaf
spanning arborescence problem",
journal = j-TALG,
volume = "6",
number = "3",
pages = "46:1--46:??",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1798596.1798599",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:50:18 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present an {$ O(\sqrt {{\rm opt}}) $}-approximation
algorithm for the maximum leaf spanning arborescence
problem, where opt is the number of leaves in an
optimal spanning arborescence. The result is based upon
an {$ O(1) $}-approximation algorithm for a special
class of directed graphs called willows. Incorporating
the method for willow graphs as a subroutine in a local
improvement algorithm gives the bound for general
directed graphs.",
acknowledgement = ack-nhfb,
articleno = "46",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation Algorithms; arborescence; directed
graphs; maximum leaf",
}
@Article{Naor:2010:DCA,
author = "Joseph (Seffi) Naor and Roy Schwartz",
title = "The directed circular arrangement problem",
journal = j-TALG,
volume = "6",
number = "3",
pages = "47:1--47:??",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1798596.1798600",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:50:18 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the problem of embedding a directed graph
onto evenly spaced points on a circle while minimizing
the total weighted edge length. We present the first
poly-logarithmic approximation factor algorithm for
this problem which yields an approximation factor of {$
O(\log n \log \log n) $}, thus improving the previous
{$ \tilde {O}(\sqrt n) $} approximation factor. In
order to achieve this, we introduce a new problem which
we call the {\em directed penalized linear
arrangement}. This problem generalizes both the
directed feedback edge set problem and the directed
linear arrangement problem. We present an {$ O(\log n
\log \log n) $}-approximation factor algorithm for this
newly defined problem. Our solution uses two distinct
directed metrics (``right'' and ``left'') which
together yield a lower bound on the value of an optimal
solution. In addition, we define a sequence of new
directed spreading metrics that are used for applying
the algorithm recursively on smaller subgraphs. The new
spreading metrics allow us to define an asymmetric
region growing procedure that accounts simultaneously
for both incoming and outgoing edges. To the best of
our knowledge, this is the first time that a region
growing procedure is defined in directed graphs that
allows for such an accounting.",
acknowledgement = ack-nhfb,
articleno = "47",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "region growing; scheduling; Spreading metrics",
}
@Article{Azar:2010:DEC,
author = "Yossi Azar and Shay Kutten and Boaz Patt-Shamir",
title = "Distributed error confinement",
journal = j-TALG,
volume = "6",
number = "3",
pages = "48:1--48:??",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1798596.1798601",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:50:18 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study error confinement in distributed
applications, which can be viewed as an extreme case of
various fault locality notions studied in the past.
Error confinement means that to the external observer,
only nodes that were directly hit by a fault may
deviate from their specified correct behavior, and only
temporarily. The externally observable behavior of all
other nodes must remain impeccable, even though their
internal state may be affected. Error confinement is
impossible if an adversary is allowed to inflict
arbitrary transient faults on the system, since the
faults might completely wipe out input values. We
introduce a new fault-tolerance measure we call {\em
agility}, which quantifies the fault tolerance of an
algorithm that disseminates information against state
corrupting faults.\par
We then propose broadcast algorithms that guarantee
error confinement with optimal agility to within a
constant factor in synchronous networks. These
algorithms can serve as building blocks in more general
reactive systems. Previous results in exploring
locality in reactive systems were not error confined,
or allowed a wide range of behaviors to be considered
correct. Our results also include a new technique that
can be used to analyze the ``cow path'' problem.",
acknowledgement = ack-nhfb,
articleno = "48",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Distributed algorithms; persistence;
self-stabilization; voting",
}
@Article{Aggarwal:2010:AAC,
author = "Gagan Aggarwal and Rina Panigrahy and Tom{\'a}s Feder
and Dilys Thomas and Krishnaram Kenthapadi and Samir
Khuller and An Zhu",
title = "Achieving anonymity via clustering",
journal = j-TALG,
volume = "6",
number = "3",
pages = "49:1--49:??",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1798596.1798602",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:50:18 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Publishing data for analysis from a table containing
personal records, while maintaining individual privacy,
is a problem of increasing importance today. The
traditional approach of deidentifying records is to
remove identifying fields such as social security
number, name, etc. However, recent research has shown
that a large fraction of the U.S. population can be
identified using nonkey attributes (called
quasi-identifiers) such as date of birth, gender, and
zip code. The $k$-anonymity model protects privacy via
requiring that nonkey attributes that leak information
are suppressed or generalized so that, for every record
in the modified table, there are at least $ k - 1 $ other
records having exactly the same values for
quasi-identifiers. We propose a new method for
anonymizing data records, where quasi-identifiers of
data records are first clustered and then cluster
centers are published. To ensure privacy of the data
records, we impose the constraint that each cluster
must contain no fewer than a prespecified number of
data records. This technique is more general since we
have a much larger choice for cluster centers than
$k$-anonymity. In many cases, it lets us release a lot
more information without compromising privacy. We also
provide constant factor approximation algorithms to
come up with such a clustering. This is the first set
of algorithms for the anonymization problem where the
performance is independent of the anonymity parameter
$k$. We further observe that a few outlier points can
significantly increase the cost of anonymization.
Hence, we extend our algorithms to allow an $ \epsilon
$ fraction of points to remain unclustered, that is,
deleted from the anonymized publication. Thus, by not
releasing a small fraction of the database records, we
can ensure that the data published for analysis has
less distortion and hence is more useful. Our
approximation algorithms for new clustering objectives
are of independent interest and could be applicable in
other clustering scenarios as well.",
acknowledgement = ack-nhfb,
articleno = "49",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "anonymity; approximation algorithms; clustering;
Privacy",
}
@Article{Gordon:2010:CWT,
author = "Eyal Gordon and Adi Ros{\'e}n",
title = "Competitive weighted throughput analysis of greedy
protocols on {DAGs}",
journal = j-TALG,
volume = "6",
number = "3",
pages = "50:1--50:??",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1798596.1798603",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:50:18 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The combination of the buffer sizes of routers
deployed in the Internet, and the Internet traffic
itself, leads routinely to the dropping of packets.
Motivated by this, we are interested in the problem of
maximizing the throughput of protocols that control
packet networks. Moreover, we are interested in a
setting where different packets have different
priorities (or weights), thus taking into account
Quality-of-Service considerations.\par
We first extend the Competitive Network Throughput
(CNT) model introduced by Aiello et al. [2003] to the
weighted packets case. We analyze the performance of
online, local-control protocols by their competitive
ratio, in the face of arbitrary traffic, using as a
measure the total weight of the packets that arrive to
their destinations, rather than being dropped en-route.
We prove that on Directed Acyclic Graphs (DAGs), any
greedy protocol is competitive, with competitive ratio
independent of the weights of the packets. Here we mean
by a ``greedy protocol'' a protocol that not only does
not leave a resource idle unnecessarily, but also
prefers packets with higher weight over those with
lower weight. We give two independent upper bounds on
the competitive ratio of general greedy protocols on
DAGs. We further give lower bounds that show that our
upper bounds cannot be improved (other than constant
factors) in the general case. Both our upper and lower
bounds apply also to the unweighted case, and they
improve the results given in Aiello et al. [2003] for
that case. We thus give tight (up to constant factors)
upper and lower bounds for both the unweighted and
weighted cases.\par
In the course of proving our upper bounds we prove a
lemma that gives upper bounds on the delivery times of
packets by any greedy protocol on general DAGs (without
buffer size considerations). We believe that this lemma
may be of independent interest and may find additional
applications.",
acknowledgement = ack-nhfb,
articleno = "50",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Buffer management; competitive analysis; competitive
network throughput; online algorithms",
}
@Article{Chakrabarti:2010:NOA,
author = "Amit Chakrabarti and Graham Cormode and Andrew
Mcgregor",
title = "A near-optimal algorithm for estimating the entropy of
a stream",
journal = j-TALG,
volume = "6",
number = "3",
pages = "51:1--51:??",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1798596.1798604",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:50:18 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We describe a simple algorithm for approximating the
empirical entropy of a stream of $m$ values up to a
multiplicative factor of $ (1 + \epsilon) $ using a
single pass, {$ O(\epsilon^{ - 2} \log (\delta^{ - 1})
\log m) $} words of space, and {$ O(\log \epsilon^{ -
1} + \log \log \delta^{ - 1} + \log \log m) $}
processing time per item in the stream. Our algorithm
is based upon a novel extension of a method introduced
by Alon et al. [1999]. This improves over previous work
on this problem. We show a space lower bound of {$
\Omega (\epsilon^{ - 2} / \log^2 (\epsilon^{ - 1})) $},
demonstrating that our algorithm is near-optimal in
terms of its dependency on {$ \epsilon $}.\par
We show that generalizing to
multiplicative-approximation of the {$k$}th-order
entropy requires close to linear space for {$ k \geq 1
$}. In contrast we show that additive-approximation is
possible in a single pass using only poly-logarithmic
space. Lastly, we show how to compute a multiplicative
approximation to the entropy of a random walk on an
undirected graph.",
acknowledgement = ack-nhfb,
articleno = "51",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "approximation algorithms; Data streams; entropy",
}
@Article{Fattal:2010:ADM,
author = "Shahar Fattal and Dana Ron",
title = "Approximating the distance to monotonicity in high
dimensions",
journal = j-TALG,
volume = "6",
number = "3",
pages = "52:1--52:??",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1798596.1798605",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:50:18 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article we study the problem of approximating
the distance of a function {$ f : [n]^d \rightarrow R
$} to monotonicity where {$ [n] = \{ 1, \ldots, n \} $}
and {$R$} is some fully ordered range. Namely, we are
interested in randomized sublinear algorithms that
approximate the Hamming distance between a given
function and the closest monotone function. We allow
both an additive error, parameterized by {$ \delta $},
and a multiplicative error.\par
Previous work on distance approximation to monotonicity
focused on the one-dimensional case and the only
explicit extension to higher dimensions was with a
multiplicative approximation factor exponential in the
dimension {\em d}. Building on Goldreich et al. [2000]
and Dodis et al. [1999], in which there are better
implicit results for the case {$ n = 2 $}, we describe
a reduction from the case of functions over the
{$d$}-dimensional hypercube $ [n]^d $ to the case of
functions over the $k$-dimensional hypercube $ [n]^k $,
where $ 1 \leq k \leq d $. The quality of estimation
that this reduction provides is linear in $ \lceil d /
k \rceil $ and logarithmic in the size of the range {$
|R| $} (if the range is infinite or just very large,
then {$ \log |R| $} can be replaced by {$ d \log n $}).
Using this reduction and a known distance approximation
algorithm for the one-dimensional case, we obtain a
distance approximation algorithm for functions over the
{$d$}-dimensional hypercube, with any range {$R$},
which has a multiplicative approximation factor of {$
O(d \log |R|) $}.\par
For the case of a binary range, we present algorithms
for distance approximation to monotonicity of functions
over one dimension, two dimensions, and the
{$k$}-dimensional hypercube (for any {$ k \geq 1 $} ).
Applying these algorithms and the reduction described
before, we obtain a variety of distance approximation
algorithms for Boolean functions over the
{$d$}-dimensional hypercube which suggest a trade-off
between quality of estimation and efficiency of
computation. In particular, the multiplicative error
ranges between {$ O(d) $} and {$ O(1) $}.",
acknowledgement = ack-nhfb,
articleno = "52",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "distance approximation; monotonicity; property
testing; Sublinear approximation algorithms",
}
@Article{Martinez:2010:ASS,
author = "Conrado Mart{\'\i}nez and Daniel Panario and Alfredo
Viola",
title = "Adaptive sampling strategies for quickselects",
journal = j-TALG,
volume = "6",
number = "3",
pages = "53:1--53:??",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1798596.1798606",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:50:18 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Quickselect with median-of-3 is largely used in
practice and its behavior is fairly well understood.
However, the following natural adaptive variant, which
we call {\em proportion-from-3}, had not been
previously analyzed: ``choose as pivot the smallest of
the sample if the relative rank of the sought element
is below 1/3, the largest if the relative rank is above
2/3, and the median if the relative rank is between 1/3
and 2/3.'' We first analyze the average number of
comparisons made when using proportion-from-2 and then
for proportion-from-3. We also analyze $ \nu $-find, a
generalization of proportion-from-3 with interval
breakpoints at $ \nu $ and $ 1 - \nu $. We show that
there exists an optimal value of $ \nu $ and we also
provide the range of values of $ \nu $ where $ \nu
$-find outperforms median-of-3. Then, we consider the
average total cost of these strategies, which takes
into account the cost of both comparisons and
exchanges. Our results strongly suggest that a suitable
implementation of $ \nu $-find could be the method of
choice in a practical setting. We also study the
behavior of proportion-from-$s$ with $ s > 3 $ and in
particular we show that proportion-from-$s$-like
strategies are optimal when $ s \rightarrow \infty $.",
acknowledgement = ack-nhfb,
articleno = "53",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Analytic combinatorics; average-case analysis;
divide-and-conquer; Find; quickselect; sampling;
selection",
}
@Article{Alon:2010:BFP,
author = "Noga Alon and Shai Gutner",
title = "Balanced families of perfect hash functions and their
applications",
journal = j-TALG,
volume = "6",
number = "3",
pages = "54:1--54:??",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1798596.1798607",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:50:18 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The construction of perfect hash functions is a
well-studied topic. In this article, this concept is
generalized with the following definition. We say that
a family of functions from $ [n] $ to $ [k] $ is a $
\delta $-balanced $ (n, k) $-family of perfect hash
functions if for every {$ S \subseteq [n] $}, {$ |S| =
k $}, the number of functions that are {$1$}-{$1$} on
{$S$} is between {$ T / \delta $} and {$ \delta T $}
for some constant {$ T > 0 $}. The standard definition
of a family of perfect hash functions requires that
there will be at least one function that is {$1$}-{$1$}
on {$S$}, for each {$S$} of size {$k$}. In the new
notion of balanced families, we require the number of
{$1$}-{$1$} functions to be almost the same (taking $
\delta $ to be close to $1$ ) for every such {$S$}. Our
main result is that for any constant {$ \delta > 1 $},
a {$ \delta $}-balanced {$ (n, k) $}-family of perfect
hash functions of size {$ 2^{O(k \log \log k)} \log n
$} can be constructed in time {$ 2^{O(k \log \log k)} n
\log n $}. Using the technique of color-coding we can
apply our explicit constructions to devise
approximation algorithms for various counting problems
in graphs. In particular, we exhibit a deterministic
polynomial-time algorithm for approximating both the
number of simple paths of length {$k$} and the number
of simple cycles of size {$k$} for any {$ k \leq O(\log
n / \log \log \log n) $} in a graph with {$n$}
vertices. The approximation is up to any fixed
desirable relative error.",
acknowledgement = ack-nhfb,
articleno = "54",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximate counting of subgraphs; color-coding;
perfect hashing",
}
@Article{Coppersmith:2010:OWN,
author = "Don Coppersmith and Lisa K. Fleischer and Atri Rudra",
title = "Ordering by weighted number of wins gives a good
ranking for weighted tournaments",
journal = j-TALG,
volume = "6",
number = "3",
pages = "55:1--55:??",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1798596.1798608",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:50:18 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the following simple algorithm for
the feedback arc set problem in weighted tournaments: order
the vertices by their weighted indegrees. We show that
this algorithm has an approximation guarantee of 5 if
the weights satisfy {\em probability constraints\/}
(for any pair of vertices $u$ and $v$, $ w_{uv} +
w_{vu} = 1 $ ). Special cases of the feedback arc set
problem in such weighted tournaments include the
feedback arc set problem in unweighted tournaments and
rank aggregation. To complement the upper bound, for
any constant $ \epsilon > 0 $, we exhibit an infinite
family of (unweighted) tournaments for which the
aforesaid algorithm ({\em irrespective\/} of how ties
are broken) has an approximation ratio of $ 5 -
\epsilon $.",
acknowledgement = ack-nhfb,
articleno = "55",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "Approximation algorithms; Borda's method; feedback arc
set problem; rank aggregation; tournaments",
}
@Article{Gonzalez-Gutierrez:2010:ACT,
author = "Arturo Gonzalez-Gutierrez and Teofilo F. Gonzalez",
title = "Approximating corridors and tours via restriction and
relaxation techniques",
journal = j-TALG,
volume = "6",
number = "3",
pages = "56:1--56:??",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1798596.1798609",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
bibdate = "Sat Aug 14 15:50:18 MDT 2010",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given a rectangular boundary partitioned into
rectangles, the Minimum-Length Corridor (MLC-R) problem
consists of finding a corridor of least total length. A
corridor is a set of connected line segments, each of
which must lie along the line segments that form the
rectangular boundary and/or the boundary of the
rectangles, and must include at least one point from
the boundary of every rectangle and from the
rectangular boundary. The MLC-R problem is known to be
NP-hard. We present the first polynomial-time constant
ratio approximation algorithm for the MLC-R and MLC$_k$
problems. The MLC$_k$ problem is a generalization of
the MLC-R problem where the rectangles are rectilinear
$c$-gons, for $ c \leq k $, where $k$ is a constant. We
also present the first polynomial-time constant ratio
approximation algorithm for the Group Traveling
Salesperson Problem (GTSP) for a rectangular boundary
partitioned into rectilinear $c$-gons as in the MLC$_k$
problem. Our algorithms are based on the restriction
and relaxation approximation techniques.",
acknowledgement = ack-nhfb,
articleno = "56",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
keywords = "approximation algorithms; complexity; computational
geometry; Corridors; restriction and relaxation
techniques",
}
@Article{Alber:2010:EN,
author = "Susanne Albers",
title = "Editorial note",
journal = j-TALG,
volume = "6",
number = "4",
pages = "57:1--57:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824778",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "57",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Hajiaghayi:2010:FSI,
author = "Mohammad T. Hajiaghayi and Shang-Hua Teng",
title = "Foreword to special issue on {SODA 2008}",
journal = j-TALG,
volume = "6",
number = "4",
pages = "58:1--58:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824793",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "58",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Ackermann:2010:CMN,
author = "Marcel R. Ackermann and Johannes Bl{\"o}mer and
Christian Sohler",
title = "Clustering for metric and nonmetric distance
measures",
journal = j-TALG,
volume = "6",
number = "4",
pages = "59:1--59:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824779",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study a generalization of the $k$-median problem
with respect to an arbitrary dissimilarity measure $D$.
Given a finite set $P$ of size $n$, our goal is to find
a set $C$ of size $k$ such that the sum of errors $
D(P, C) = \sum_{p \in P} \min_{c \in C} D(p, c)$ is
minimized. The main result in this article can be
stated as follows: There exists a $ (1 +
\epsilon)$-approximation algorithm for the $k$-median
problem with respect to $D$, if the 1-median problem
can be approximated within a factor of $ (1 +
\epsilon)$ by taking a random sample of constant size
and solving the 1-median problem on the sample exactly.
This algorithm requires time $ n 2^{O(m k \log (m k /
\epsilon))}$, where $m$ is a constant that depends only
on $ \epsilon $ and $D$. Using this characterization,
we obtain the first linear time $ (1 +
\epsilon)$-approximation algorithms for the $k$-median
problem in an arbitrary metric space with bounded
doubling dimension, for the Kullback--Leibler
divergence (relative entropy), for the Itakura-Saito
divergence, for Mahalanobis distances, and for some
special cases of Bregman divergences. Moreover, we
obtain previously known results for the Euclidean
$k$-median problem and the Euclidean $k$-means problem
in a simplified manner. Our results are based on a new
analysis of an algorithm of Kumar et al. [2004].",
acknowledgement = ack-nhfb,
articleno = "59",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Andersen:2010:LAF,
author = "Reid Andersen",
title = "A local algorithm for finding dense subgraphs",
journal = j-TALG,
volume = "6",
number = "4",
pages = "60:1--60:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824780",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We describe a local algorithm for finding subgraphs
with high density, according to a measure of density
introduced by Kannan and Vinay [1999]. The algorithm
takes as input a bipartite graph $G$, a starting vertex
$v$, and a parameter $k$, and outputs an induced
subgraph of $G$. It is local in the sense that it does
not examine the entire input graph; instead, it
adaptively explores a region of the graph near the
starting vertex. The running time of the algorithm is
bounded by $ O(\Delta k^2)$, which depends on the
maximum degree $ \Delta $, but is otherwise independent
of the graph. We prove the following approximation
guarantee: for any subgraph $S$ with $ k'$ vertices and
density $ \theta $, there exists a set $ S' \subseteq
S$ for which the algorithm outputs a subgraph with
density $ \Omega (\theta / \log \Delta)$ whenever $ v
\in S'$ and $ k \geq k'$. We prove that $ S'$ contains
at least half of the edges in $S$.",
acknowledgement = ack-nhfb,
articleno = "60",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Cabello:2010:FOT,
author = "Sergio Cabello and Matt Devos and Jeff Erickson and
Bojan Mohar",
title = "Finding one tight cycle",
journal = j-TALG,
volume = "6",
number = "4",
pages = "61:1--61:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824781",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A cycle on a combinatorial surface is tight if it is
as short as possible in its (free) homotopy class. We
describe an algorithm to compute a single tight,
noncontractible, essentially simple cycle on a given
orientable combinatorial surface in $ O(n \log n) $
time. The only method previously known for this problem
was to compute the globally shortest noncontractible or
nonseparating cycle in $ O (\min \{ g^3, n \} n \log n)
$ time, where $g$ is the genus of the surface. As a
consequence, we can compute the shortest cycle freely
homotopic to a chosen boundary cycle in $ O (n \log n)$
time, a tight octagonal decomposition in $ O (g n \log
n)$ time, and a shortest contractible cycle enclosing a
nonempty set of faces in $ O (n \log^2 n)$ time.",
acknowledgement = ack-nhfb,
articleno = "61",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Chan:2010:BSP,
author = "Timothy M. Chan",
title = "On the bichromatic $k$-set problem",
journal = j-TALG,
volume = "6",
number = "4",
pages = "62:1--62:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824782",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "62",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Clarkson:2010:CSG,
author = "Kenneth L. Clarkson",
title = "Coresets, sparse greedy approximation, and the
{Frank--Wolfe} algorithm",
journal = j-TALG,
volume = "6",
number = "4",
pages = "63:1--63:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824783",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The problem of maximizing a concave function $ f(x) $
in the unit simplex $ \Delta $ can be solved
approximately by a simple greedy algorithm. For given
$k$, the algorithm can find a point $ x_{(k)}$ on a
$k$-dimensional face of $ \Delta $, such that $
f(x_{(k)}) \geq f(x_*) - O(1 / k)$. Here $ f(x_*)$ is
the maximum value of $f$ in $ \Delta $, and the
constant factor depends on $f$. This algorithm and
analysis were known before, and related to problems of
statistics and machine learning, such as boosting,
regression, and density mixture estimation. In other
work, coming from computational geometry, the existence
of $ \epsilon $-coresets was shown for the minimum
enclosing ball problem by means of a simple greedy
algorithm. Similar greedy algorithms, which are special
cases of the Frank--Wolfe algorithm, were described for
other enclosure problems. Here these results are tied
together, stronger convergence results are reviewed,
and several coreset bounds are generalized or
strengthened.",
acknowledgement = ack-nhfb,
articleno = "63",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Emek:2010:NLT,
author = "Yuval Emek and David Peleg and Liam Roditty",
title = "A near-linear-time algorithm for computing replacement
paths in planar directed graphs",
journal = j-TALG,
volume = "6",
number = "4",
pages = "64:1--64:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824784",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Let $ G = (V(G), E(G)) $ be a directed graph with
nonnegative edge lengths and let $P$ be a shortest path
from $s$ to $t$ in $G$. In the replacement paths
problem we are required to compute for every edge $e$
in $P$, the length of a shortest path from $s$ to $t$
that avoids $e$. The fastest known algorithm for
solving the problem in weighted directed graphs is the
trivial one: each edge in $P$ is removed from the graph
in its turn and the distance from $s$ to $t$ in the
modified graph is computed. The running time of this
algorithm is $ O(m n + n^2 \log n)$, where $ n = | V(G)
|$ and $ m = | E(G) |$. The replacement paths problem
is strongly motivated by two different applications.
First, the fastest algorithm to compute the $k$ simple
shortest paths from $s$ to $t$ in directed graphs [Yen
1971; Lawler 1972] repeatedly computes the replacement
paths from $s$ to $t$. Its running time is $ O(k n (m +
n \log n))$. Second, the computation of Vickrey pricing
of edges in distributed networks can be reduced to the
replacement paths problem. An open question raised by
Nisan and Ronen [2001] asks whether it is possible to
compute the Vickrey pricing faster than the trivial
algorithm described in the previous paragraph. In this
article we present a near-linear time algorithm for
computing replacement paths in weighted planar directed
graphs. In particular, the algorithm computes the
lengths of the replacement paths in $ O (n \log^3 n)$
time (recall that in planar graphs $ m = O(n)$). This
result immediately improves the running time of the two
applications mentioned before by almost a linear
factor. Our algorithm is obtained by combining several
new ideas with a data structure of Klein [2005] that
supports multisource shortest paths queries in planar
directed graphs in logarithmic time. Our algorithm can
be adapted to address the variant of the problem in
which one is interested in the replacement path itself
(rather than the length of the path). In that case the
algorithm is executed in a preprocessing stage
constructing a data structure that supports replacement
path queries in time $ {\tilde O}(h)$, where $h$ is the
number of hops in the replacement path. In addition, we
can handle the variant in which vertices should be
avoided instead of edges.",
acknowledgement = ack-nhfb,
articleno = "64",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Faigle:2010:TPG,
author = "Ulrich Faigle and Britta Peis",
title = "Two-phase greedy algorithms for some classes of
combinatorial linear programs",
journal = j-TALG,
volume = "6",
number = "4",
pages = "65:1--65:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824785",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present greedy algorithms for some classes of
combinatorial packing and cover problems within the
general formal framework of Hoffman and Schwartz'
lattice polyhedra. Our algorithms compute in a first
phase Monge solutions for the associated dual cover and
packing problems and then proceed to construct greedy
solutions for the primal problems in a second phase. We
show optimality of the algorithms under certain sub-
and supermodular assumptions and monotone constraints.
For supermodular lattice polyhedra with submodular
constraints, our algorithms offer the farthest reaching
generalization of Edmonds' polymatroid greedy algorithm
currently known.",
acknowledgement = ack-nhfb,
articleno = "65",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Feldman:2010:DSS,
author = "Jon Feldman and S. Muthukrishnan and Anastasios
Sidiropoulos and Cliff Stein and Zoya Svitkina",
title = "On distributing symmetric streaming computations",
journal = j-TALG,
volume = "6",
number = "4",
pages = "66:1--66:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824786",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A common approach for dealing with large datasets is
to stream over the input in one pass, and perform
computations using sublinear resources. For truly
massive datasets, however, even making a single pass
over the data is prohibitive. Therefore, streaming
computations must be distributed over many machines. In
practice, obtaining significant speedups using
distributed computation has numerous challenges
including synchronization, load balancing, overcoming
processor failures, and data distribution. Successful
systems in practice such as Google's MapReduce and
Apache's Hadoop address these problems by only allowing
a certain class of highly distributable tasks defined
by local computations that can be applied in any order
to the input. The fundamental question that arises is:
How does the class of computational tasks supported by
these systems differ from the class for which streaming
solutions exist? We introduce a simple algorithmic
model for massive, unordered, distributed (mud)
computation, as implemented by these systems. We show
that in principle, mud algorithms are equivalent in
power to symmetric streaming algorithms. More
precisely, we show that any symmetric (order-invariant)
function that can be computed by a streaming algorithm
can also be computed by a mud algorithm, with
comparable space and communication complexity. Our
simulation uses Savitch's theorem and therefore has
superpolynomial time complexity. We extend our
simulation result to some natural classes of
approximate and randomized streaming algorithms. We
also give negative results, using communication
complexity arguments to prove that extensions to
private randomness, promise problems, and indeterminate
functions are impossible. We also introduce an
extension of the mud model to multiple keys and
multiple rounds.",
acknowledgement = ack-nhfb,
articleno = "66",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Oudot:2010:GDT,
author = "Steve Y. Oudot and Leonidas J. Guibas and Jie Gao and
Yue Wang",
title = "Geodesic {Delaunay} triangulations in bounded planar
domains",
journal = j-TALG,
volume = "6",
number = "4",
pages = "67:1--67:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824787",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We introduce a new feature size for bounded domains in
the plane endowed with an intrinsic metric. Given a
point $x$ in a domain $X$, the systolic feature size of
$X$ at $x$ measures half the length of the shortest
loop through $x$ that is not null-homotopic in $X$. The
resort to an intrinsic metric makes the systolic
feature size rather insensitive to the local geometry
of the domain, in contrast with its predecessors (local
feature size, weak feature size, homology feature
size). This reduces the number of samples required to
capture the topology of $X$, provided that a reliable
approximation to the intrinsic metric of $X$ is
available. Under sufficient sampling conditions
involving the systolic feature size, we show that the
geodesic Delaunay triangulation $ D_X(L)$ of a finite
sampling $L$ is homotopy equivalent to $X$. Under
similar conditions, $ D_X(L)$ is sandwiched between the
geodesic witness complex $ C^W_X (L)$ and a relaxed
version $ C^W_{X, \nu }(L)$. In the conference version
of the article, we took advantage of this fact and
proved that the homology of $ D_X(L)$ (and hence the
one of $X$) can be retrieved by computing the
persistent homology between $ C^W_X(L)$ and $ C^W_{X,
\nu }(L)$. Here, we investigate further and show that
the homology of $X$ can also be recovered from the
persistent homology associated with inclusions of type
$ C^W_{X, \nu }(L) \hookrightarrow C^W_{X, \nu '} (L)$,
under some conditions on the parameters $ \nu \leq \nu
'$. Similar results are obtained for Vietoris--Rips
complexes in the intrinsic metric. The proofs draw some
connections with recent advances on the front of
homology inference from point cloud data, but also with
several well-known concepts of Riemannian (and even
metric) geometry. On the algorithmic front, we propose
algorithms for estimating the systolic feature size of
a bounded planar domain $X$, selecting a landmark set
of sufficient density, and computing the homology of
$X$ using geodesic witness complexes or Rips
complexes.",
acknowledgement = ack-nhfb,
articleno = "67",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Kapron:2010:FAB,
author = "Bruce M. Kapron and David Kempe and Valerie King and
Jared Saia and Vishal Sanwalani",
title = "Fast asynchronous {Byzantine} agreement and leader
election with full information",
journal = j-TALG,
volume = "6",
number = "4",
pages = "68:1--68:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824788",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We resolve two long-standing open problems in
distributed computation by describing polylogarithmic
protocols for Byzantine agreement and leader election
in the asynchronous full information model with a
nonadaptive malicious adversary. All past protocols for
asynchronous Byzantine agreement had been exponential,
and no protocol for asynchronous leader election had
been known. Our protocols tolerate up to $ (1 / 3 -
\epsilon) \cdot n $ faulty processors, for any positive
constant $ \epsilon $. They are Monte Carlo, succeeding
with probability $ 1 - o(1) $ for Byzantine agreement,
and constant probability for leader election. A key
technical contribution of our article is a new approach
for emulating Feige's lightest bin protocol, even with
adversarial message scheduling.",
acknowledgement = ack-nhfb,
articleno = "68",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Svitkina:2010:LBF,
author = "Zoya Svitkina",
title = "Lower-bounded facility location",
journal = j-TALG,
volume = "6",
number = "4",
pages = "69:1--69:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824789",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study the lower-bounded facility location problem
which generalizes the classical uncapacitated facility
location problem in that it comes with lower bound
constraints for the number of clients assigned to a
facility in the case that this facility is opened. This
problem was introduced independently in the papers by
Karger and Minkoff [2000] and by Guha et al. [2000],
both of which give bicriteria approximation algorithms
for it. These bicriteria algorithms come within a
constant factor of the optimal solution cost, but they
also violate the lower bound constraints by a constant
factor. Our result in this article is the first true
approximation algorithm for the lower-bounded facility
location problem which respects the lower bound
constraints and achieves a constant approximation ratio
for the objective function. The main technical idea for
the design of the algorithm is a reduction to the
capacitated facility location problem, which has known
constant-factor approximation algorithms.",
acknowledgement = ack-nhfb,
articleno = "69",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Williams:2010:NPW,
author = "Virginia Vassilevska Williams",
title = "Nondecreasing paths in a weighted graph or: How to
optimally read a train schedule",
journal = j-TALG,
volume = "6",
number = "4",
pages = "70:1--70:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824790",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A travel booking office has timetables giving arrival
and departure times for all scheduled trains, including
their origins and destinations. A customer presents a
starting station and demands a route with perhaps
several train connections taking him to his destination
as early as possible. The booking office must find the
best route for its customers. This problem was first
considered in the theory of algorithms by Minty [1958],
who reduced it to a problem on directed edge-weighted
graphs: find a path from a given source to a given
target such that the consecutive weights on the path
are nondecreasing and the last weight on the path is
minimized. Minty gave the first algorithm for the
single-source version of the problem, in which one
finds minimum last weight nondecreasing paths from the
source to every other vertex. In this article we give
the first linear-time algorithm for this problem in
the word-RAM model of computation. We also define an
all-pairs version for the problem and give a strongly
polynomial truly subcubic algorithm for it. Finally, we
discuss an extension of the problem in which one also
has prices on trip segments and one wishes to find a
cheapest valid itinerary.",
acknowledgement = ack-nhfb,
articleno = "70",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Agarwal:2010:HDU,
author = "Pankaj K. Agarwal and Sariel Har-Peled and Micha
Sharir and Yusu Wang",
title = "{Hausdorff} distance under translation for points and
balls",
journal = j-TALG,
volume = "6",
number = "4",
pages = "71:1--71:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824791",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study the shape matching problem under the
Hausdorff distance and its variants. In the first part
of the article, we consider two sets $A$, $B$ of balls
in $ R^d$, $ d = 2, 3$, and wish to find a translation
$t$ that minimizes the Hausdorff distance between $ A +
t$, the set of all balls in $A$ shifted by $t$, and
$B$. We consider several variants of this problem.
First, we extend the notion of Hausdorff distance from
sets of points to sets of balls, so that each ball has
to be matched with the nearest ball in the other set.
We also consider the problem in the standard setting,
by computing the Hausdorff distance between the unions
of the two sets (as point sets). Second, we consider
either all possible translations $t$ (as is the
standard approach), or consider only translations that
keep the balls of $ A + t$ disjoint from those of $B$.
We propose several exact and approximation algorithms
for these problems. In the second part of the article,
we note that the Hausdorff distance is sensitive to
outliers, and thus consider two variants that are more
robust: the root-mean-square (rms) and the summed
Hausdorff distance. We propose efficient approximation
algorithms for computing the minimum rms and the
minimum summed Hausdorff distances under translation,
between two point sets in $ R^d$. In order to obtain a
fast algorithm for the summed Hausdorff distance, we
propose a deterministic efficient dynamic data
structure for maintaining an $ \epsilon $-approximation
of the 1-median of a set of points in $ R^d$, under
insertions and deletions.",
acknowledgement = ack-nhfb,
articleno = "71",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Carlsson:2010:FEC,
author = "John Gunnar Carlsson and Benjamin Armbruster and Yinyu
Ye",
title = "Finding equitable convex partitions of points in a
polygon efficiently",
journal = j-TALG,
volume = "6",
number = "4",
pages = "72:1--72:??",
month = aug,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1824777.1824792",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Previous work has developed algorithms for finding an
equitable convex partition that partitions the plane
into $n$ convex pieces each containing an equal number of
red and blue points. Motivated by a vehicle routing
heuristic, we look at a related problem where each
piece must contain one point and an equal fraction of
the area of some convex polygon. We first show how
algorithms for solving the older problem lead to
approximate solutions for this new equitable convex
partition problem. Then we demonstrate a new algorithm
that finds an exact solution to our problem in $ O (N n
\log N) $ time or operations, where $n$ is the number of
points, $m$ the number of vertices or edges of the
polygon, and $ N \colon = n + m $ the sum.",
acknowledgement = ack-nhfb,
articleno = "72",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Rutter:2010:CLM,
author = "Ignaz Rutter and Alexander Wolff",
title = "Computing large matchings fast",
journal = j-TALG,
volume = "7",
number = "1",
pages = "1:1--1:??",
month = nov,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1868237.1868238",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/string-matching.bib;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article we present algorithms for computing
large matchings in 3-regular graphs, graphs with
maximum degree 3, and 3-connected planar graphs. The
algorithms give a guarantee on the size of the computed
matching and take linear or slightly superlinear time.
Thus they are faster than the best-known algorithm for
computing maximum matchings in general graphs, which
runs in $ O(\sqrt {n} m) $ time, where $n$ denotes the
number of vertices and $m$ the number of edges of the
given graph. For the classes of 3-regular graphs and
graphs with maximum degree 3, the bounds we achieve are
known to be best possible. We also investigate graphs
with block trees of bounded degree, where the $d$-block
tree is the adjacency graph of the $d$-connected
components of the given graph. In 3-regular graphs and
3-connected planar graphs with bounded-degree 2- and
4-block trees, respectively, we show how to compute
maximum matchings in slightly superlinear time.",
acknowledgement = ack-nhfb,
articleno = "1",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Iwama:2010:AAS,
author = "Kazuo Iwama and Shuichi Miyazaki and Hiroki
Yanagisawa",
title = "Approximation algorithms for the sex-equal stable
marriage problem",
journal = j-TALG,
volume = "7",
number = "1",
pages = "2:1--2:??",
month = nov,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1868237.1868239",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The stable marriage problem is a classical matching
problem introduced by Gale and Shapley. It is known
that for any instance, there exists a solution, and
there is a polynomial time algorithm to find one.
However, the matching obtained by this algorithm is
man-optimal, that is, the matching is favorable for men
but unfavorable for women (or, if we exchange the
roles of men and women, the resulting matching is
woman-optimal). The sex-equal stable marriage problem,
posed by Gusfield and Irving, seeks a stable matching
``fair'' for both genders. Specifically it seeks a
stable matching with the property that the sum of the
men's scores is as close as possible to that of the
women's. This problem is known to be strongly NP-hard.
In this paper, we give a polynomial time algorithm for
finding a near optimal solution for the sex-equal
stable marriage problem. Furthermore, we consider the
problem of optimizing an additional criterion: among
stable matchings that are near optimal in terms of the
sex-equality, find a minimum egalitarian stable
matching. We show that this problem is strongly
NP-hard, and give a polynomial time algorithm whose
approximation ratio is less than two.",
acknowledgement = ack-nhfb,
articleno = "2",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Djidjev:2010:FAC,
author = "Hristo N. Djidjev",
title = "A faster algorithm for computing the girth of planar
and bounded genus graphs",
journal = j-TALG,
volume = "7",
number = "1",
pages = "3:1--3:??",
month = nov,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1868237.1868240",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The girth of a graph $G$ is the length of a shortest
cycle of $G$. In this article we design an $ O(n^{5 /
4} \log n)$ algorithm for finding the girth of an
undirected $n$-vertex planar graph, the first $ o(n^2)$
algorithm for this problem. We also extend our results
for the class of graphs embedded into an orientable
surface of small genus. Our approach uses several
techniques such as graph partitioning, hammock
decomposition, graph covering, and dynamic
shortest-path computation. We discuss extensions and
generalizations of our result.",
acknowledgement = ack-nhfb,
articleno = "3",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Baier:2010:LBC,
author = "Georg Baier and Thomas Erlebach and Alexander Hall and
Ekkehard K{\"o}hler and Petr Kolman and Ondrej
Pangr{\'a}c and Heiko Schilling and Martin Skutella",
title = "Length-bounded cuts and flows",
journal = j-TALG,
volume = "7",
number = "1",
pages = "4:1--4:??",
month = nov,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1868237.1868241",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "For a given number $L$, an $L$-length-bounded edge-cut
(node-cut, respectively) in a graph $G$ with source $s$
and sink $t$ is a set $C$ of edges (nodes,
respectively) such that no $s$--$t$-path of length at
most $L$ remains in the graph after removing the edges
(nodes, respectively) in $C$. An $L$-length-bounded
flow is a flow that can be decomposed into flow paths
of length at most $L$. In contrast to classical flow
theory, we describe instances for which the minimum
$L$-length-bounded edge-cut (node-cut, respectively) is
$ \Theta (n^{2 / 3})$ times (respectively, $ \Theta
(\sqrt {n})$ times) larger than the maximum
$L$-length-bounded flow, where $n$ denotes the number of
nodes; this is the worst case. We show that the minimum
length-bounded cut problem is NP-hard to approximate
within a factor of $ 1.1377$ for $ L \geq 5$ in the
case of node-cuts and for $ L \geq 4$ in the case of
edge-cuts. We also describe algorithms with
approximation ratio $ O(\min \{ L, n / L \}) \subseteq
O (\sqrt {n})$ in the node case and $ O (\min \{ L, n^2
/ L^2, \sqrt {m} \}) \subseteq O(n^{2 / 3})$ in the
edge case, where $m$ denotes the number of edges.
Concerning $L$-length-bounded flows, we show that in
graphs with unit capacities and general edge lengths it
is NP-complete to decide whether there is a fractional
length-bounded flow of a given value. We analyze the
structure of optimal solutions and present further
complexity results.",
acknowledgement = ack-nhfb,
articleno = "4",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Baswana:2010:ASS,
author = "Surender Baswana and Telikepalli Kavitha and Kurt
Mehlhorn and Seth Pettie",
title = "Additive spanners and $ (\alpha, \beta)$-spanners",
journal = j-TALG,
volume = "7",
number = "1",
pages = "5:1--5:??",
month = nov,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1868237.1868242",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "An $ (\alpha, \beta)$-spanner of an unweighted graph
$G$ is a subgraph $H$ that distorts distances in $G$ up
to a multiplicative factor of $ \alpha $ and an
additive term $ \beta $. It is well known that any
graph contains a (multiplicative) $ (2 k - 1,
0)$-spanner of size $ O (n^{1 + 1 / k})$ and an
(additive) $ (1, 2)$-spanner of size $ O (n^{3 / 2})$.
However no other additive spanners are known to exist.
In this article we develop a couple of new techniques
for constructing $ (\alpha, \beta)$-spanners. Our first
result is an additive (1,6)-spanner of size $ O (n^{4 /
3})$. The construction algorithm can be understood as
an economical agent that assigns costs and values to
paths in the graph, purchasing affordable paths and
ignoring expensive ones, which are intuitively well
approximated by paths already purchased. We show that
this path buying algorithm can be parameterized in
different ways to yield other sparseness-distortion
tradeoffs. Our second result addresses the problem of
which $ (\alpha, \beta)$-spanners can be computed
efficiently, ideally in linear time. We show that, for
any $k$, a $ (k, k - 1)$-spanner with size $ O (k n^{1
+ 1 / k})$ can be found in linear time, and, further,
that in a distributed network the algorithm terminates
in a constant number of rounds. Previous spanner
constructions with similar performance had roughly
twice the multiplicative distortion.",
acknowledgement = ack-nhfb,
articleno = "5",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Flammini:2010:BSP,
author = "Michele Flammini and Gaia Nicosia",
title = "On the bicriteria $k$-server problem",
journal = j-TALG,
volume = "7",
number = "1",
pages = "6:1--6:??",
month = nov,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1868237.1868244",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article we consider multicriteria formulations
of classical online problems in which an algorithm must
simultaneously perform well with respect to two
different cost measures. Every strategy for serving a
sequence of requests is characterized by a pair of
costs and therefore there can be many different minimal
or optimal incomparable solutions. The adversary is
assumed to choose from one of these minimal strategies
and the performance of the algorithm is measured with
respect to the costs the adversary pays servicing the
sequence according to its determined choice of
strategy. We consider a parametric family of functions
which includes all the possible selections for such
strategies. Then, starting from a simple general method
that combines any multicriteria instance into a
single-criterion one, we provide a universal
multicriteria algorithm that can be applied to
different online problems. In the multicriteria
$k$-server formulation with two different edge
weightings, for each function class, such a universal
algorithm achieves competitive ratios that are only an
$ O(\log W) $ multiplicative factor away from the
corresponding determined lower bounds, where $W$ is the
maximum ratio between the two weights associated with
each edge. We then extend our results to two specific
functions, for which nearly optimal competitive
algorithms are obtained by exploiting more knowledge of
the selection properties. Finally, we show how to apply
our framework to other multicriteria online problems
sharing similar properties.",
acknowledgement = ack-nhfb,
articleno = "6",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Epstein:2010:OUC,
author = "Leah Epstein and Rob {Van Stee}",
title = "On the online unit clustering problem",
journal = j-TALG,
volume = "7",
number = "1",
pages = "7:1--7:??",
month = nov,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1868237.1868245",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We continue the study of the online unit clustering
problem, introduced by Chan and Zarrabi-Zadeh (Workshop
on Approximation and Online Algorithms 2006, LNCS 4368,
pp. 121--131, Springer, 2006). We design a
deterministic algorithm with a competitive ratio of 7/4
for the one-dimensional case. This is the first
deterministic algorithm that beats the bound of 2. It
also has a better competitive ratio than the previous
randomized algorithms. Moreover, we provide the first
non-trivial deterministic lower bound, improve the
randomized lower bound, and prove the first lower
bounds for higher dimensions.",
acknowledgement = ack-nhfb,
articleno = "7",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Gao:2010:CLH,
author = "Jie Gao and Michael Langberg and Leonard J. Schulman",
title = "Clustering lines in high-dimensional space:
Classification of incomplete data",
journal = j-TALG,
volume = "7",
number = "1",
pages = "8:1--8:??",
month = nov,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1868237.1868246",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A set of $k$ balls $ B_1, \ldots, B_k $ in a Euclidean
space is said to cover a collection of lines if every
line intersects some ball. We consider the $k$-center
problem for lines in high-dimensional space: Given a
set of $n$ lines $ \ell = \{ \ell_1, \ldots, \ell_n \} $ in $ R^d $,
find $k$ balls of minimum radius which cover $ \ell $. We
present a 2-approximation algorithm for the cases $ k =
2, 3 $ of this problem, having running time quasi-linear
in the number of lines and the dimension of the ambient
space. Our result for 3-clustering is strongly based on
a new result in discrete geometry that may be of
independent interest: a Helly-type theorem for
collections of axis-parallel ``crosses'' in the plane.
The family of crosses does not have finite Helly number
in the usual sense. Our Helly theorem is of a new type:
it depends on $ \epsilon $-contracting the sets. In
statistical practice, data is often incompletely
specified; we consider lines as the most elementary
case of incompletely specified data points. Clustering
of data is a key primitive in nonparametric statistics.
Our results provide a way of performing this primitive
on incomplete data, as well as imputing the missing
values.",
acknowledgement = ack-nhfb,
articleno = "8",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Cook:2010:GFD,
author = "Atlas F. {Cook IV} and Carola Wenk",
title = "Geodesic {Fr{\'e}chet} distance inside a simple
polygon",
journal = j-TALG,
volume = "7",
number = "1",
pages = "9:1--9:??",
month = nov,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1868237.1868247",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present an alternative to parametric search that
applies to both the nongeodesic and geodesic
Fr{\'e}chet optimization problems. This randomized
approach is based on a variant of red-blue
intersections and is appealing due to its elegance and
practical efficiency when compared to parametric
search. We introduce the first algorithm to compute the
geodesic Fr{\'e}chet distance between two polygonal
curves $A$ and $B$ inside a simple bounding polygon $P$. The
geodesic Fr{\'e}chet decision problem is solved almost
as fast as its nongeodesic sibling in $ O (N^2 \log k)
$ time and $ O (k + N) $ space after $ O(k) $
preprocessing, where $N$ is the larger of the
complexities of $A$ and $B$ and $k$ is the complexity
of $P$. The geodesic Fr{\'e}chet optimization problem
is solved by a randomized approach in $ O (k + N^2 \log
k N \log N)$ expected time and $ O (k + N^2)$ space.
This runtime is only a logarithmic factor larger than
the standard nongeodesic Fr{\'e}chet algorithm [Alt and
Godau 1995]. Results are also presented for the
geodesic Fr{\'e}chet distance in a polygonal domain
with obstacles and the geodesic Hausdorff distance for
sets of points or sets of line segments inside a simple
polygon $P$.",
acknowledgement = ack-nhfb,
articleno = "9",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Ferragina:2010:CPI,
author = "Paolo Ferragina and Rossano Venturini",
title = "The compressed permuterm index",
journal = j-TALG,
volume = "7",
number = "1",
pages = "10:1--10:??",
month = nov,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1868237.1868248",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The Permuterm index [Garfield 1976] is a
time-efficient and elegant solution to the string
dictionary problem in which pattern queries may
possibly include one wild-card symbol (called Tolerant
Retrieval problem). Unfortunately the Permuterm index
is space inefficient because it quadruples the
dictionary size. In this article we propose the
Compressed Permuterm Index which solves the Tolerant
Retrieval problem in time proportional to the length of
the searched pattern, and space close to the $k$th-order
empirical entropy of the indexed dictionary. We
also design a dynamic version of this index that allows
one to efficiently manage insertion in, and deletion from,
the dictionary of individual strings. The result is
based on a simple variant of the Burrows--Wheeler
Transform, defined on a dictionary of strings of
variable length, that allows the Tolerant Retrieval
problem to be solved efficiently via known (dynamic)
compressed indexes [Navarro and M{\"a}kinen 2007]. We
will complement our theoretical study with a
significant set of experiments that show that the
Compressed Permuterm Index supports fast queries within
a space occupancy that is close to the one achievable
by compressing the string dictionary via gzip or bzip.
This improves upon known approaches based on Front-Coding
[Witten et al. 1999] by more than 50\% in absolute
space occupancy, while still guaranteeing comparable query
time.",
acknowledgement = ack-nhfb,
articleno = "10",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Agarwal:2010:EBU,
author = "Pankaj K. Agarwal and Lars Arge and Ke Yi",
title = "{I/O}-efficient batched union--find and its
applications to terrain analysis",
journal = j-TALG,
volume = "7",
number = "1",
pages = "11:1--11:??",
month = nov,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1868237.1868249",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article we present an I/O-efficient algorithm
for the batched (off-line) version of the union-find
problem. Given any sequence of $N$ union and find
operations, where each union operation joins two
distinct sets, our algorithm uses $ O (\SORT (N)) = O
(\frac N B \log_{M / B} \frac N B)$ I/Os, where $M$ is
the memory size and $B$ is the disk block size. This
bound is asymptotically optimal in the worst case. If
there are union operations that join a set with itself,
our algorithm uses $ O (\SORT (N) + \MST (N))$ I/Os,
where $ \MST (N)$ is the number of I/Os needed to
compute the minimum spanning tree of a graph with $N$
edges. We also describe a simple and practical $ O
(\SORT (N) \log (\frac N M))$-I/O algorithm for this
problem, which we have implemented. We are interested
in the union-find problem because of its applications
in terrain analysis. A terrain can be abstracted as a
height function defined over $ R^2$, and many problems
that deal with such functions require a union-find data
structure. With the emergence of modern mapping
technologies, huge amounts of elevation data are being
generated that are too large to fit in memory, so
I/O-efficient algorithms are needed to process this
data efficiently. In this article, we study two
terrain-analysis problems that benefit from a
union-find data structure: (i) computing topological
persistence and (ii) constructing the contour tree. We
give the first $ O(\SORT (N))$-I/O algorithms for these
two problems, assuming that the input terrain is
represented as a triangular mesh with $N$ vertices.",
acknowledgement = ack-nhfb,
articleno = "11",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Goel:2010:HPE,
author = "Ashish Goel and Sudipto Guha and Kamesh Munagala",
title = "How to probe for an extreme value",
journal = j-TALG,
volume = "7",
number = "1",
pages = "12:1--12:??",
month = nov,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1868237.1868250",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In several systems applications, parameters such as
load are known only with some associated uncertainty,
which is specified, or modeled, as a distribution over
values. The performance of the system optimization and
monitoring schemes can be improved by spending
resources such as time or bandwidth in observing or
resolving the values of these parameters. In a
resource-constrained situation, deciding which
parameters to observe in order to best optimize the
expected system performance (or in general, optimize
the expected value of a certain objective function)
itself becomes an interesting optimization problem. In
this article, we initiate the study of such problems
that we term ``model-driven optimization''. In
particular, we study the problem of optimizing the
minimum value in the presence of observable
distributions. We show that this problem is NP-Hard,
and present greedy algorithms with good performance
bounds. The proofs of the performance bounds are via
novel submodularity arguments and connections to
covering integer programs.",
acknowledgement = ack-nhfb,
articleno = "12",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Caragiannis:2010:TLA,
author = "Ioannis Caragiannis and Christos Kaklamanis and
Panagiotis Kanellopoulos",
title = "Taxes for linear atomic congestion games",
journal = j-TALG,
volume = "7",
number = "1",
pages = "13:1--13:??",
month = nov,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1868237.1868251",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Thu Oct 1 15:37:27 MDT 2015",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study congestion games where players aim to access
a set of resources. Each player has a set of possible
strategies and each resource has a function associating
the latency it incurs to the players using it. Players
are non-cooperative and each wishes to follow a
strategy that minimizes her own latency with no regard
to the global optimum. Previous work has studied the
impact of this selfish behavior on system performance.
In this article, we study the question of how much the
performance can be improved if players are forced to
pay taxes for using resources. Our objective is to
extend the original game so that selfish behavior does
not deteriorate performance. We consider atomic
congestion games with linear latency functions and
present both negative and positive results. Our
negative results show that optimal system performance
cannot be achieved even in very simple games. On the
positive side, we show that there are ways to assign
taxes that can improve the performance of linear
congestion games by forcing players to follow
strategies where the total latency suffered is within a
factor of 2 of the minimum possible; this result is
shown to be tight. Furthermore, even in cases where in
the absence of taxes the system behavior may be very
poor, we show that the total disutility of players
(latency plus taxes) is not much larger than the
optimal total latency. Besides existential results, we
show how to compute taxes in time polynomial in the
size of the game by solving convex quadratic programs.
Similar questions have been extensively studied in the
model of non-atomic congestion games. To the best of
our knowledge, this is the first study of the
efficiency of taxes in atomic congestion games.",
acknowledgement = ack-nhfb,
articleno = "13",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Georgiadis:2011:DSM,
author = "Loukas Georgiadis and Haim Kaplan and Nira Shafrir and
Robert E. Tarjan and Renato F. Werneck",
title = "Data structures for mergeable trees",
journal = j-TALG,
volume = "7",
number = "2",
pages = "14:1--14:??",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1921659.1921660",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:38 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Motivated by an application in computational geometry,
we consider a novel variant of the problem of
efficiently maintaining a forest of dynamic rooted
trees. This variant includes an operation that merges
two tree paths. In contrast to the standard problem, in
which a single operation can only add or delete one
arc, one merge can add and delete up to a linear number
of arcs. In spite of this, we develop three different
methods that need only polylogarithmic time per
operation. The first method extends a solution of
Farach and Thorup [1998] for the special case of paths.
Each merge takes {$ O(\log^2 n) $} amortized time on an
{$n$}-node forest and each standard dynamic tree
operation takes {$ O(\log n) $} time; the latter bound
is amortized, worst case, or randomized depending on
the underlying data structure. For the special case
that occurs in the motivating application, in which
arbitrary arc deletions (cuts) do not occur, we give a
method that takes {$ O(\log n) $} time per operation,
including merging. This is best possible in a model of
computation with an {$ \Omega (n \log n) $} lower bound
for sorting {$n$} numbers, since such sorting can be
done in {$ O(n) $} tree operations. For the
even-more-special case in which there are no cuts and
no parent queries, we give a method that uses standard
dynamic trees as a black box: each mergeable tree
operation becomes a constant number of standard dynamic
tree operations. This third method can also be used in
the motivating application, but only by changing the
algorithm in the application. Each of our three methods
needs different analytical tools and reveals different
properties of dynamic trees.",
acknowledgement = ack-nhfb,
articleno = "14",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Chakaravarthy:2011:DTE,
author = "Venkatesan T. Chakaravarthy and Vinayaka Pandit and
Sambuddha Roy and Pranjal Awasthi and Mukesh K.
Mohania",
title = "Decision trees for entity identification:
{Approximation} algorithms and hardness results",
journal = j-TALG,
volume = "7",
number = "2",
pages = "15:1--15:??",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1921659.1921661",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:38 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the problem of constructing decision trees
for entity identification from a given relational
table. The input is a table containing information
about a set of entities over a fixed set of attributes
and a probability distribution over the set of entities
that specifies the likelihood of the occurrence of each
entity. The goal is to construct a decision tree that
identifies each entity unambiguously by testing the
attribute values such that the average number of tests
is minimized. This classical problem finds such diverse
applications as efficient fault detection, species
identification in biology, and efficient diagnosis in
the field of medicine. Prior work mainly deals with the
special case where the input table is binary and the
probability distribution over the set of entities is
uniform. We study the general problem involving
arbitrary input tables and arbitrary probability
distributions over the set of entities. We consider a
natural greedy algorithm and prove an approximation
guarantee of {$ O(r_K \cdot \log N) $}, where {$N$} is
the number of entities and {$K$} is the maximum number
of distinct values of an attribute. The value {$ r_K $}
is a suitably defined Ramsey number, which is at most
{$ \log K $}. We show that it is NP-hard to approximate
the problem within a factor of {$ \Omega (\log N) $},
even for binary tables (i.e., {$ K = 2 $}). Thus, for
the case of binary tables, our approximation algorithm
is optimal up to constant factors (since {$ r_2 = 2
$}). In addition, our analysis indicates a possible way
of resolving a Ramsey-theoretic conjecture by
Erd{\H{o}}s.",
acknowledgement = ack-nhfb,
articleno = "15",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Jacobs:2011:CFA,
author = "Tobias Jacobs",
title = "Constant factor approximations for the hotlink
assignment problem",
journal = j-TALG,
volume = "7",
number = "2",
pages = "16:1--16:??",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1921659.1921662",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:38 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The concept of hotlink assignment aims at reducing the
navigation effort for the users of a Web directory or
similar structure by inserting a limited number of
additional hyperlinks called hotlinks. The $k$-hotlink
assignment problem denotes the task of adding at most
$k$ outgoing hotlinks to each page of a tree-like site,
minimizing the path length, that is, the expected
number of ``clicks'' necessary for the user to reach
her destination page. Another common formulation of
this problem is to maximize the gain, that is, the path
length reduction achieved by the assignment. In this
work we analyze the natural greedy strategy, proving
that it reaches the optimal gain up to the constant
factor of 2. Considering the gain, we also prove the
existence of a PTAS. Finally, we give a polynomial-time
2-approximation for the 1-hotlink assignment problem,
which constitutes the first constant factor
approximation in terms of the path length. The
algorithms' performance analyses are made possible by a
set of three new basic operations for the
transformation of hotlink assignments.",
acknowledgement = ack-nhfb,
articleno = "16",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Ambuhl:2011:TEL,
author = "Christoph Amb{\"u}hl and Leszek Gasieniec and Andrzej
Pelc and Tomasz Radzik and Xiaohui Zhang",
title = "Tree exploration with logarithmic memory",
journal = j-TALG,
volume = "7",
number = "2",
pages = "17:1--17:??",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1921659.1921663",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:38 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the task of network exploration by a
mobile agent (robot) with small memory. The agent has
to traverse all nodes and edges of a network
(represented as an undirected connected graph), and
return to the starting node. Nodes of the network are
unlabeled and edge ports are locally labeled at each
node. The agent has no a priori knowledge of the
topology of the network or of its size, and cannot mark
nodes in any way. Under such weak assumptions, cycles
in the network may prevent feasibility of exploration,
hence we restrict attention to trees. We present an
algorithm to accomplish tree exploration (with return)
using {$ O(\log n) $}-bit memory for all {$n$}-node
trees. This strengthens the result from Diks et al.
[2004], where {$ O(\log^2 n) $}-bit memory was used for
tree exploration, and matches the lower bound on memory
size proved there. We also extend our {$ O(\log n)
$}-bit memory traversal mechanism to a weaker model in
which ports at each node are ordered in a circular
manner; however, the explicit values of port numbers
are not available.",
acknowledgement = ack-nhfb,
articleno = "17",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Chekuri:2011:SCP,
author = "Chandra Chekuri and Guy Even and Anupam Gupta and
Danny Segev",
title = "Set connectivity problems in undirected graphs and the
directed {Steiner} network problem",
journal = j-TALG,
volume = "7",
number = "2",
pages = "18:1--18:??",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1921659.1921664",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:38 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In the generalized connectivity problem, we are given
an edge-weighted graph {$ G = (V, E) $} and a
collection {$ D = \{ (S_1, T_1), \ldots {}, (S_k, T_k)
\} $} of distinct demands; each demand {$ (S_i, T_i) $}
is a pair of disjoint vertex subsets. We say that a
subgraph {$F$} of {$G$} connects a demand {$ (S_i, T_i)
$} when it contains a path with one endpoint in {$ S_i
$} and the other in {$ T_i $}. The goal is to identify
a minimum weight subgraph that connects all demands in
{$D$}. Alon et al. (SODA '04) introduced this problem to
study online network formation settings and showed that
it captures some well-studied problems such as Steiner
forest, facility location with nonmetric costs, tree
multicast, and group Steiner tree. Obtaining a
nontrivial approximation ratio for generalized
connectivity was left as an open problem. We describe
the first poly-logarithmic approximation algorithm for
generalized connectivity that has a performance
guarantee of {$ O(\log^2 n \log^2 k) $}. Here, {$n$} is
the number of vertices in {$G$} and {$k$} is the number
of demands. We also prove that the cut-covering
relaxation of this problem has an {$ O(\log^3 n \log^2
k) $} integrality gap. Building upon the results for
generalized connectivity, we obtain improved
approximation algorithms for two problems that contain
generalized connectivity as a special case. For the
directed Steiner network problem, we obtain an {$
O(k^{1 / 2 + \epsilon }) $} approximation which
improves on the currently best performance guarantee of
{$ \tilde {O}(k^{2 / 3}) $} due to Charikar et al.
(SODA '98). For the set connector problem, recently
introduced by Fukunaga and Nagamochi (IPCO '07), we
present a poly-logarithmic approximation; this result
improves on the previously known ratio which can be {$
\Omega (n) $} in the worst case.",
acknowledgement = ack-nhfb,
articleno = "18",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{DeVerdiere:2011:SVD,
author = "{\'E}ric Colin {De Verdi{\`e}re} and Alexander
Schrijver",
title = "Shortest vertex-disjoint two-face paths in planar
graphs",
journal = j-TALG,
volume = "7",
number = "2",
pages = "19:1--19:??",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1921659.1921665",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:38 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Let {$G$} be a directed planar graph of complexity
{$n$}, each arc having a nonnegative length. Let {$s$}
and {$t$} be two distinct faces of {$G$}; let {$ s_1,
\ldots {}, s_k $} be vertices incident with {$s$}, and let
{$ t_1, \ldots {}, t_k $} be vertices incident with
$t$. We give an algorithm to compute $k$ pairwise
vertex-disjoint paths connecting the pairs $ (s_i, t_i)
$ in {$G$}, with minimal total length, in {$ O(k n \log
n) $} time.",
acknowledgement = ack-nhfb,
articleno = "19",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Elkin:2011:SFD,
author = "Michael Elkin",
title = "Streaming and fully dynamic centralized algorithms for
constructing and maintaining sparse spanners",
journal = j-TALG,
volume = "7",
number = "2",
pages = "20:1--20:??",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1921659.1921666",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:38 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present a streaming algorithm for constructing
sparse spanners and show that our algorithm
significantly outperforms the state-of-the-art
algorithm for this task (due to Feigenbaum et al.).
Specifically, the processing time per edge of our
algorithm is {$ O(1) $}, drastically smaller
than that of the algorithm of Feigenbaum et al., and
all other efficiency parameters of our algorithm are no
greater (and some of them are strictly smaller) than
the respective parameters of the state-of-the-art
algorithm. We also devise a fully dynamic centralized
algorithm maintaining sparse spanners. This algorithm
has incremental update time of {$ O(1) $}, and a
nontrivial decremental update time. To our knowledge,
this is the first fully dynamic centralized algorithm
for maintaining sparse spanners that provides
nontrivial bounds on both incremental and decremental
update time for a wide range of stretch parameter
{$t$}.",
acknowledgement = ack-nhfb,
articleno = "20",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Cormode:2011:ADF,
author = "Graham Cormode and S. Muthukrishnan and Ke Yi",
title = "Algorithms for distributed functional monitoring",
journal = j-TALG,
volume = "7",
number = "2",
pages = "21:1--21:??",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1921659.1921667",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:38 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Consider the following problem: We have $k$ players
each receiving a stream of items, and communicating
with a central coordinator. Let the multiset of items
received by player $i$ up until time $t$ be {$ A_i(t)
$}. The coordinator's task is to monitor a given
function {$f$} computed over the union of the inputs {$
\cup_i A_i(t) $}, continuously at all times {$t$}. The
goal is to minimize the number of bits communicated
between the players and the coordinator. Of interest is
the approximate version where the coordinator outputs
{$1$} if {$ f \geq \tau $} and $0$ if $ f \leq (1 -
\epsilon) \tau $. This defines the $ (k, f, \tau,
\epsilon) $ distributed functional monitoring problem.
Functional monitoring problems are fundamental in
distributed systems, in particular sensor networks,
where we must minimize communication; they also connect
to the well-studied streaming model and communication
complexity. Yet few formal bounds are known for
functional monitoring. We give upper and lower bounds
for the $ (k, f, \tau, \epsilon) $ problem for some of
the basic $f$'s. In particular, we study the frequency
moments {$ F_p $} for $ p = 0, 1, 2 $. For {$ F_0 $} and {$
F_1 $}, we obtain monitoring algorithms with cost
almost the same as algorithms that compute the function
for a single instance of time. However, for {$ F_2 $}
the monitoring problem seems to be much harder than
computing the function for a single time instance. We
give a carefully constructed multiround algorithm that
uses ``sketch summaries'' at multiple levels of detail
and solves the {$ (k, F_2, \tau, \epsilon) $} problem
with communication {$ \tilde {O} (k^2 / \epsilon + k^{3
/ 2} / \epsilon^3) $}. Our algorithmic techniques are
likely to be useful for other functional monitoring
problems as well.",
acknowledgement = ack-nhfb,
articleno = "21",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Halldorsson:2011:SEC,
author = "Magn{\'u}s M. Halld{\'o}rsson and Guy Kortsarz and
Maxim Sviridenko",
title = "Sum edge coloring of multigraphs via configuration
{LP}",
journal = j-TALG,
volume = "7",
number = "2",
pages = "22:1--22:21",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1921659.1921668",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:38 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the scheduling of biprocessor jobs under
sum objective (BPSMSM). Given a collection of
unit-length jobs where each job requires the use of two
processors, find a schedule such that no two jobs
involving the same processor run concurrently. The
objective is to minimize the sum of the completion
times of the jobs. Equivalently, we would like to find
a sum edge coloring of a given multigraph, that is, a
partition of its edge set into matchings {$ M_1 $},
\ldots {}, {$ M_t $} minimizing {$ \sum_{i = 1}^{t} i
|M_i| $}.\par
This problem is APX-hard, even in the case of bipartite
graphs [Marx 2009]. This special case is closely
related to the classic open shop scheduling problem. We
give a 1.8298-approximation algorithm for BPSMSM,
improving the previously best known ratio of 2 [Bar-Noy
et al. 1998]. The algorithm combines a configuration LP
with greedy methods, using nonstandard randomized
rounding on the LP fractions. We also give an efficient
combinatorial 1.8886-approximation algorithm for the
case of simple graphs, which gives an improved {$
1.79568 + O(\log \bar {d} / \bar {d}) $}-approximation
in graphs of large average degree {$ \bar {d} $}.",
acknowledgement = ack-nhfb,
articleno = "22",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Ben-Aroya:2011:CAF,
author = "Avraham Ben-Aroya and Sivan Toledo",
title = "Competitive analysis of flash memory algorithms",
journal = j-TALG,
volume = "7",
number = "2",
pages = "23:1--23:??",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1921659.1921669",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:38 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Flash memories are widely used in computer systems
ranging from embedded systems to workstations and
servers to digital cameras and mobile phones. The
memory cells of flash devices can only endure a limited
number of write cycles, usually between 10,000 and
1,000,000. Furthermore, cells containing data must be
erased before they can store new data, and erasure
operations erase large blocks of memory, not individual
cells. To maximize the endurance of the device (the
amount of useful data that can be written to it before
one of its cells wears out), flash-based systems move
data around in an attempt to reduce the total number of
erasures and to level the wear of the different erase
blocks. This data movement introduces an interesting
online problem called the wear-leveling problem.
Wear-leveling algorithms have been used at least since
1993, but they have never been mathematically analyzed.
In this article we analyze the two main wear-leveling
problems. We show that a simple randomized algorithm
for one of them is essentially optimal both in the
competitive sense and in the absolute sense (our
competitive result relies on an analysis of a
nearly-optimal offline algorithm). We show that
deterministic algorithms cannot achieve comparable
endurance. We also analyze a more difficult problem and
show that offline algorithms for it can improve upon
naive approaches, but that online algorithms
essentially cannot.",
acknowledgement = ack-nhfb,
articleno = "23",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Aumann:2011:FWP,
author = "Yonatan Aumann and Moshe Lewenstein and Noa Lewenstein
and Dekel Tsur",
title = "Finding witnesses by peeling",
journal = j-TALG,
volume = "7",
number = "2",
pages = "24:1--24:??",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1921659.1921670",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:38 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In the $k$-matches problem, we are given a pattern and
a text, and for each text location, the desired output
consists of all aligned matching characters if there
are $k$ or fewer of them, and any $k$ aligned matching
characters if there are more than $k$ of them. This
problem is one of several string matching problems that
seek not only to find where the pattern matches the
text under different ``match'' definitions, but also to
provide witnesses to the match. Other such problems
include $k$-aligned ones, $k$-witnesses, and
$k$-mismatches. In addition, the solutions to several
other string matching problems rely on the efficient
solutions of the witness finding problems. In this
article we provide a general method for solving such
witness finding problems efficiently. We do so by
casting the problem as a generalization of group
testing, which we then solve by a process we call
peeling. Using this general framework we obtain
improved results for all of the problems mentioned. We
also show that our method solves a couple of
problems outside the pattern matching domain.",
acknowledgement = ack-nhfb,
articleno = "24",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Choi:2011:CPM,
author = "Yongwook Choi and Wojciech Szpankowski",
title = "Constrained pattern matching",
journal = j-TALG,
volume = "7",
number = "2",
pages = "25:1--25:??",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1921659.1921671",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:38 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Constrained sequences are strings satisfying certain
additional structural restrictions (e.g., some patterns
are forbidden). They find applications in
communication, digital recording, and biology. In this
article, we restrict our attention to the so-called $
(d, k) $ constrained binary sequences in which any run
of zeros must be of length at least $d$ and at most
$k$, where $ 0 \leq d < k $. In many applications, one
needs to know the number of occurrences of a given
pattern $w$ in such sequences, for which we coin the
term constrained pattern matching. For a given word
$w$, we first estimate the mean and the variance of the
number of occurrences of $w$ in a $ (d, k) $ sequence
generated by a memoryless source. Then we present the
central limit theorem and large deviations results. As
a by-product, we enumerate asymptotically the number of
$ (d, k) $ sequences with exactly $r$ occurrences of
$w$, and compute the Shannon entropy of $ (d, k) $
sequences with a given number of occurrences of $w$. We
also apply our results to detect under- and
overrepresented patterns in neuronal data (spike
trains), which satisfy structural constraints that
match the framework of $ (d, k) $ binary sequences.
Throughout this article we use techniques of analytic
combinatorics such as combinatorial calculus,
generating functions, and complex asymptotics.",
acknowledgement = ack-nhfb,
articleno = "25",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Fu:2011:DAH,
author = "Bin Fu and Ming-Yang Kao and Lusheng Wang",
title = "Discovering almost any hidden motif from multiple
sequences",
journal = j-TALG,
volume = "7",
number = "2",
pages = "26:1--26:??",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1921659.1921672",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:38 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study a natural probabilistic model for motif
discovery. In this model, there are $k$ background
sequences, and each character in a background sequence
is a random character from an alphabet {$ \Sigma $}. A
motif {$ G = g_1, g_2, \ldots {}, g_m $} is a string of
{$m$} characters. Each background sequence is implanted
with a probabilistically generated approximate copy of
{$G$}. For a probabilistically generated approximate
copy {$ b_1, b_2, \ldots {}, b_m $} of {$G$}, every
character is probabilistically generated such that the
probability for {$ b_i \neq g_i $} is at most {$ \alpha
$}. In this article, we develop an efficient algorithm
that can discover a hidden motif from a set of
sequences for any alphabet {$ \Sigma $} with {$ |
\Sigma | \geq 2 $} and is applicable to DNA motif
discovery. We prove that for {$ \alpha < 1 / 8 (1 - 1 /
| \Sigma |) $}, there exist positive constants {$ c_0
$}, {$ \epsilon $}, and {$ \delta_2 $} such that if
there are at least $ c_0 \log n $ input sequences, then
in {$ O(n^2 / h (\log n)^{O(1)}) $} time this algorithm
finds the motif with probability at least {$ 3 / 4 $}
for every {$ G \in \Sigma^\rho - \Psi_{\rho, h,
\epsilon }(\Sigma) $}, where {$n$} is the length of the
longest sequence, {$ \rho $} is the length of the
motif, {$h$} is a parameter with $ \rho \geq 4 h \geq
\delta_2 \log n $, and {$ \Psi_{\rho, h, \epsilon
}(\Sigma) $} is a small subset of at most {$ 2^{ -
\Theta (\epsilon^2 h)} $} fraction of the sequences in
{$ \Sigma^\rho $}.",
acknowledgement = ack-nhfb,
articleno = "26",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Nong:2011:CIS,
author = "Ge Nong and Sen Zhang and Wai Hong Chan",
title = "Computing the {Inverse Sort Transform} in linear
time",
journal = j-TALG,
volume = "7",
number = "2",
pages = "27:1--27:??",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1921659.1921673",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:38 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The Sort Transform (ST) can significantly speed up the
block sorting phase of the Burrows-Wheeler Transform
(BWT) by sorting the limited order contexts. However,
the best result obtained so far for the inverse ST has
a time complexity {$ O(N \log k) $} and a space
complexity {$ O(N) $}, where {$N$} and {$k$} are the
text size and the context order of the transform,
respectively. In this article, we present a novel
algorithm that can compute the inverse ST for any
{$k$}-order contexts in {$ O(N) $} time and space,
a linear result independent of {$k$}. The
main idea behind the design of this linear algorithm is
a set of cycle properties of {$k$}-order contexts that
we explore for this work. These newly discovered cycle
properties allow us to quickly compute the Longest
Common Prefix (LCP) between any pair of adjacent
{$k$}-order contexts that may belong to two different
cycles, which eventually leads to the proposed
linear-time solution.",
acknowledgement = ack-nhfb,
articleno = "27",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Friggstad:2011:MMM,
author = "Zachary Friggstad and Mohammad R. Salavatipour",
title = "Minimizing movement in mobile facility location
problems",
journal = j-TALG,
volume = "7",
number = "3",
pages = "28:1--28:??",
month = jul,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1978782.1978783",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In the mobile facility location problem, which is a
variant of the classical facility location, each
facility and client is assigned to a start location in
a metric graph and our goal is to find a destination
node for each client and facility such that every
client is sent to a node which is the destination of
some facility. The quality of a solution can be
measured either by the total distance clients and
facilities travel or by the maximum distance traveled
by any client or facility. As we show in this article
(by an approximation-preserving reduction), the problem
of minimizing the total movement of facilities and
clients generalizes the classical $k$-median problem.
The class of movement problems was introduced by
Demaine et al. [2007] where a simple 2-approximation
was proposed for the minimum maximum movement mobile
facility location problem while an approximation for
the minimum total movement variant and hardness results
for both were left as open problems. Our main result
here is an 8-approximation algorithm for the minimum
total movement mobile facility location problem. Our
algorithm is obtained by rounding an LP relaxation in
five phases. For the minimum maximum movement mobile
facility location problem, we show that we cannot obtain
an approximation ratio better than 2 for the problem unless
P = NP, so the simple algorithm proposed by Demaine et
al. [2007] is essentially best possible.",
acknowledgement = ack-nhfb,
articleno = "28",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Borodin:2011:HWC,
author = "Allan Borodin and David Cashman and Avner Magen",
title = "How well can primal-dual and local-ratio algorithms
perform?",
journal = j-TALG,
volume = "7",
number = "3",
pages = "29:1--29:??",
month = jul,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1978782.1978784",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We define an algorithmic paradigm, the stack model,
that captures many primal-dual and local-ratio
algorithms for approximating covering and packing
problems. The stack model is defined syntactically and
without any complexity limitations and hence our
approximation bounds are independent of the P versus NP
question. Using the stack model, we bound the
performance of a broad class of primal-dual and
local-ratio algorithms and supply a $ (\log n + 1) / 2
$ inapproximability result for set cover, a $ 4 / 3 $
inapproximability for min Steiner tree, and a $ 0.913 $
inapproximability for interval scheduling on two
machines.",
acknowledgement = ack-nhfb,
articleno = "29",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Chung:2011:CDK,
author = "Kai-Min Chung and Omer Reingold and Salil Vadhan",
title = "{S}-{T} connectivity on digraphs with a known
stationary distribution",
journal = j-TALG,
volume = "7",
number = "3",
pages = "30:1--30:??",
month = jul,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1978782.1978785",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present a deterministic logspace algorithm for
solving S-T Connectivity on directed graphs if: (i) we
are given a stationary distribution of the random walk
on the graph in which both of the input vertices $s$
and $t$ have nonnegligible probability mass and (ii)
the random walk which starts at the source vertex $s$
has polynomial mixing time. This result generalizes the
recent deterministic logspace algorithm for
{$S$}--{$T$} Connectivity on undirected graphs
[Reingold, 2008]. It identifies knowledge of the
stationary distribution as the gap between the
{$S$}--{$T$} Connectivity problems we know how to solve
in logspace (L) and those that capture all of
randomized logspace (RL).",
acknowledgement = ack-nhfb,
articleno = "30",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Gairing:2011:RSF,
author = "Martin Gairing and Burkhard Monien and Karsten
Tiemann",
title = "Routing (un-) splittable flow in games with
player-specific affine latency functions",
journal = j-TALG,
volume = "7",
number = "3",
pages = "31:1--31:??",
month = jul,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1978782.1978786",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this work we study weighted network congestion
games with player-specific latency functions where
selfish players wish to route their traffic through a
shared network. We consider both the case of splittable
and unsplittable traffic. Our main findings are as
follows. For routing games on parallel links with
linear latency functions, we introduce two new
potential functions for unsplittable and for splittable
traffic, respectively. We use these functions to derive
results on the convergence to pure Nash equilibria and
the computation of equilibria. For several
generalizations of these routing games, we show that
such potential functions do not exist. We prove tight
upper and lower bounds on the price of anarchy for
games with polynomial latency functions. All our
results on the price of anarchy translate to general
congestion games.",
acknowledgement = ack-nhfb,
articleno = "31",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Rosen:2011:RVB,
author = "Adi Ros{\'e}n and Gabriel Scalosub",
title = "Rate vs. buffer size --- greedy information gathering
on the line",
journal = j-TALG,
volume = "7",
number = "3",
pages = "32:1--32:??",
month = jul,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1978782.1978787",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider packet networks with limited buffer space
at the nodes, and are interested in the question of
maximizing the number of packets that arrive at their
destination rather than being dropped due to full
buffers. We initiate a more refined analysis of the
throughput competitive ratio of admission and
scheduling policies in the Competitive Network
Throughput model [Aiello et al. 2005], taking into
account not only the network size but also the buffer
size and the injection rate of the traffic. We
specifically consider the problem of information
gathering on the line, with limited buffer space, under
adversarial traffic. We examine how the buffer size and
the injection rate of the traffic affect the
performance of the greedy protocol for this problem. We
establish upper bounds on the competitive ratio of the
greedy protocol in terms of the network size, the
buffer size, and the adversary's rate, and present
lower bounds which are tight up to constant factors.
These results show, for example, that provisioning the
network with sufficiently large buffers may
substantially improve the performance of the greedy
protocol in some cases, whereas for some high-rate
adversaries, using larger buffers does not have any
effect on the competitive ratio of the protocol.",
acknowledgement = ack-nhfb,
articleno = "32",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Bonifaci:2011:MFT,
author = "Vincenzo Bonifaci and Peter Korteweg and Alberto
Marchetti-Spaccamela and Leen Stougie",
title = "Minimizing flow time in the wireless gathering
problem",
journal = j-TALG,
volume = "7",
number = "3",
pages = "33:1--33:??",
month = jul,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1978782.1978788",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We address the problem of efficient data gathering in
a wireless network through multihop communication. We
focus on two objectives related to flow times, that is,
the times spent by data packets in the system:
minimization of the maximum flow time and minimization
of the average flow time of the packets. For both
problems we prove that, unless P = NP, no
polynomial-time algorithm can approximate the optimal
solution within a factor less than {$ \Omega (m^{1 -
\epsilon }) $} for any {$ 0 < \epsilon < 1 $}, where
{$m$} is the number of packets. We then assess the
performance of two natural algorithms by proving that
their cost remains within the optimal cost of the
respective problem if we allow the algorithms to
transmit data at a speed 5 times higher than that of
the optimal solutions to which we compare them.",
acknowledgement = ack-nhfb,
articleno = "33",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Kranakis:2011:RRL,
author = "Evangelos Kranakis and Danny Krizanc and Pat Morin",
title = "Randomized rendezvous with limited memory",
journal = j-TALG,
volume = "7",
number = "3",
pages = "34:1--34:??",
month = jul,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1978782.1978789",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present a trade-off between the expected time for
two identical agents to rendezvous on a synchronous,
anonymous, oriented ring and the memory requirements of
the agents. In particular, we show there exists a $ 2^t
$ state agent which can achieve rendezvous on an
$n$-node ring in expected time {$ O(n^2 / 2^t + 2^t) $}
and that any {$ t / 2 $} state agent requires expected
time {$ \Omega (n^2 / 2^t) $}. As a corollary we
observe that {$ \Theta (\log \log n) $} bits of memory
are necessary and sufficient to achieve rendezvous in
linear time.",
acknowledgement = ack-nhfb,
articleno = "34",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Pemmaraju:2011:MCO,
author = "Sriram V. Pemmaraju and Rajiv Raman and Kasturi
Varadarajan",
title = "Max-coloring and online coloring with bandwidths on
interval graphs",
journal = j-TALG,
volume = "7",
number = "3",
pages = "35:1--35:??",
month = jul,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1978782.1978790",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given a graph $ G = (V, E) $ and positive integral
vertex weights $ w \colon V \to N $, the max-coloring
problem seeks to find a proper vertex coloring of $G$
whose color classes $ C_1, C_2, \ldots {}, C_k $,
minimize $ \sum_{i = 1}^{k} \max_{v \in C_i} w(v) $. This
problem, restricted to interval graphs, arises whenever
there is a need to design dedicated memory managers
that provide better performance than the
general-purpose memory management of the operating
system. Though this problem seems similar to the
dynamic storage allocation problem, there are
fundamental differences. We make a connection between
max-coloring and online graph coloring and use this to
devise a simple 2-approximation algorithm for
max-coloring on interval graphs. We also show that a
simple first-fit strategy, which is a natural choice for
this problem, yields an 8-approximation algorithm. We
show this result by proving that the first-fit
algorithm for online coloring an interval graph $G$
uses no more than $ 8 \cdot \chi (G) $ colors,
significantly improving the bound of $ 26 \cdot \chi (G) $
by Kierstead and Qin [1995]. We also show that the
max-coloring problem is NP-hard. The problem of online
coloring of intervals with bandwidths is a simultaneous
generalization of online interval coloring and online
bin packing. The input is a set $I$ of intervals, each
interval $ i \in I $ having an associated bandwidth $
b(i) \in (0, 1] $. We seek an online algorithm that
produces a coloring of the intervals such that for any
color $c$ and any real $r$, the sum of the bandwidths
of intervals containing $r$ and colored $c$ is at most
$1$. Motivated by resource allocation problems, Adamy
and Erlebach [2003] consider this problem and present
an algorithm that uses at most 195 times the number of
colors used by an optimal offline algorithm. Using the
new analysis of first-fit coloring of interval graphs,
we show that the Adamy-Erlebach algorithm is
35-competitive. Finally, we generalize the
Adamy-Erlebach algorithm to a class of algorithms and
show that a different instance from this class is
30-competitive.",
acknowledgement = ack-nhfb,
articleno = "35",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Khuller:2011:FFG,
author = "Samir Khuller and Azarakhsh Malekian and Juli{\'a}n
Mestre",
title = "To fill or not to fill: {The} gas station problem",
journal = j-TALG,
volume = "7",
number = "3",
pages = "36:1--36:??",
month = jul,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1978782.1978791",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article we study several routing problems that
generalize shortest paths and the traveling salesman
problem. We consider a more general model that
incorporates the actual cost in terms of gas prices. We
have a vehicle with a given tank capacity. We assume
that at each vertex gas may be purchased at a certain
price. The objective is to find the cheapest route to
go from $s$ to $t$, or the cheapest tour visiting a
given set of locations. We show that the problem of
finding a cheapest plan to go from $s$ to $t$ can be
solved in polynomial time. For most other versions,
however, the problem is NP-complete and we develop
polynomial-time approximation algorithms for these
versions.",
acknowledgement = ack-nhfb,
articleno = "36",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Coppersmith:2011:OOG,
author = "Don Coppersmith and Tomasz Nowicki and Giuseppe
Paleologo and Charles Tresser and Chai Wah Wu",
title = "The optimality of the online greedy algorithm in
carpool and chairman assignment problems",
journal = j-TALG,
volume = "7",
number = "3",
pages = "37:1--37:??",
month = jul,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1978782.1978792",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study several classes of related scheduling
problems including the carpool problem, its
generalization to arbitrary inputs and the chairman
assignment problem. We derive both lower and upper
bounds for online algorithms solving these problems. We
show that the greedy algorithm is optimal among online
algorithms for the chairman assignment problem and the
generalized carpool problem. We also consider geometric
versions of these problems and show how the bounds
adapt to these cases.",
acknowledgement = ack-nhfb,
articleno = "37",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Bille:2011:TIP,
author = "Philip Bille and Inge Li Gortz",
title = "The tree inclusion problem: {In} linear space and
faster",
journal = j-TALG,
volume = "7",
number = "3",
pages = "38:1--38:47",
month = jul,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1978782.1978793",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given two rooted, ordered, and labeled trees {$P$} and
{$T$} the tree inclusion problem is to determine if
{$P$} can be obtained from {$T$} by deleting nodes in
{$T$}. This problem has recently been recognized as an
important query primitive in XML databases.
Kilpel{\"a}inen and Mannila [1995] presented the first
polynomial-time algorithm using quadratic time and
space. Since then several improved results have been
obtained for special cases when {$P$} and {$T$} have a
small number of leaves or small depth. However, in the
worst case these algorithms still use quadratic time
and space. Let {$ n_S $}, {$ l_S $}, and {$ d_S $} denote the
number of nodes, the number of leaves, and the depth of
a tree {$ S \in \{ P, T \} $}. In this article we show that
the tree inclusion problem can be solved in space {$
O(n_T) $} and time: {$$ O(\min \left \{ l_P n_T, \; l_P
l_T \log \log n_T + n_T, \; (n_P n_T) / (\log n_T) + n_T
\log n_T \right \}). $$} This improves or matches the
best known time complexities while using only linear
space instead of quadratic. This is particularly
important in practical applications, such as XML
databases, where the space is likely to be a
bottleneck.",
acknowledgement = ack-nhfb,
articleno = "38",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Laber:2011:IAH,
author = "Eduardo Laber and Marco Molinaro",
title = "Improved approximations for the hotlink assignment
problem",
journal = j-TALG,
volume = "7",
number = "3",
pages = "39:1--39:??",
month = jul,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1978782.1978794",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:40 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Let {$ G = (V, E) $} be a graph representing a Web
site, where nodes correspond to pages and arcs to
hyperlinks. In this context, hotlinks are defined as
shortcuts (new arcs) added to Web pages of {$G$} in
order to reduce the time spent by users to reach their
desired information. In this article, we consider the
problem where {$G$} is a rooted directed tree and the
goal is minimizing the expected time spent by users by
assigning at most {$k$} hotlinks to each node. For the
most studied version of this problem where at most one
hotlink can be added to each node, we prove the
existence of two FPTAS's which optimize different
objectives considered in the literature: one minimizes
the expected user path length and the other maximizes
the expected reduction in user path lengths. These
results improve over a constant factor approximation
for the expected length and over a PTAS for the
expected reduction, both obtained recently in Jacobs
[2007]. Indeed, these FPTAS's are essentially the best
possible results one can achieve under the assumption
that P {$ \neq $} NP. Another contribution we give here
is a 16-approximation algorithm for the most general
version of the problem where up to {$k$} hotlinks can
be assigned from each node. This algorithm runs in {$
O(|V| \log |V|) $} time and it turns out to be the first
algorithm with a constant approximation factor for this
problem.",
acknowledgement = ack-nhfb,
articleno = "39",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Salvy:2011:PFF,
author = "Bruno Salvy and Bob Sedgewick and Michele Soria and
Wojciech Szpankowski and Brigitte Vallee",
title = "{Philippe Flajolet}, the father of analytic
combinatorics",
journal = j-TALG,
volume = "7",
number = "4",
pages = "40:1--40:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000808",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "40",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Dvorak:2011:TCT,
author = "Zdenek Dvor{\'a}k and Ken-Ichi Kawarabayashi and Robin
Thomas",
title = "Three-coloring triangle-free planar graphs in linear
time",
journal = j-TALG,
volume = "7",
number = "4",
pages = "41:1--41:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000809",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Gr{\"o}tzsch's theorem states that every triangle-free
planar graph is 3-colorable, and several relatively
simple proofs of this fact were provided by Thomassen
and other authors. It is easy to convert these proofs
into quadratic-time algorithms to find a 3-coloring,
but it is not clear how to find such a coloring in
linear time (Kowalik used a nontrivial data structure
to construct an {$ O(n \log n) $} algorithm). We design
a linear-time algorithm to find a 3-coloring of a given
triangle-free planar graph. The algorithm avoids using
any complex data structures, which makes it easy to
implement. As a by-product, we give a yet simpler proof
of Gr{\"o}tzsch's theorem.",
acknowledgement = ack-nhfb,
articleno = "41",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Moran:2011:PCR,
author = "Shlomo Moran and Sagi Snir and Wing-Kin Sung",
title = "Partial convex recolorings of trees and galled
networks: {Tight} upper and lower bounds",
journal = j-TALG,
volume = "7",
number = "4",
pages = "42:1--42:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000810",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A coloring of a graph is convex if the vertices that
pertain to any color induce a connected subgraph; a
partial coloring (which assigns colors to a subset of
the vertices) is convex if it can be completed to a
convex (total) coloring. Convex coloring has
applications in fields such as phylogenetics and
communication or transportation networks. When a
coloring of a graph is not convex, a natural question
is how far it is from a convex one. This problem is
denoted as convex recoloring (CR). While the initial
works on CR defined and studied the problem on trees,
recent efforts aim at either generalizing the
underlying graphs or specializing the input colorings.
In this work, we extend the underlying graph and the
input coloring to partially colored galled networks. We
show that although determining whether a coloring is
convex on an arbitrary network is hard, it can be found
efficiently on galled networks. We present a
fixed-parameter tractable algorithm that finds the recoloring
distance of such a network; its running time is
quadratic in the network size and exponential in that
distance. This complexity is achieved by amortized
analysis that uses a novel technique for contracting
colored graphs that seems to be of independent
interest.",
acknowledgement = ack-nhfb,
articleno = "42",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Cabello:2011:GCF,
author = "Sergio Cabello and Panos Giannopoulos and Christian
Knauer and D{\'a}niel Marx and G{\"u}nter Rote",
title = "Geometric clustering: {Fixed-parameter} tractability
and lower bounds with respect to the dimension",
journal = j-TALG,
volume = "7",
number = "4",
pages = "43:1--43:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000811",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study the parameterized complexity of the
$k$-center problem on a given $n$-point set {$P$} in {$
R^d $}, with the dimension {$d$} as the parameter. We
show that the rectilinear 3-center problem is
fixed-parameter tractable, by giving an algorithm that
runs in {$ O(n \log n) $} time for any fixed dimension
{$d$}. On the other hand, we show that this is unlikely to
be the case for both the Euclidean and rectilinear
{$k$}-center problems for any {$ k \geq 2 $} and {$ k
\geq 4 $}, respectively. In particular, we prove that
deciding whether {$P$} can be covered by the union of 2
balls of given radius or by the union of 4 cubes of
given side length is W[1]-hard with respect to {$d$},
and thus not fixed-parameter tractable unless FPT =
W[1]. For the Euclidean case, we also show that even an
{$ n^{o(d)} $}-time algorithm does not exist, unless
there is a {$ 2^{o(n)} $}-time algorithm for $n$-variable
3SAT, that is, the Exponential Time Hypothesis fails.",
acknowledgement = ack-nhfb,
articleno = "43",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Bonsma:2011:TBF,
author = "Paul Bonsma and Frederic Dorn",
title = "Tight bounds and a fast {FPT} algorithm for directed
{Max-Leaf Spanning Tree}",
journal = j-TALG,
volume = "7",
number = "4",
pages = "44:1--44:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000812",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "An out-tree {$T$} of a directed graph {$D$} is a
rooted tree subgraph with all arcs directed outwards
from the root. An out-branching is a spanning out-tree.
By {$ l(D) $} and {$ l_s(D) $}, we denote the maximum
number of leaves over all out-trees and out-branchings
of {$D$}, respectively. We give fixed parameter
tractable algorithms for deciding whether {$ l_s(D)
\geq k $} and whether {$ l(D) \geq k $} for a digraph
{$D$} on {$n$} vertices, both with time complexity {$
2^{O(k \log k)} \cdot n^{O(1)} $}. This answers an open
question whether the problem for out-branchings is in
FPT, and improves on the previous complexity of {$
2^{O(k \log^2 k)} \cdot n^{O(1)} $} in the case of
out-trees. To obtain the complexity bound in the case
of out-branchings, we prove that when all arcs of {$D$}
are part of at least one out-branching, {$ l_s(D) \geq
l(D) / 3 $}. The second bound we prove in this article
states that for strongly connected digraphs {$D$} with
minimum in-degree {$3$}, {$ l_s(D) \geq \Theta (\sqrt {n})
$}, where previously {$ l_s(D) \geq \Theta (\sqrt [3]{n})
$} was the best known bound. This bound is tight, and
also holds for the larger class of digraphs with
minimum in-degree {$3$} in which every arc is part of
at least one out-branching.",
acknowledgement = ack-nhfb,
articleno = "44",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Roditty:2011:APS,
author = "Liam Roditty and Asaf Shapira",
title = "All-pairs shortest paths with a sublinear additive
error",
journal = j-TALG,
volume = "7",
number = "4",
pages = "45:1--45:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000813",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We show that, for every $ 0 \leq p \leq 1 $, there is
an {$ O(n^{2.575 - p / (7.4 - 2.3 p)}) $}-time
algorithm that given a directed graph with small
positive integer weights, estimates the length of the
shortest path between every pair of vertices {$ u, v $}
in the graph to within an additive error {$ \delta^p(u,
v) $}, where {$ \delta (u, v) $} is the exact length of
the shortest path between $u$ and $v$. This algorithm
runs faster than the fastest algorithm for computing
exact shortest paths for any $ 0 < p \leq 1 $.
Previously the only way to ``beat'' the running time of
the exact shortest path algorithms was by applying an
algorithm of Zwick [2002] that approximates the
shortest path distances within a multiplicative error
of $ (1 + \epsilon) $. Our algorithm thus gives a
smooth qualitative and quantitative transition between
the fastest exact shortest paths algorithm, and the
fastest approximation algorithm with a linear additive
error. In fact, the main ingredient we need in order to
obtain the above result, which is also interesting in
its own right, is an algorithm for computing $ (1 +
\epsilon) $ multiplicative approximations for the
shortest paths, whose running time is faster than the
running time of Zwick's approximation algorithm when $
\epsilon \ll 1 $ and the graph has small integer
weights.",
acknowledgement = ack-nhfb,
articleno = "45",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Pritchard:2011:FCS,
author = "David Pritchard and Ramakrishna Thurimella",
title = "Fast computation of small cuts via cycle space
sampling",
journal = j-TALG,
volume = "7",
number = "4",
pages = "46:1--46:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000814",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We describe a new sampling-based method to determine
cuts in an undirected graph. For a graph {$ (V, E) $},
its cycle space is the family of all subsets of {$E$}
that have even degree at each vertex. We prove that
with high probability, sampling the cycle space
identifies the cuts of a graph. This leads to simple
new linear-time sequential algorithms for finding all
cut edges and cut pairs (a set of 2 edges that form a
cut) of a graph. In the model of distributed computing
in a graph {$ G = (V, E) $} with {$ O(\log |V|) $}-bit
messages, our approach yields faster algorithms for
several problems. The diameter of {$G$} is denoted by
{$D$}, and the maximum degree by {$ \Delta $}. We
obtain simple {$ O(D) $}-time distributed algorithms to
find all cut edges, 2-edge-connected components, and
cut pairs, matching or improving upon previous time
bounds. Under natural conditions these new algorithms
are universally optimal; that is, an {$ \Omega (D)
$}-time lower bound holds on every graph. We obtain an
{$ O(D + \Delta / \log |V|) $}-time distributed
algorithm for finding cut vertices; this is faster than
the best previous algorithm when {$ \Delta, D = O(\sqrt
{|V|}) $}. A simple extension of our work yields the
first distributed algorithm with sub-linear time for
3-edge-connected components. The basic distributed
algorithms are Monte Carlo, but they can be made Las
Vegas without increasing the asymptotic complexity. In
the model of parallel computing on the EREW PRAM, our
approach yields a simple algorithm with optimal time
complexity {$ O(\log |V|) $} for finding cut pairs and
3-edge-connected components.",
acknowledgement = ack-nhfb,
articleno = "46",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
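%%% Editorial note (not part of the bibliographic record): the following
%%% minimal Python sketch, illustrative only and not code from the cited
%%% paper, shows the cycle-space sampling idea summarized in the abstract
%%% above. Each non-tree edge gets a random word, every spanning-tree
%%% edge accumulates (via a subtree XOR) the words of the non-tree edges
%%% whose fundamental cycle contains it, and a tree edge is reported as a
%%% cut edge exactly when its accumulated word is zero, which is correct
%%% with high probability. The function name and the 64-bit word size
%%% are arbitrary choices made here for illustration.
%%%
%%% import random
%%% from collections import defaultdict
%%%
%%% def bridges_by_cycle_space_sampling(n, edges, word_bits=64):
%%%     """Return the cut edges (bridges) of an undirected graph, w.h.p."""
%%%     adj = defaultdict(list)
%%%     for i, (u, v) in enumerate(edges):
%%%         adj[u].append((v, i))
%%%         adj[v].append((u, i))
%%%     parent, parent_edge = [None] * n, [None] * n
%%%     order, visited = [], [False] * n
%%%     for root in range(n):                      # DFS spanning forest
%%%         if visited[root]:
%%%             continue
%%%         stack = [(root, None, None)]
%%%         while stack:
%%%             u, p, pe = stack.pop()
%%%             if visited[u]:
%%%                 continue
%%%             visited[u] = True
%%%             parent[u], parent_edge[u] = p, pe
%%%             order.append(u)
%%%             for v, i in adj[u]:
%%%                 if not visited[v]:
%%%                     stack.append((v, u, i))
%%%     tree = set(pe for pe in parent_edge if pe is not None)
%%%     acc = [0] * n                              # per-vertex XOR accumulators
%%%     for i, (u, v) in enumerate(edges):
%%%         if i not in tree:                      # one random word per non-tree edge
%%%             w = random.getrandbits(word_bits)
%%%             acc[u] ^= w
%%%             acc[v] ^= w
%%%     bridges = set()
%%%     for u in reversed(order):                  # children before parents
%%%         if parent[u] is None:
%%%             continue
%%%         if acc[u] == 0:                        # no fundamental cycle covers this tree edge
%%%             bridges.add(edges[parent_edge[u]])
%%%         acc[parent[u]] ^= acc[u]               # push the subtree XOR upward
%%%     return bridges
%%%
%%% # e.g. bridges_by_cycle_space_sampling(4, [(0, 1), (1, 2), (2, 0), (2, 3)])
%%% # returns {(2, 3)} with high probability.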
@Article{Chang:2011:BSA,
author = "Jessica Chang and Thomas Erlebach and Renars Gailis
and Samir Khuller",
title = "Broadcast scheduling: {Algorithms} and complexity",
journal = j-TALG,
volume = "7",
number = "4",
pages = "47:1--47:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000815",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Broadcast Scheduling is a popular method for
disseminating information in response to client
requests. There are $n$ pages of information, and
clients request pages at different times. However,
multiple clients can have their requests satisfied by a
single broadcast of the requested page. In this
article, we consider several related broadcast
scheduling problems. One central problem we study
simply asks to minimize the maximum response time (over
all requests). Another related problem we consider is
the version in which every request has a release time
and a deadline, and the goal is to maximize the number
of requests that meet their deadlines. While
approximation algorithms for both these problems were
proposed several years back, it was not known if they
were NP-complete. One of our main results is that both
these problems are NP-complete. In addition, we use the
same unified approach to give a simple NP-completeness
proof for minimizing the sum of response times. A very
complicated proof was known for this version.
Furthermore, we give a proof that FIFO is a
2-competitive online algorithm for minimizing the
maximum response time (this result had been claimed
earlier with no proof) and that there is no better
deterministic online algorithm (this result was claimed
earlier as well, but with an incorrect proof).",
acknowledgement = ack-nhfb,
articleno = "47",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
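%%% Editorial note: a minimal Python sketch, not from the cited paper, of
%%% the FIFO rule whose 2-competitiveness for maximum response time is
%%% proved above, under the simplifying assumption of unit-length
%%% broadcast slots. In each slot the page with the oldest outstanding
%%% request is broadcast, and a single broadcast satisfies every pending
%%% request for that page. All names are illustrative.
%%%
%%% def fifo_broadcast(requests):
%%%     """requests: list of (arrival_time, page) pairs with integer times.
%%%     Returns the maximum response time incurred by the FIFO rule."""
%%%     if not requests:
%%%         return 0
%%%     by_time = {}
%%%     for t, p in requests:
%%%         by_time.setdefault(t, []).append(p)
%%%     horizon = max(t for t, _ in requests)
%%%     oldest = {}                  # page -> earliest outstanding arrival
%%%     waiting = {}                 # page -> arrival times of pending requests
%%%     max_response, t = 0, 0
%%%     while t <= horizon or oldest:
%%%         for p in by_time.get(t, []):            # requests arriving now
%%%             waiting.setdefault(p, []).append(t)
%%%             oldest.setdefault(p, t)
%%%         if oldest:
%%%             page = min(oldest, key=oldest.get)  # FIFO choice
%%%             finish = t + 1                      # unit-length broadcast
%%%             for arrival in waiting.pop(page):
%%%                 max_response = max(max_response, finish - arrival)
%%%             del oldest[page]
%%%         t += 1
%%%     return max_response
%%%
%%% # e.g. fifo_broadcast([(0, 'a'), (0, 'b'), (1, 'a')]) == 2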
@Article{Calinescu:2011:IAA,
author = "Gruia Calinescu and Amit Chakrabarti and Howard
Karloff and Yuval Rabani",
title = "An improved approximation algorithm for resource
allocation",
journal = j-TALG,
volume = "7",
number = "4",
pages = "48:1--48:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000816",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study the problem of finding a most profitable
subset of $n$ given tasks, each with a given start and
finish time as well as a profit and a resource requirement,
such that the total requirement of the selected tasks at no
time exceeds the quantity {$B$} of available
resource. We show that this NP-hard Resource Allocation
problem can be {$ (1 / 2 - \epsilon) $}-approximated in
randomized polynomial time, which improves upon earlier
approximation results.",
acknowledgement = ack-nhfb,
articleno = "48",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Fotakis:2011:MFL,
author = "Dimitris Fotakis",
title = "Memoryless facility location in one pass",
journal = j-TALG,
volume = "7",
number = "4",
pages = "49:1--49:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000817",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present the first one-pass memoryless algorithm for
metric Facility Location that maintains a set of
facilities approximating the optimal facility
configuration within a constant factor. The algorithm
is randomized and very simple to state and implement.
It processes the demand points one-by-one as they
arrive, and keeps in memory only the facility locations
currently open. We prove that its competitive ratio is
less than 14 in the special case of uniform facility
costs, and less than 49 in the general case of
nonuniform facility costs.",
acknowledgement = ack-nhfb,
articleno = "49",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
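%%% Editorial note: for orientation only, here is a Python sketch of the
%%% classic randomized one-pass rule of Meyerson for online facility
%%% location, a natural point of comparison for the memoryless algorithm
%%% analyzed above; it is explicitly not the paper's algorithm. Each
%%% arriving demand opens a facility at its own location with probability
%%% min(d / f, 1), where d is its distance to the nearest open facility
%%% and f is the (uniform) facility cost, and otherwise connects to that
%%% facility. Names are illustrative.
%%%
%%% import random
%%%
%%% def online_facility_location(points, facility_cost, dist):
%%%     """One-pass randomized online facility location (Meyerson-style)."""
%%%     facilities = []
%%%     total_cost = 0.0
%%%     for p in points:
%%%         d = min((dist(p, q) for q in facilities), default=float("inf"))
%%%         if random.random() < min(d / facility_cost, 1.0):
%%%             facilities.append(p)       # open a new facility at p
%%%             total_cost += facility_cost
%%%         else:
%%%             total_cost += d            # connect p to its nearest facility
%%%     return facilities, total_cost
%%%
%%% # usage: dist = lambda p, q: abs(p - q)
%%% #        online_facility_location([1.0, 2.0, 9.0], 5.0, dist)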
@Article{Han:2011:NUB,
author = "Xin Han and Francis Y. L. Chin and Hing-Fung Ting and
Guochuan Zhang and Yong Zhang",
title = "A new upper bound $ 2.5545 $ on {$2$D} {Online Bin
Packing}",
journal = j-TALG,
volume = "7",
number = "4",
pages = "50:1--50:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000818",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "2D Online Bin Packing is a fundamental problem in
computer science, and the determination of its
asymptotic competitive ratio has attracted much research
attention. In a long series of papers, the lower bound on
this ratio has been improved from 1.808 and 1.856 to 1.907,
and its upper bound reduced from 3.25 through 3.0625,
2.8596, and 2.7834 to 2.66013. In this article, we lower the
upper bound record to 2.5545. Our idea for the improvement is as
follows. In 2002, Seiden and van Stee [Seiden and van
Stee 2003] proposed an elegant algorithm called {$ H
\otimes C $}, composed of the Harmonic algorithm {$H$}
and the Improved Harmonic algorithm {$C$}, for the
two-dimensional online bin packing problem and proved
that the algorithm has an asymptotic competitive ratio
of at most 2.66013. Since the best known online
algorithm for one-dimensional bin packing is the Super
Harmonic algorithm [Seiden 2002], a natural question to
ask is: could a better upper bound be achieved by using
the Super Harmonic algorithm instead of the Improved
Harmonic algorithm? However, as mentioned in Seiden and
van Stee [2003], the previous analysis framework does
not work. In this article, we give a positive answer
for this question. A new upper bound of 2.5545 is
obtained for 2-dimensional online bin packing. The main
idea is to develop new weighting functions for the
Super Harmonic algorithm and propose new techniques to
bound the total weight in a rectangular bin.",
acknowledgement = ack-nhfb,
articleno = "50",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
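%%% Editorial note: the Harmonic algorithm named in the abstract is a
%%% classical one-dimensional online bin packing rule; the minimal Python
%%% sketch below (illustrative only; it is neither the paper's
%%% two-dimensional algorithm nor the Super Harmonic algorithm) shows the
%%% basic idea of classifying items by size into harmonic intervals and
%%% packing each class separately. The parameter k = 6 is an arbitrary
%%% illustrative choice.
%%%
%%% def harmonic_online_bin_packing(items, k=6):
%%%     """Classic Harmonic_k rule; items are sizes in (0, 1].  An item of
%%%     size s is in class i if s lies in (1/(i+1), 1/i] for i < k, and in
%%%     class k otherwise.  A class-i bin (i < k) holds exactly i items of
%%%     that class; class-k items are packed by Next Fit.  Returns the
%%%     number of bins opened."""
%%%     open_count = {i: 0 for i in range(1, k)}  # items in the open class-i bin
%%%     nf_level = None                           # load of the open Next Fit bin
%%%     bins = 0
%%%     for s in items:
%%%         cls = next((i for i in range(1, k) if s > 1.0 / (i + 1)), k)
%%%         if cls < k:
%%%             if open_count[cls] == 0:          # open a fresh class-i bin
%%%                 bins += 1
%%%             open_count[cls] += 1
%%%             if open_count[cls] == cls:        # the bin now holds i items
%%%                 open_count[cls] = 0
%%%         else:                                 # small items (size <= 1/k)
%%%             if nf_level is None or nf_level + s > 1.0:
%%%                 bins += 1
%%%                 nf_level = 0.0
%%%             nf_level += s
%%%     return bins
%%%
%%% # e.g. harmonic_online_bin_packing([0.6, 0.4, 0.4, 0.1, 0.1]) == 3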
@Article{Edmonds:2011:CCR,
author = "Jeff Edmonds and Kirk Pruhs",
title = "Cake cutting really is not a piece of cake",
journal = j-TALG,
volume = "7",
number = "4",
pages = "51:1--51:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000819",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the well-known cake cutting problem in
which a protocol wants to divide a cake among $ n \geq
2 $ players in such a way that each player believes
that they got a fair share. The standard Robertson-Webb
model allows the protocol to make two types of queries,
Evaluation and Cut, to the players. A deterministic
divide-and-conquer protocol with complexity {$ O(n \log
n) $} is known. We provide the first {$ \Omega (n
\log n) $} lower bound on the complexity of any
deterministic protocol in the standard model. This
improves previous lower bounds, in that the protocol is
allowed to assign to a player a piece that is a union
of intervals and needs to guarantee only approximate fairness.
We accomplish this by lower bounding the complexity to
find, for a single player, a piece of cake that is both
rich in value, and thin in width. We then introduce a
version of cake cutting in which the players are able
to cut with only finite precision. In this case, we can
extend the {$ \Omega (n \log n) $} lower bound to
include randomized protocols.",
acknowledgement = ack-nhfb,
articleno = "51",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Barbay:2011:SIS,
author = "J{\'e}r{\'e}my Barbay and Meng He and J. Ian Munro and
Srinivasa Rao Satti",
title = "Succinct indexes for strings, binary relations and
multilabeled trees",
journal = j-TALG,
volume = "7",
number = "4",
pages = "52:1--52:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000820",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We define and design succinct indexes for several
abstract data types (ADTs). The concept is to design
auxiliary data structures that ideally occupy
asymptotically less space than the
information-theoretic lower bound on the space required
to encode the given data, and support an extended set
of operations using the basic operators defined in the
ADT. The main advantage of succinct indexes as opposed
to succinct (integrated data/index) encodings is that
we make assumptions only on the ADT through which the
main data is accessed, rather than the way in which the
data is encoded. This allows more freedom in the
encoding of the main data. In this article, we present
succinct indexes for various data types, namely
strings, binary relations and multilabeled trees. Given
the support for the interface of the ADTs of these data
types, we can support various useful operations
efficiently by constructing succinct indexes for them.
When the operators in the ADTs are supported in
constant time, our results are comparable to previous
results, while allowing more flexibility in the
encoding of the given data. Using our techniques, we
design a succinct encoding that represents a string {$S$} of
length $n$ over an alphabet of size $ \sigma $ using {$
n H_k (S) + \lg \sigma \cdot o(n) + O(n \lg \sigma /
\lg \lg \lg \sigma) $} bits to support access\slash
rank\slash select operations in {$ o((\lg \lg
\sigma)^{1 + \epsilon }) $} time, for any fixed
constant {$ \epsilon > 0 $}. We also design a succinct
text index using {$ n H_0 (S) + O(n \lg \sigma / \lg
\lg \sigma) $} bits that supports finding all the {$ \occ $}
occurrences of a given pattern of length {$m$} in {$
O(m \lg \lg \sigma + {\rm occ} \lg n / \lg^\epsilon
\sigma) $} time, for any fixed constant {$ 0 < \epsilon
< 1 $}. Previous results on these two problems either
have a {$ \lg \sigma $} factor instead of {$ \lg \lg
\sigma $} in the running time, or are not compressed.
Finally, we present succinct encodings of binary
relations and multi-labeled trees that are more compact
than previous structures.",
acknowledgement = ack-nhfb,
articleno = "52",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Russo:2011:FCS,
author = "Lu{\'\i}s M. S. Russo and Gonzalo Navarro and Arlindo
L. Oliveira",
title = "{Fully} compressed suffix trees",
journal = j-TALG,
volume = "7",
number = "4",
pages = "53:1--53:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000821",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Suffix trees are by far the most important data
structure in stringology, with a myriad of applications
in fields like bioinformatics and information
retrieval. Classical representations of suffix trees
require {$ \Theta (n \log n) $} bits of space, for a
string of size {$n$}. This is considerably more than
the {$ n \log_2 \sigma $} bits needed for the string
itself, where {$ \sigma $} is the alphabet size. The
size of suffix trees has been a barrier to their wider
adoption in practice. Recent compressed suffix tree
representations require just the space of the
compressed string plus {$ \Theta (n) $} extra bits.
This is already spectacular, but the linear extra bits
are still unsatisfactory when {$ \sigma $} is small as
in DNA sequences. In this article, we introduce the
first compressed suffix tree representation that breaks
this {$ \Theta (n) $}-bit space barrier. The Fully
Compressed Suffix Tree (FCST) representation requires
only sublinear space on top of the compressed text
size, and supports a wide set of navigational
operations in almost logarithmic time. This includes
extracting arbitrary text substrings, so the FCST
replaces the text using almost the same space as the
compressed text. An essential ingredient of FCSTs is
the lowest common ancestor (LCA) operation. We reveal
important connections between LCAs and suffix tree
navigation. We also describe how to make FCSTs dynamic,
that is, support updates to the text. The dynamic FCST
also supports several operations. In particular, it can
build the static FCST within optimal space and
polylogarithmic time per symbol. Our theoretical
results are also validated experimentally, showing that
FCSTs are very effective in practice as well.",
acknowledgement = ack-nhfb,
articleno = "53",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Izsak:2011:CPM,
author = "Alexander Izsak and Nicholas Pippenger",
title = "Carry propagation in multiplication by constants",
journal = j-TALG,
volume = "7",
number = "4",
pages = "54:1--54:??",
month = sep,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2000807.2000822",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Dec 8 09:35:43 MST 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Suppose that a random $n$-bit number {$V$} is multiplied
by an odd constant {$ M \geq 3 $}, by adding shifted
versions of the number {$V$} corresponding to the {$1$}s
in the binary representation of the constant {$M$}.
Suppose further that the additions are performed by
carry-save adders until the number of summands is
reduced to two, at which time the final addition is
performed by a carry-propagate adder. We show that in
this situation the distribution of the length of the
longest carry-propagation chain in the final addition
is the same (up to terms tending to {$0$} as {$ n \to
\infty $}) as when two independent {$n$}-bit numbers
are added, and in particular the mean and variance are
the same (again up to terms tending to 0). This result
applies to all possible orders of performing the
carry-save additions.",
acknowledgement = ack-nhfb,
articleno = "54",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
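%%% Editorial note: a small Python sketch, not from the cited paper,
%%% that measures one common notion of the longest carry-propagation
%%% chain, namely the longest run of consecutive bit positions producing
%%% a carry-out, together with a Monte Carlo estimate of its mean for
%%% two independent random n-bit summands, which is the baseline the
%%% abstract compares against. The definition and names used here are
%%% illustrative.
%%%
%%% import random
%%%
%%% def longest_carry_run(a, b, n):
%%%     """Longest run of consecutive positions with a carry-out when the
%%%     n-bit numbers a and b are added by a ripple-carry adder."""
%%%     longest = current = carry = 0
%%%     for i in range(n):
%%%         x, y = (a >> i) & 1, (b >> i) & 1
%%%         carry = (x & y) | (carry & (x ^ y))   # carry out of position i
%%%         current = current + 1 if carry else 0
%%%         longest = max(longest, current)
%%%     return longest
%%%
%%% def mean_longest_carry_run(n, trials=10000):
%%%     """Monte Carlo estimate for two independent random n-bit summands."""
%%%     total = sum(longest_carry_run(random.getrandbits(n),
%%%                                   random.getrandbits(n), n)
%%%                 for _ in range(trials))
%%%     return total / trials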
@Article{Guha:2012:AUR,
author = "Sudipto Guha and Kamesh Munagala",
title = "Adaptive Uncertainty Resolution in {Bayesian}
Combinatorial Optimization Problems",
journal = j-TALG,
volume = "8",
number = "1",
pages = "1:1--1:??",
month = jan,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2071379.2071380",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Fri Mar 16 15:33:03 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In several applications such as databases, planning,
and sensor networks, parameters such as selectivity,
load, or sensed values are known only with some
associated uncertainty. The performance of such a
system (as captured by some objective function over the
parameters) is significantly improved if some of these
parameters can be probed or observed. In a resource
constrained situation, deciding which parameters to
observe in order to optimize system performance, itself
becomes an interesting and important optimization
problem. This general problem is the focus of this
article. One of the most important considerations in
this framework is whether adaptivity is required for
the observations. Adaptive observations introduce
blocking or sequential operations in the system whereas
nonadaptive observations can be performed in parallel.
One of the important questions in this regard is to
characterize the benefit of adaptivity for probes and
observation. We present general techniques for
designing constant factor approximations to the optimal
observation schemes for several widely used scheduling
and metric objective functions. We show a unifying
technique that relates this optimization problem to the
outlier version of the corresponding deterministic
optimization. By making this connection, our technique
shows constant factor upper bounds for the benefit of
adaptivity of the observation schemes. We show that
while probing yields significant improvement in the
objective function, being adaptive about the probing is
not beneficial beyond constant factors.",
acknowledgement = ack-nhfb,
articleno = "1",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Mahdian:2012:OOU,
author = "Mohammad Mahdian and Hamid Nazerzadeh and Amin
Saberi",
title = "Online {Optimization} with {Uncertain Information}",
journal = j-TALG,
volume = "8",
number = "1",
pages = "2:1--2:??",
month = jan,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2071379.2071381",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Fri Mar 16 15:33:03 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We introduce a new framework for designing online
algorithms that can incorporate additional information
about the input sequence, while maintaining a
reasonable competitive ratio if the additional
information is incorrect. Within this framework, we
present online algorithms for several problems
including allocation of online advertisement space,
load balancing, and facility location.",
acknowledgement = ack-nhfb,
articleno = "2",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Haeupler:2012:ICD,
author = "Bernhard Haeupler and Telikepalli Kavitha and Rogers
Mathew and Siddhartha Sen and Robert E. Tarjan",
title = "Incremental {Cycle Detection}, {Topological Ordering},
and {Strong Component Maintenance}",
journal = j-TALG,
volume = "8",
number = "1",
pages = "3:1--3:??",
month = jan,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2071379.2071382",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Fri Mar 16 15:33:03 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present two online algorithms for maintaining a
topological order of a directed $n$-vertex acyclic
graph as arcs are added, and detecting a cycle when one
is created. Our first algorithm handles $m$ arc
additions in {$ O(m^{3 / 2}) $} time. For sparse graphs
{$ (m / n = O(1)) $}, this bound improves the best
previous bound by a logarithmic factor, and is tight to
within a constant factor among algorithms satisfying a
natural locality property. Our second algorithm handles
an arbitrary sequence of arc additions in {$ O(n^{5 /
2}) $} time. For sufficiently dense graphs, this bound
improves the best previous bound by a polynomial
factor. Our bound may be far from tight: we show that
the algorithm can take {$ \Omega (n^2 2^{\sqrt {2 \lg
n}}) $} time by relating its performance to a
generalization of the {$k$}-levels problem of
combinatorial geometry. A completely different
algorithm running in {$ \Theta (n^2 \log n) $} time was
given recently by Bender, Fineman, and Gilbert. We
extend both of our algorithms to the maintenance of
strong components, without affecting the asymptotic
time bounds.",
acknowledgement = ack-nhfb,
articleno = "3",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Frigo:2012:COA,
author = "Matteo Frigo and Charles E. Leiserson and Harald
Prokop and Sridhar Ramachandran",
title = "Cache-Oblivious Algorithms",
journal = j-TALG,
volume = "8",
number = "1",
pages = "4:1--4:??",
month = jan,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2071379.2071383",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Fri Mar 16 15:33:03 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "This article presents asymptotically optimal
algorithms for rectangular matrix transpose, fast
Fourier transform (FFT), and sorting on computers with
multiple levels of caching. Unlike previous optimal
algorithms, these algorithms are cache oblivious: no
variables dependent on hardware parameters, such as
cache size and cache-line length, need to be tuned to
achieve optimality. Nevertheless, these algorithms use
an optimal amount of work and move data optimally among
multiple levels of cache. For a cache with size {$M$}
and cache-line length {$B$} where {$ M = \Omega (B^2)
$}, the number of cache misses for an {$ m \times n $}
matrix transpose is {$ \Theta (1 + m n / B) $}. The
number of cache misses for either an {$n$}-point FFT or
the sorting of {$n$} numbers is {$ \Theta (1 + (n /
B)(1 + \log_M n)) $}. We also give a {$ \Theta (m n p)
$}-work algorithm to multiply an {$ m \times n $}
matrix by an {$ n \times p $} matrix that incurs {$
\Theta (1 + (m n + n p + m p) / B + m n p / (B \sqrt
{M})) $} cache faults. We introduce an `ideal-cache'
model to analyze our algorithms. We prove that an
optimal cache-oblivious algorithm designed for two
levels of memory is also optimal for multiple levels
and that the assumption of optimal replacement in the
ideal-cache model can be simulated efficiently by LRU
replacement. We offer empirical evidence that
cache-oblivious algorithms perform well in practice.",
acknowledgement = ack-nhfb,
articleno = "4",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
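%%% Editorial note: a minimal Python sketch, not the authors' code, of
%%% the divide-and-conquer cache-oblivious matrix transpose described in
%%% the abstract: the larger dimension of the current block is halved
%%% recursively, so the working set eventually fits into every level of
%%% the cache hierarchy without the algorithm knowing any cache
%%% parameters. The base-case cutoff is only a practical constant and
%%% plays no role in the asymptotic analysis.
%%%
%%% def co_transpose(A, B, i0=0, j0=0, m=None, n=None, cutoff=16):
%%%     """Cache-oblivious out-of-place transpose: writes A^T into B."""
%%%     if m is None:
%%%         m, n = len(A), len(A[0])
%%%     if m <= cutoff and n <= cutoff:           # small block: copy directly
%%%         for i in range(i0, i0 + m):
%%%             for j in range(j0, j0 + n):
%%%                 B[j][i] = A[i][j]
%%%     elif m >= n:                              # split the longer side
%%%         co_transpose(A, B, i0, j0, m // 2, n, cutoff)
%%%         co_transpose(A, B, i0 + m // 2, j0, m - m // 2, n, cutoff)
%%%     else:
%%%         co_transpose(A, B, i0, j0, m, n // 2, cutoff)
%%%         co_transpose(A, B, i0, j0 + n // 2, m, n - n // 2, cutoff)
%%%
%%% # usage: A = [[1, 2, 3], [4, 5, 6]]; B = [[0] * 2 for _ in range(3)]
%%% #        co_transpose(A, B)   # B becomes [[1, 4], [2, 5], [3, 6]]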
@Article{Chlebus:2012:AQM,
author = "Bogdan S. Chlebus and Dariusz R. Kowalski and Mariusz
A. Rokicki",
title = "Adversarial Queuing on the Multiple Access Channel",
journal = j-TALG,
volume = "8",
number = "1",
pages = "5:1--5:??",
month = jan,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2071379.2071384",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Fri Mar 16 15:33:03 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study deterministic broadcasting on multiple access
channels when packets are injected continuously. The
quality of service is considered in the framework of
adversarial queuing. An adversary is determined by
injection rate and burstiness, the latter denoting the
number of packets that can be injected simultaneously
in a round. We consider only injection rates that are
less than $1$. A protocol is stable when the numbers of
packets in queues stay bounded at all rounds, and it is
of fair latency when waiting times of packets in queues
are {$ O({\rm burstiness} / {\rm rate}) $}. For
channels with collision detection, we give a
full-sensing protocol of fair latency for injection
rates that are at most {$ 1 / (2 (\lceil \lg n
\rceil + 1)) $}, where {$n$} is the number of stations,
and show that fair latency is impossible to achieve for
injection rates that are {$ \omega (1 / \log n) $}.
For channels without collision detection, we present a
full-sensing protocol of fair latency for injection
rates that are at most {$ 1 / (c \lg^2 n) $}, for some
$ c > 0 $. We show that there exists an
acknowledgment-based protocol that has fair latency for
injection rates that are at most {$ 1 / (c n \lg^2 n)
$}, for some $ c > 0 $, and develop an explicit
acknowledgment-based protocol of fair latency for
injection rates that are at most {$ 1 / (27 n^2 \ln n)
$}. Regarding the impossibility of achieving even stability with
restricted protocols, we prove that no
acknowledgment-based protocol can be stable for
injection rates larger than {$ 3 / (1 + \lg n) $}.",
acknowledgement = ack-nhfb,
articleno = "5",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Chen:2012:IEC,
author = "Jianer Chen and Yang Liu and Songjian Lu and Sing-Hoi
Sze and Fenghui Zhang",
title = "Iterative Expansion and Color Coding: An Improved
Algorithm for {$3$D}-Matching",
journal = j-TALG,
volume = "8",
number = "1",
pages = "6:1--6:??",
month = jan,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2071379.2071385",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Fri Mar 16 15:33:03 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Research on the parameterized 3-D matching problem
has yielded a number of new algorithmic techniques and
an impressive list of improved algorithms. In this
article, a new deterministic algorithm for the problem
is developed that integrates and improves a number of
known techniques, including greedy localization,
dynamic programming, and color coding. The new
algorithm, which either constructs a matching of $k$
triples in a given triple set or correctly reports that
no such matching exists, runs in {$ O^*(2.80^{3 k}) $}
time, improving on a long list of previous algorithms for
the problem.",
acknowledgement = ack-nhfb,
articleno = "6",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Bocker:2012:IFP,
author = "Sebastian B{\"o}cker and Quang Bao Anh Bui and Anke
Truss",
title = "Improved Fixed-Parameter Algorithms for Minimum-Flip
Consensus Trees",
journal = j-TALG,
volume = "8",
number = "1",
pages = "7:1--7:??",
month = jan,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2071379.2071386",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Fri Mar 16 15:33:03 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In computational phylogenetics, the problem of
constructing a consensus tree for a given set of rooted
input trees has frequently been addressed. In this
article we study the Minimum-Flip Problem: the input
trees are transformed into a binary matrix, and we want
to find a perfect phylogeny for this matrix using a
minimum number of flips, that is, corrections of single
entries in the matrix. The graph-theoretical
formulation of the problem is as follows: Given a
bipartite graph {$ G = (V_t \cup V_c, E) $}, the task
is to find a minimum set of edge modifications such
that the resulting graph has no induced path with four
edges that starts and ends in {$ V_t $}, where {$ V_t $} corresponds
to the taxa set and {$ V_c $} corresponds to the character
set. We present two fixed-parameter algorithms for the
Minimum-Flip Problem, one with running time {$ O(4.83^k
+ \poly (m, n)) $} and another one with running time {$
O(4.42^k + \poly (m, n)) $} for {$n$} taxa, {$m$}
characters, and {$k$} flips, where $ \poly (m, n) $ denotes a
polynomial function in $m$ and $n$. Additionally, we
discuss several heuristic improvements. We also report
computational results on phylogenetic data.",
acknowledgement = ack-nhfb,
articleno = "7",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Cygan:2012:EFE,
author = "Marek Cygan and Marcin Pilipczuk",
title = "Even Faster Exact Bandwidth",
journal = j-TALG,
volume = "8",
number = "1",
pages = "8:1--8:??",
month = jan,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2071379.2071387",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Fri Mar 16 15:33:03 MDT 2012",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We deal with exact algorithms for Bandwidth, a
long-studied NP-hard problem. For a long time nothing better
than the trivial {$ O^\ast (n!) $} exhaustive search
was known. In 2000, Feige and Kilian [Feige 2000] came
up with an {$ O^\ast (10^n) $}-time and polynomial-space
algorithm. In this article we present a new algorithm
that solves Bandwidth in {$ O^\ast (5^n) $} time and {$
O^\ast (2^n) $} space. Then, we take a closer look and
introduce a major modification that makes it run in {$
O(4.83^n) $} time at the cost of {$ O^\ast (4^n) $}
space complexity. This modification allowed us to
perform a Measure \& Conquer analysis of the time
complexity, which had not been used for graph layout problems
before.",
acknowledgement = ack-nhfb,
articleno = "8",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Aumann:2012:DIG,
author = "Yonatan Aumann and Moshe Lewenstein and Oren Melamud
and Ron Pinter and Zohar Yakhini",
title = "Dotted interval graphs",
journal = j-TALG,
volume = "8",
number = "2",
pages = "9:1--9:??",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2151171.2151172",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:57 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We introduce a generalization of interval graphs,
which we call Dotted Interval Graphs (DIG). A dotted
interval graph is an intersection graph of arithmetic
progressions (dotted intervals). Coloring of dotted
interval graphs naturally arises in the context of high
throughput genotyping. We study the properties of
dotted interval graphs, with a focus on coloring. We
show that any graph is a DIG, but that DIG$_d$ graphs,
that is, DIGs in which the arithmetic progressions have
a jump of at most $d$, form a strict hierarchy. We show
that coloring DIG$_d$ graphs is NP-complete even for $
d = 2 $. For any fixed $d$, we provide a $ (5/6) d +
o(d) $ approximation for the coloring of DIG$_d$
graphs. Finally, we show that finding a maximum clique
in DIG$_d$ graphs is fixed-parameter tractable
in $d$.",
acknowledgement = ack-nhfb,
articleno = "9",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Bose:2012:SGI,
author = "Prosenjit Bose and Eric Y. Chen and Meng He and Anil
Maheshwari and Pat Morin",
title = "Succinct geometric indexes supporting point location
queries",
journal = j-TALG,
volume = "8",
number = "2",
pages = "10:1--10:??",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2151171.2151173",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:57 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We propose designing data structures, called succinct
geometric indexes, of negligible space (more precisely,
$ o(n) $ bits) that support geometric queries in
optimal time by taking advantage of the fact that the $n$ points in
the dataset are permuted and stored elsewhere as a
sequence. Our first and main result is a succinct
geometric index that can answer point location queries,
a fundamental problem in computational geometry, on
planar triangulations in {$ O(\lg n) $} time. We also
design three variants of this index. The first supports
point location using {$ \lg n + 2 \sqrt {\lg n} +
O(\lg^{1 / 4} n) $} point-line comparisons. The second
supports point location in {$ o(\lg n) $} time when the
coordinates are integers bounded by {$U$}. The last
variant can answer point location queries in {$ O(H +
1) $} expected time, where {$H$} is the entropy of the
query distribution. These results match the query
efficiency of previous point location structures that
occupy {$ O(n) $} words or {$ O(n \lg n) $} bits, while
saving drastic amounts of space. We generalize our
succinct geometric index to planar subdivisions, and
design indexes for other types of queries. Finally, we
apply our techniques to design the first implicit data
structures that support point location in {$ O(\lg^2 n)
$} time.",
acknowledgement = ack-nhfb,
articleno = "10",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Drmota:2012:PAC,
author = "Michael Drmota and Reinhard Kutzelnigg",
title = "A precise analysis of {Cuckoo} hashing",
journal = j-TALG,
volume = "8",
number = "2",
pages = "11:1--11:36",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2151171.2151174",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:57 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/hash.bib;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Cuckoo hashing was introduced by Pagh and Rodler in
2001. Its main feature is that it provides constant
worst-case search time. The aim of this article is to
present a precise average case analysis of Cuckoo
hashing. In particular, we determine the probability
that Cuckoo hashing produces no conflicts and give an
upper bound for the construction time that is linear
in the size of the table. The analysis rests on a
generating function approach to the so-called Cuckoo
Graph, a random bipartite graph, and an application of
a double saddle point method to obtain asymptotic
expansions. Furthermore, we provide some results
concerning the structure of these kinds of random
graphs. Our results extend the analysis of Devroye and
Morin [2003]. Additionally, we provide numerical
results confirming the mathematical analysis.",
acknowledgement = ack-nhfb,
articleno = "11",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
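%%% Editorial note: the following minimal Python sketch (illustrative
%%% only, not the exact data structure analyzed in the article) shows the
%%% cuckoo hashing scheme of Pagh and Rodler referred to above: two
%%% tables and two hash functions, lookups that probe at most two cells,
%%% and insertions that evict and relocate keys, rebuilding with fresh
%%% hash functions when an eviction sequence runs too long. The table
%%% size, hash seeding, and growth policy are arbitrary choices made
%%% here for illustration.
%%%
%%% import random
%%%
%%% class CuckooHashTable:
%%%     def __init__(self, size=11):
%%%         self.size = size
%%%         self.tables = [[None] * size, [None] * size]
%%%         self.seeds = [random.random(), random.random()]
%%%
%%%     def _h(self, which, key):                  # one hash function per table
%%%         return hash((self.seeds[which], key)) % self.size
%%%
%%%     def lookup(self, key):                     # at most two probes
%%%         return any(self.tables[t][self._h(t, key)] == key for t in (0, 1))
%%%
%%%     def insert(self, key):
%%%         if self.lookup(key):
%%%             return
%%%         cur, t = key, 0
%%%         for _ in range(2 * self.size):         # bounded eviction sequence
%%%             pos = self._h(t, cur)
%%%             cur, self.tables[t][pos] = self.tables[t][pos], cur
%%%             if cur is None:
%%%                 return
%%%             t = 1 - t                          # evicted key tries the other table
%%%         self._rehash(cur)                      # likely eviction cycle: rebuild
%%%
%%%     def _rehash(self, pending):
%%%         keys = [k for tab in self.tables for k in tab if k is not None]
%%%         keys.append(pending)
%%%         self.size = 2 * self.size + 1
%%%         self.tables = [[None] * self.size, [None] * self.size]
%%%         self.seeds = [random.random(), random.random()]
%%%         for k in keys:
%%%             self.insert(k)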
@Article{Yi:2012:MOT,
author = "Ke Yi and Qin Zhang",
title = "Multidimensional online tracking",
journal = j-TALG,
volume = "8",
number = "2",
pages = "12:1--12:??",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2151171.2151175",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:57 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We propose and study a new class of online problems,
which we call online tracking. Suppose an observer, say
Alice, observes a multivalued function {$ f : \mathbb{Z}^+ \to
\mathbb{Z}^d $} over time in an online fashion, that is, she
only sees {$ f(t) $} for {$ t \leq t_{\rm now} $} where
{$ t_{\rm now} $} is the current time. She would like
to keep a tracker, say Bob, informed of the current
value of $f$ at all times. Under this setting, Alice
could send new values of $f$ to Bob from time to time,
so that the current value of $f$ is always within a
distance of {$ \Delta $} to the last value received by
Bob. We give competitive online algorithms whose
communication costs are compared with the optimal
offline algorithm that knows the entire {$f$} in
advance. We also consider variations of the problem
where Alice is allowed to send predictions to Bob, to
further reduce communication for well-behaved
functions. These online tracking problems have a
variety of applications, ranging from sensor monitoring
and location-based services to publish/subscribe
systems.",
acknowledgement = ack-nhfb,
articleno = "12",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
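%%% Editorial note: a toy Python sketch, not the competitive algorithm of
%%% the article, of the one-dimensional online tracking setting described
%%% above: Alice pushes a new value to Bob only when the current value of
%%% f drifts more than Delta away from the last value Bob received, which
%%% keeps Bob's view within Delta at all times. Names are illustrative.
%%%
%%% def online_tracking(stream, delta):
%%%     """Return the (time, value) messages Alice sends to Bob."""
%%%     last_sent = None
%%%     messages = []
%%%     for t, value in enumerate(stream):
%%%         if last_sent is None or abs(value - last_sent) > delta:
%%%             last_sent = value
%%%             messages.append((t, value))
%%%     return messages
%%%
%%% # e.g. online_tracking([0, 1, 3, 4, 9, 9, 2], delta=2)
%%% # sends at times 0, 2, 4, and 6.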
@Article{Demaine:2012:PAN,
author = "Erik D. Demaine and Mohammadtaghi Hajiaghayi and Hamid
Mahini and Morteza Zadimoghaddam",
title = "The price of anarchy in network creation games",
journal = j-TALG,
volume = "8",
number = "2",
pages = "13:1--13:??",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2151171.2151176",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:57 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study Nash equilibria in the setting of network
creation games introduced recently by Fabrikant,
Luthra, Maneva, Papadimitriou, and Shenker. In this
game we have a set of selfish node players, each
creating some incident links, and the goal is to
minimize $ \alpha $ times the cost of the created links
plus sum of the distances to all other players.
Fabrikant et al. proved an upper bound {$ O(\sqrt
\alpha) $} on the price of anarchy: the relative cost
of the lack of coordination. Albers, Eilts, Even-Dar,
Mansour, and Roditty show that the price of anarchy is
constant for {$ \alpha = O(\sqrt n) $} and for {$
\alpha \geq 12 n \lceil \lg n \rceil $}, and that the
price of anarchy is {$ 15 (1 + (\min \{ \alpha^2 / n, n^2
/ \alpha \})^{1 / 3}) $} for any {$ \alpha $}. The
latter bound shows the first sublinear worst-case
bound, {$ O(n^{1 / 3}) $}, for all {$ \alpha $}. But no
better bound is known for {$ \alpha $} between {$
\omega (\sqrt n) $} and $ o(n \lg n) $. Yet $ \alpha
\approx n $ is perhaps the most interesting range, for
it corresponds to considering the average distance
(instead of the sum of distances) to other nodes to be
roughly on par with link creation (effectively dividing
$ \alpha $ by $n$). In this article, we prove the first
$ o(n^\epsilon) $ upper bound for general $ \alpha $,
namely {$ 2^{O(\sqrt {\lg n})} $}. We also prove a
constant upper bound for {$ \alpha = O(n^{1 - \epsilon
}) $} for any fixed {$ \epsilon > 0 $}, substantially
reducing the range of {$ \alpha $} for which constant
bounds have not been obtained. Along the way, we also
improve the constant upper bound by Albers et al. (with
the lead constant of {$ 15 $}) to $6$ for $ \alpha < (n
/ 2)^{1 / 2} $ and to $4$ for $ \alpha < (n / 2)^{1 /
3} $. Next we consider the bilateral network variant of
Corbo and Parkes, in which links can be created only
with the consent of both endpoints and the link price
is shared equally by the two. Corbo and Parkes show an
upper bound of {$ O(\sqrt \alpha) $} and a lower bound
of {$ \Omega (\lg \alpha) $} for {$ \alpha \leq n $}.
In this article, we show that in fact the upper bound
{$ O(\sqrt \alpha) $} is tight for {$ \alpha \leq n $},
by proving a matching lower bound of {$ \Omega (\sqrt
\alpha) $}. For {$ \alpha > n $}, we prove that the
price of anarchy is {$ \Theta (n / \sqrt \alpha) $}.
Finally we introduce a variant of both network creation
games, in which each player desires to minimize {$
\alpha $} times the cost of its created links plus the
maximum distance (instead of the sum of distances) to
the other players. This variant of the problem is
naturally motivated by considering the worst case
instead of the average case. Interestingly, for the
original (unilateral) game, we show that the price of
anarchy is at most {$2$} for {$ \alpha \geq n $}, {$
O(\min \{ 4^{\sqrt {\lg n}}, (n / \alpha)^{1 / 3} \})
$} for {$ 2 \sqrt {\lg n} \leq \alpha \leq n $}, and {$
O(n^{2 / \alpha }) $} for {$ \alpha < 2 \sqrt {\lg n}
$}. For the bilateral game, we prove matching upper and
lower bounds of {$ \Theta (n / \alpha + 1) $} for {$
\alpha \leq n $}, and an upper bound of {$2$} for {$
\alpha > n $}.",
acknowledgement = ack-nhfb,
articleno = "13",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Ye:2012:EG,
author = "Yuli Ye and Allan Borodin",
title = "Elimination graphs",
journal = j-TALG,
volume = "8",
number = "2",
pages = "14:1--14:??",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2151171.2151177",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:57 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article we study graphs with inductive
neighborhood properties. Let {$P$} be a graph property.
A graph {$ G = (V, E) $} with {$n$} vertices is said to
have an inductive neighborhood property with respect to
{$P$} if there is an ordering of vertices {$ v_1 $},
\ldots, {$ v_n $} such that the property {$P$} holds on
the induced subgraph {$ G[N(v_i) \cap V_i] $}, where {$
N(v_i) $} is the neighborhood of {$ v_i $} and {$ V_i =
\{ v_i, \ldots, v_n \} $}. It turns out that if we take
{$P$} to be the property of having maximum independent set size no
greater than {$k$}, then this definition gives a
natural generalization of both chordal graphs and {$ (k
+ 1) $}-claw-free graphs. We refer to such graphs as
inductive {$k$}-independent graphs. We study properties
of such families of graphs, and we show that several
natural classes of graphs are inductive $k$-independent
for small $k$. In particular, any intersection graph of
translates of a convex object in the two-dimensional
plane is an inductive $3$-independent graph;
furthermore, any planar graph is an inductive
$3$-independent graph. For any fixed constant $k$, we
develop simple, polynomial time approximation
algorithms for inductive $k$-independent graphs with
respect to several well-studied NP-complete problems.
Our generalized formulation unifies and extends several
previously known results.",
acknowledgement = ack-nhfb,
articleno = "14",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Fischer:2012:QCT,
author = "Eldar Fischer and Oded Lachish and Arie Matsliah and
Ilan Newman and Orly Yahalom",
title = "On the query complexity of testing orientations for
being {Eulerian}",
journal = j-TALG,
volume = "8",
number = "2",
pages = "15:1--15:??",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2151171.2151178",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:57 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider testing directed graphs for being Eulerian in the
orientation model introduced by Halevy et al. [2005].
Despite the local nature of the Eulerian property, it
turns out to be significantly harder to test than other
properties studied in the orientation model. We show a
nonconstant lower bound on the query complexity of
$2$-sided tests and a linear lower bound on the query
complexity of $1$-sided tests for this property. On the
positive side, we give several $1$-sided and $2$-sided
tests, including a sublinear query complexity $2$-sided
test, for general graphs. For special classes of
graphs, including bounded-degree graphs and expander
graphs, we provide improved results. In particular, we
give a $2$-sided test with constant query complexity
for dense graphs, as well as for expander graphs with a
constant expansion parameter.",
acknowledgement = ack-nhfb,
articleno = "15",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
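%%% Editorial note: for reference, the property being tested above is
%%% simply that every vertex of the orientation has equal in-degree and
%%% out-degree. The exhaustive Python check below makes the property
%%% concrete; it reads all arcs and is therefore not a property tester in
%%% the query-complexity sense studied in the article.
%%%
%%% from collections import Counter
%%%
%%% def is_eulerian_orientation(arcs):
%%%     """True iff every vertex has in-degree equal to out-degree."""
%%%     out_deg, in_deg = Counter(), Counter()
%%%     for u, v in arcs:
%%%         out_deg[u] += 1
%%%         in_deg[v] += 1
%%%     return all(out_deg[v] == in_deg[v] for v in set(out_deg) | set(in_deg))
%%%
%%% # is_eulerian_orientation([(1, 2), (2, 3), (3, 1)]) == True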
@Article{Fujito:2012:HTM,
author = "Toshihiro Fujito",
title = "How to trim an {MST}: a $2$-approximation algorithm for
minimum cost-tree cover",
journal = j-TALG,
volume = "8",
number = "2",
pages = "16:1--16:??",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2151171.2151179",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:57 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The minimum cost-tree cover problem is to compute a
minimum cost-tree {$T$} in a given connected graph
{$G$} with costs on the edges, such that the vertices
spanned by {$T$} form a vertex cover for {$G$}. The
problem is supposed to occur in applications of vertex
cover and in edge-dominating sets when additional
connectivity is required for solutions. Whereas a
linear-time {$2$}-approximation algorithm for the
unweighted case has been known for quite a while, the
best approximation ratio known for the weighted case is
{$3$}. Moreover, the {$3$}-approximation algorithms for
such cases are far from practical due to their
inefficiency. In this article we present a fast, purely
combinatorial $2$-approximation algorithm for the
minimum cost-tree cover problem. It constructs a good
approximate solution by trimming some leaves within a
minimum spanning tree (MST); and, to determine which
leaves to trim, it uses both the primal-dual schema and
an instance layering technique adapted from the local
ratio method.",
acknowledgement = ack-nhfb,
articleno = "16",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Manthey:2012:AMT,
author = "Bodo Manthey",
title = "On approximating multicriteria {TSP}",
journal = j-TALG,
volume = "8",
number = "2",
pages = "17:1--17:??",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2151171.2151180",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:57 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present approximation algorithms for almost all
variants of the multicriteria traveling salesman
problem (TSP). First, we devise randomized
approximation algorithms for multicriteria maximum
traveling salesman problems (Max-TSP). For
multicriteria Max-STSP where the edge weights have to
be symmetric, we devise an algorithm with an
approximation ratio of $ 2 / 3 - \epsilon $. For
multicriteria Max-ATSP where the edge weights may be
asymmetric, we present an algorithm with a ratio of $ 1
/ 2 - \epsilon $. Our algorithms work for any fixed
number $k$ of objectives. Furthermore, we present a
deterministic algorithm for bicriteria Max-STSP that
achieves an approximation ratio of $ 7 / 27 $. Finally,
we present a randomized approximation algorithm for the
asymmetric multicriteria minimum TSP with triangle
inequality (Min-ATSP). This algorithm achieves a ratio
of $ \log n + \epsilon $.",
acknowledgement = ack-nhfb,
articleno = "17",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Bjorklund:2012:TSP,
author = "Andreas Bj{\"o}rklund and Thore Husfeldt and Petteri
Kaski and Mikko Koivisto",
title = "The traveling salesman problem in bounded degree
graphs",
journal = j-TALG,
volume = "8",
number = "2",
pages = "18:1--18:??",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2151171.2151181",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:57 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We show that the traveling salesman problem in
bounded-degree graphs can be solved in time {$ O((2 -
\epsilon)^n) $}, where {$ \epsilon > 0 $} depends only
on the degree bound but not on the number of cities,
{$n$}. The algorithm is a variant of the classical
dynamic programming solution due to Bellman, and,
independently, Held and Karp. In the case of bounded
integer weights on the edges, we also give a
polynomial-space algorithm with running time {$ O((2 -
\epsilon)^n) $} on bounded-degree graphs. In addition,
we present an analogous analysis of Ryser's algorithm
for the permanent of matrices with a bounded number of
nonzero entries in each column.",
acknowledgement = ack-nhfb,
articleno = "18",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Krokhin:2012:HLW,
author = "Andrei Krokhin and D{\'a}niel Marx",
title = "On the hardness of losing weight",
journal = j-TALG,
volume = "8",
number = "2",
pages = "19:1--19:??",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2151171.2151182",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:57 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study the complexity of local search for the
Boolean constraint satisfaction problem (CSP), in the
following form: given a CSP instance, that is, a
collection of constraints, and a solution to it, the
question is whether there is a better (lighter, i.e.,
having strictly less Hamming weight) solution within a
given distance from the initial solution. We classify
the complexity, both classical and parameterized, of
such problems by a Schaefer-style dichotomy result,
that is, with a restricted set of allowed types of
constraints. Our results show that there is a
considerable number of such problems that are NP-hard,
but fixed-parameter tractable when parameterized by the
distance.",
acknowledgement = ack-nhfb,
articleno = "19",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Bateni:2012:APC,
author = "Mohammadhossein Bateni and Mohammadtaghi Hajiaghayi",
title = "Assignment problem in content distribution networks:
{Unsplittable} hard-capacitated facility location",
journal = j-TALG,
volume = "8",
number = "3",
pages = "20:1--20:??",
month = jul,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2229163.2229164",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:59 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In a Content Distribution Network (CDN), there are m
servers storing the data; each of them has a specific
bandwidth. All the requests from a particular client
should be assigned to one server because of the routing
protocol used. The goal is to minimize the total cost
of these assignments (the cost of each is proportional to
the distance between the client and the server as well
as the request size) while the load on each server is
kept below its bandwidth limit. When each server also
has a setup cost, this is an unsplittable
hard-capacitated facility location problem. As much
attention as facility location problems have received,
there has been no nontrivial approximation algorithm
when we have hard capacities (i.e., there can only be
one copy of each facility whose capacity cannot be
violated) and demands are unsplittable (i.e., all the
demand from a client has to be assigned to a single
facility). We observe it is NP-hard to approximate the
cost to within any bounded factor in this case. Thus,
for an arbitrary constant $ \epsilon > 0 $, we relax
the capacities to a $ 1 + \epsilon $ factor. For the
case where capacities are almost uniform, we give a
bicriteria {$ O(\log n, 1 + \epsilon) $}-approximation
algorithm for general metrics and a {$ (1 + \epsilon, 1
+ \epsilon) $}-approximation algorithm for tree
metrics. A bicriteria {$ (\alpha, \beta)
$}-approximation algorithm produces a solution of cost
at most {$ \alpha $} times the optimum, while violating
the capacities by no more than a $ \beta $ factor. We
can get the same guarantees for nonuniform capacities
if we allow quasipolynomial running time. In our
algorithm, some clients guess the facility they are
assigned to, and facilities decide the size of the
clients they serve. A straightforward approach results
in exponential running time. When costs do not satisfy
metricity, we show that a 1.5 violation of capacities
is necessary to obtain any approximation. It is worth
noting that our results generalize bin packing (zero
connection costs and facility costs equal to one),
knapsack (single facility with all costs being zero),
minimum makespan scheduling for related machines (all
connection costs being zero), and some facility
location problems.",
acknowledgement = ack-nhfb,
articleno = "20",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Panconesi:2012:EPS,
author = "Alessandro Panconesi and Jaikumar Radhakrishnan",
title = "Expansion properties of (secure) wireless networks",
journal = j-TALG,
volume = "8",
number = "3",
pages = "21:1--21:??",
month = jul,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2229163.2229165",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:59 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We show that some topologies arising naturally in the
context of wireless networking are low-degree expander
graphs.",
acknowledgement = ack-nhfb,
articleno = "21",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Meyer:2012:ESP,
author = "Ulrich Meyer and Norbert Zeh",
title = "{I/O}-efficient shortest path algorithms for
undirected graphs with random or bounded edge lengths",
journal = j-TALG,
volume = "8",
number = "3",
pages = "22:1--22:??",
month = jul,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2229163.2229166",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:59 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We present I/O-efficient single-source shortest path
algorithms for undirected graphs. Our main result is an
algorithm with I/O complexity {$ O(\sqrt{n m \log L} /
B + {\rm MST}(n, m)) $} on graphs with {$n$} vertices,
{$m$} edges, and arbitrary edge lengths between {$1$}
and {$L$}; {$ {\rm MST}(n, m) $} denotes the I/O
complexity of computing a minimum spanning tree; {$B$}
denotes the disk block size. If the edge lengths are
drawn uniformly at random from $ (0, 1] $, the expected
I/O complexity of the algorithm is $ O(\sqrt{n m} / B +
(m / B) \log B + {\rm MST}(n, m)) $. A simpler
algorithm has expected I/O complexity $ O(\sqrt{n m
\log B} / B + {\rm MST}(n, m)) $ for uniformly random
edge lengths.",
acknowledgement = ack-nhfb,
articleno = "22",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Chekuri:2012:IAO,
author = "Chandra Chekuri and Nitish Korula and Martin P{\'a}l",
title = "Improved algorithms for orienteering and related
problems",
journal = j-TALG,
volume = "8",
number = "3",
pages = "23:1--23:??",
month = jul,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2229163.2229167",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:59 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article, we consider the orienteering problem
in undirected and directed graphs and obtain improved
approximation algorithms. The point to
point-orienteering problem is the following: Given an
edge-weighted graph {$ G = (V, E) $} (directed or
undirected), two nodes {$ s, t \in V $} and a time
limit {$B$}, find an {$s$}--{$t$} walk in {$G$} of
total length at most {$B$} that maximizes the number of
distinct nodes visited by the walk. This problem is
closely related to tour problems such as TSP as well as
network design problems such as {$k$}-MST. Orienteering
with time-windows is the more general problem in which
each node {$v$} has a specified time-window {$ [R(v),
D(v)] $} and a node {$v$} is counted as visited by the
walk only if {$v$} is visited during its time-window.
We design new and improved algorithms for the
orienteering problem and orienteering with
time-windows. Our main results are the following: --- A
{$ (2 + \epsilon) $} approximation for orienteering in
undirected graphs, improving upon the $3$-approximation
of Bansal et al. [2004]. --- An {$ O(\log^2 {\rm OPT})
$} approximation for orienteering in directed graphs,
where {$ {\rm OPT} \leq n $} is the number of vertices
visited by an optimal solution. Previously, only a
quasipolynomial-time algorithm due to Chekuri and
P{\'a}l [2005] achieved a polylogarithmic approximation
(a ratio of {$ O(\log {\rm OPT}) $}). --- Given an {$
\alpha $} approximation for orienteering, we show an {$
O(\alpha \cdot \max \{ \log {\rm OPT}, \log (l_{\rm max}
/ l_{\rm min}) \}) $} approximation for orienteering
with time-windows, where {$ l_{\rm max} $} and {$
l_{\rm min} $} are the lengths of the longest and
shortest time-windows respectively.",
acknowledgement = ack-nhfb,
articleno = "23",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Asadpour:2012:SCM,
author = "Arash Asadpour and Uriel Feige and Amin Saberi",
title = "{Santa Claus} meets hypergraph matchings",
journal = j-TALG,
volume = "8",
number = "3",
pages = "24:1--24:??",
month = jul,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2229163.2229168",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:59 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the restricted assignment version of the
problem of max-min fair allocation of indivisible
goods, also known as the Santa Claus problem. There are
$m$ items and $n$ players. Every item has some
nonnegative value, and every player is interested in
only some of the items. The goal is to distribute the
items to the players in a way that maximizes the
minimum of the sum of the values of the items given to
any player. It was previously shown via a
nonconstructive proof that uses the Lov{\'a}sz local
lemma that the integrality gap of a certain
configuration LP for the problem is no worse than some
(unspecified) constant. This gives a polynomial-time
algorithm to estimate the optimum value of the problem
within a constant factor, but does not provide a
polynomial-time algorithm for finding a corresponding
allocation. We use a different approach to analyze the
integrality gap. Our approach is based upon local
search techniques for finding perfect matchings in
certain classes of hypergraphs. As a result, we prove
that the integrality gap of the configuration LP is no
worse than $ 1 / 4 $. Our proof provides a local search
algorithm which finds the corresponding allocation, but
is nonconstructive in the sense that this algorithm is
not known to converge to a local optimum in a
polynomial number of steps.",
acknowledgement = ack-nhfb,
articleno = "24",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Fanelli:2012:SCC,
author = "Angelo Fanelli and Michele Flammini and Luca
Moscardelli",
title = "The speed of convergence in congestion games under
best-response dynamics",
journal = j-TALG,
volume = "8",
number = "3",
pages = "25:1--25:??",
month = jul,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2229163.2229169",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:59 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We investigate the speed of convergence of best
response dynamics to approximately optimal solutions in
congestion games with linear delay functions. In
Ackermann et al. [2008] it has been shown that the
convergence time of such dynamics to Nash equilibrium
may be exponential in the number of players $n$.
Motivated by such a negative result, we focus on the
study of the states (not necessarily being equilibria)
reached after a limited number of players' selfish
moves, and we show that {$ \Theta (n \log \log n) $}
best responses are necessary and sufficient to achieve
states that approximate the optimal solution by a
constant factor, under the assumption that every {$
O(n) $} steps each player performs a constant (and
nonnull) number of best responses. We show that this
result is tight even for the simplest case of singleton
congestion games.",
acknowledgement = ack-nhfb,
articleno = "25",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Baptiste:2012:PTA,
author = "Philippe Baptiste and Marek Chrobak and Christoph
D{\"u}rr",
title = "Polynomial-time algorithms for minimum energy
scheduling",
journal = j-TALG,
volume = "8",
number = "3",
pages = "26:1--26:??",
month = jul,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2229163.2229170",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:59 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The aim of power management policies is to reduce the
amount of energy consumed by computer systems while
maintaining a satisfactory level of performance. One
common method for saving energy is to simply suspend
the system during idle times. No energy is consumed in
the suspend mode. However, the process of waking up the
system itself requires a certain fixed amount of
energy, and thus suspending the system is beneficial
only if the idle time is long enough to compensate for
this additional energy expenditure. In the specific
problem studied in the article, we have a set of jobs
with release times and deadlines that need to be
executed on a single processor. Preemptions are
allowed. The processor requires energy $L$ to be woken
up and, when it is on, it uses one unit of energy per
one unit of time. It has been an open problem whether a
schedule minimizing the overall energy consumption can
be computed in polynomial time. We answer this question
in the affirmative by providing an {$ O(n^5) $}-time
algorithm. In addition we provide an {$ O(n^4) $}-time
algorithm for computing the minimum energy schedule
when all jobs have unit length.",
acknowledgement = ack-nhfb,
articleno = "26",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Diedrich:2012:TAA,
author = "Florian Diedrich and Klaus Jansen and Lars Pr{\"a}del
and Ulrich M. Schwarz and Ola Svensson",
title = "Tight approximation algorithms for scheduling with
fixed jobs and nonavailability",
journal = j-TALG,
volume = "8",
number = "3",
pages = "27:1--27:??",
month = jul,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2229163.2229171",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:59 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We study two closely related problems in nonpreemptive
scheduling of jobs on identical parallel machines. In
these two settings there are either fixed jobs or
nonavailability intervals during which the machines are
not available; in both cases, the objective is to
minimize the makespan. Both formulations have different
applications, for example, in turnaround scheduling or
overlay computing. For both problems we contribute
approximation algorithms with an improved ratio of $ 3
/ 2 $. For scheduling with fixed jobs, a lower bound of
$ 3 / 2 $ on the approximation ratio has been obtained
by Scharbrodt et al. [1999]; for scheduling with
nonavailability we provide the same lower bound. We use
dual approximation, creation of a gap structure, and a
PTAS for the multiple subset sum problem, combined with
a postprocessing step to assign large jobs.",
acknowledgement = ack-nhfb,
articleno = "27",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Edmonds:2012:SSP,
author = "Jeff Edmonds and Kirk Pruhs",
title = "Scalably scheduling processes with arbitrary speedup
curves",
journal = j-TALG,
volume = "8",
number = "3",
pages = "28:1--28:??",
month = jul,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2229163.2229172",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:59 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We give a scalable ($ (1 + \epsilon) $-speed {$ O(1)
$}-competitive) nonclairvoyant algorithm for scheduling
jobs with sublinear nondecreasing speedup curves on
multiple processors with the objective of average
response time.",
acknowledgement = ack-nhfb,
articleno = "28",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Collette:2012:ETP,
author = "S{\'e}bastien Collette and Vida Dujmovi{\'c} and John
Iacono and Stefan Langerman and Pat Morin",
title = "Entropy, triangulation, and point location in planar
subdivisions",
journal = j-TALG,
volume = "8",
number = "3",
pages = "29:1--29:??",
month = jul,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2229163.2229173",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:59 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A data structure is presented for point location in
connected planar subdivisions when the distribution of
queries is known in advance. The data structure has an
expected query time that is within a constant factor of
optimal. More specifically, an algorithm is presented
that preprocesses a connected planar subdivision {$G$}
of size {$n$} and a query distribution {$D$} to produce
a point location data structure for {$G$}. The expected
number of point-line comparisons performed by this data
structure, when the queries are distributed according
to {$D$}, is {$ \tilde {H} + O(\tilde {H}^{1 / 2} + 1)
$} where {$ \tilde {H} = \tilde {H}(G, D) $} is a lower
bound on the expected number of point-line comparisons
performed by any linear decision tree for point
location in {$G$} under the query distribution {$D$}.
The preprocessing algorithm runs in {$ O(n \log n) $}
time and produces a data structure of size {$ O(n) $}.
These results are obtained by creating a Steiner
triangulation of {$G$} that has near-minimum entropy.",
acknowledgement = ack-nhfb,
articleno = "29",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Damerow:2012:SAL,
author = "Valentina Damerow and Bodo Manthey and Friedhelm
{Meyer Auf Der Heide} and Harald R{\"a}cke and
Christian Scheideler and Christian Sohler and Till
Tantau",
title = "Smoothed analysis of left-to-right maxima with
applications",
journal = j-TALG,
volume = "8",
number = "3",
pages = "30:1--30:??",
month = jul,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2229163.2229174",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:59 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A left-to-right maximum in a sequence of $n$ numbers $
s_1 $, \ldots {}, $ s_n $ is a number that is strictly
larger than all preceding numbers. In this article we
present a smoothed analysis of the number of
left-to-right maxima in the presence of additive random
noise. We show that for every sequence of $n$ numbers $
s_i \in [0, 1] $ that are perturbed by uniform noise
from the interval $ [ - \epsilon, \epsilon] $, the
expected number of left-to-right maxima is {$ \Theta
(\sqrt{n / \epsilon} + \log n) $} for {$ \epsilon > 1 /
n $}. For Gaussian noise with standard deviation {$
\sigma $} we obtain a bound of {$ O((\log^{3 / 2} n) /
\sigma + \log n) $}. We apply our results to the
analysis of the smoothed height of binary search trees
and the smoothed number of comparisons in the quicksort
algorithm and prove bounds of {$ \Theta (\sqrt{n /
\epsilon} + \log n) $} and {$ \Theta ((n / (\epsilon + 1))
\sqrt{n / \epsilon} + n \log n) $}, respectively, for
uniform random noise from the interval {$ [ - \epsilon,
\epsilon] $}. Our results can also be applied to bound
the smoothed number of points on a convex hull of
points in the two-dimensional plane and to smoothed
motion complexity, a concept we describe in this
article. We bound how often one needs to update a data
structure storing the smallest axis-aligned box
enclosing a set of points moving in $d$-dimensional
space.",
acknowledgement = ack-nhfb,
articleno = "30",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Bassino:2012:COF,
author = "Fr{\'e}d{\'e}rique Bassino and Julien Cl{\'e}ment and
Pierre Nicod{\`e}me",
title = "Counting occurrences for a finite set of words:
combinatorial methods",
journal = j-TALG,
volume = "8",
number = "3",
pages = "31:1--31:??",
month = jul,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2229163.2229175",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:59 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article, we provide the multivariate
generating function counting texts according to their
length and to the number of occurrences of words from a
finite set. The application of the inclusion-exclusion
principle to word counting due to Goulden and Jackson
[1979, 1983] is used to derive the result. Unlike some
other techniques which suppose that the set of words is
reduced (i.e., where no two words are a factor of one
another), the finite set can be chosen arbitrarily.
Noonan and Zeilberger [1999] already provided a Maple
package treating the nonreduced case, without giving an
expression of the generating function or a detailed
proof. We provide a complete proof validating the use
of the inclusion-exclusion principle. Some formul{\ae}
for expected values, variance, and covariance for
number of occurrences when considering two arbitrary
sets of finite words are given as an application of our
methodology.",
acknowledgement = ack-nhfb,
articleno = "31",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Arvind:2012:TNG,
author = "V. Arvind and Piyush P. Kurur",
title = "Testing nilpotence of {Galois} groups in polynomial
time",
journal = j-TALG,
volume = "8",
number = "3",
pages = "32:1--32:??",
month = jul,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2229163.2229176",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:09:59 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We give the first polynomial-time algorithm for
checking whether the Galois group {$ {\rm Gal}(f) $} of
an input polynomial {$ f(X) \in Q[X] $} is nilpotent:
the running time of our algorithm is bounded by a
polynomial in the size of the coefficients of {$f$} and
the degree of {$f$}. Additionally, we give a
deterministic polynomial-time algorithm that, when
given as input a polynomial {$ f(X) \in Q[X] $} with
nilpotent Galois group, computes for each prime factor
{$p$} of {$ \# {\rm Gal}(f) $}, a polynomial {$ g_p(X)
\in Q[X] $} whose Galois group is the {$p$}-Sylow
subgroup of {$ {\rm Gal}(f) $}.",
acknowledgement = ack-nhfb,
articleno = "32",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Roditty:2012:RPS,
author = "Liam Roditty and Uri Zwick",
title = "Replacement paths and $k$ simple shortest paths in
unweighted directed graphs",
journal = j-TALG,
volume = "8",
number = "4",
pages = "33:1--33:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2344422.2344423",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:02 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Let {$ G = (V, E) $} be a directed graph and let {$P$}
be a shortest path from {$s$} to {$t$} in {$G$}. In the
replacement paths problem, we are required to find, for
every edge {$e$} on {$P$}, a shortest path from {$s$}
to {$t$} in {$G$} that avoids {$e$}. The only known
algorithm for solving the problem, even for unweighted
directed graphs, is the trivial algorithm in which each
edge on the path, in its turn, is excluded from the
graph and a shortest paths tree is computed from {$s$}.
The running time is {$ O(m n + n^2 \log n) $}. The
replacement paths problem is strongly motivated by two
different applications: (1) The fastest algorithm to
compute the {$k$} simple shortest paths between {$s$}
and {$t$} in directed graphs [Yen 1971; Lawler 1972]
computes the replacement paths between $s$ and $t$. Its
running time is {$ \tilde {O}(m n k) $}. (2) The
replacement paths problem is used to compute the
Vickrey pricing of edges in a distributed network. It
was raised as an open problem by Nisan and Ronen [2001]
whether it is possible to compute the Vickrey pricing
faster than {$n$} computations of a shortest paths
tree. In this article we present the first nontrivial
algorithm for computing replacement paths in unweighted
directed graphs (and in graphs with small integer
weights). Our algorithm is Monte-Carlo and its running
time is {$ \tilde {O}(m \sqrt n) $}. This result
immediately improves the running time of the two
applications mentioned above in a factor of {$ \sqrt n
$}. We also show how to reduce the problem of computing
{$k$} simple shortest paths between {$s$} and $t$ to {$
O(k) $} computations of a second simple shortest path
from {$s$} to {$t$} each time in a different subgraph
of {$G$}. The importance of this result is that
computing a second simple shortest path may turn out to
be an easier problem than computing the replacement
paths; thus, to improve the {$k$} simple shortest paths
algorithm, we can focus our efforts on obtaining a faster
algorithm for the second shortest path problem.",
acknowledgement = ack-nhfb,
articleno = "33",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Chan:2012:APS,
author = "Timothy M. Chan",
title = "All-pairs shortest paths for unweighted undirected
graphs in $ o(m n) $ time",
journal = j-TALG,
volume = "8",
number = "4",
pages = "34:1--34:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2344422.2344424",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:02 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We revisit the all-pairs-shortest-paths problem for an
unweighted undirected graph with $n$ vertices and $m$
edges. We present new algorithms with the following
running times: {$ O(m n / \log n) $} if {$ m > n \log n
\log \log \log n $}; {$ O(m n \log \log n / \log n) $} if {$ m
> n \log \log n $}; {$ O(n^2 \log^2 \log n / \log n) $} if {$
m \leq n \log \log n $}. These represent the best time
bounds known for the problem for all {$ m \ll n^{1.376}
$}. We also obtain a similar type of result for the
diameter problem for unweighted directed graphs.",
acknowledgement = ack-nhfb,
articleno = "34",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Baswana:2012:FDR,
author = "Surender Baswana and Sumeet Khurana and Soumojit
Sarkar",
title = "Fully dynamic randomized algorithms for graph
spanners",
journal = j-TALG,
volume = "8",
number = "4",
pages = "35:1--35:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2344422.2344425",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:02 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Spanner of an undirected graph {$ G = (V, E) $} is a
subgraph that is sparse and yet preserves all-pairs
distances approximately. More formally, a spanner with
stretch {$ t \in N $} is a subgraph {$ (V, E_S) $},
{$ E_S \subseteq E $} such that the distance between any
two vertices in the subgraph is at most {$t$} times
their distance in {$G$}. Though {$G$} is trivially a
{$t$}-spanner of itself, the research as well as
applications of spanners invariably deal with a
{$t$}-spanner that has as small a number of edges as
possible. We present fully dynamic algorithms for
maintaining spanners in centralized as well as
synchronized distributed environments. These algorithms
are designed for undirected unweighted graphs and use
randomization in a crucial manner. Our algorithms
significantly improve the existing fully dynamic
algorithms for graph spanners. The expected size
(number of edges) of a {$t$}-spanner maintained at each
stage by our algorithms matches, up to a
polylogarithmic factor, the worst case optimal size of
a $t$-spanner. The expected amortized time (or messages
communicated in distributed environment) to process a
single insertion\slash deletion of an edge by our
algorithms is close to optimal.",
acknowledgement = ack-nhfb,
articleno = "35",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Swamy:2012:ESS,
author = "Chaitanya Swamy",
title = "The effectiveness of {Stackelberg} strategies and
tolls for network congestion games",
journal = j-TALG,
volume = "8",
number = "4",
pages = "36:1--36:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2344422.2344426",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:02 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "It is well known that in a network with arbitrary
(convex) latency functions that are a function of edge
traffic, the worst-case ratio, over all inputs, of the
system delay caused due to selfish behavior versus the
system delay of the optimal centralized solution may be
unbounded even if the system consists of only two
parallel links. This ratio is called the price of
anarchy (PoA). In this article, we investigate ways by
which one can reduce the performance degradation due to
selfish behavior. We investigate two primary methods:
(a) Stackelberg routing strategies, where a central
authority, for example, network manager, controls a
fixed fraction of the flow, and can route this flow in
any desired way so as to influence the flow of selfish
users; and (b) network tolls, where tolls are imposed
on the edges to modify the latencies of the edges, and
thereby influence the induced Nash equilibrium. We
obtain results demonstrating the effectiveness of both
Stackelberg strategies and tolls in controlling the
price of anarchy. For Stackelberg strategies, we obtain
the first results for nonatomic routing in graphs more
general than parallel-link graphs, and strengthen
existing results for parallel-link graphs. (i) In
series-parallel graphs, we show that Stackelberg
routing reduces the PoA to a constant (depending on the
fraction of flow controlled). (ii) For general graphs,
we obtain latency-class specific bounds on the PoA with
Stackelberg routing, which give a continuous trade-off
between the fraction of flow controlled and the price
of anarchy. (iii) In parallel-link graphs, we show that
for any given class L of latency functions, Stackelberg
routing reduces the PoA to at most {$ \alpha + (1 -
\alpha) \cdot \rho (L) $}, where {$ \alpha $} is the
fraction of flow controlled and {$ \rho (L) $} is the
PoA of class {$L$} (when {$ \alpha = 0 $}). For network
tolls, motivated by the known strong results for
nonatomic games, we consider the more general setting
of atomic splittable routing games. We show that tolls
inducing an optimal flow always exist, even for general
asymmetric games with heterogeneous users, and can be
computed efficiently by solving a convex program. This
resolves a basic open question about the effectiveness
of tolls for atomic splittable games. Furthermore, we
give a complete characterization of flows that can be
induced via tolls.",
acknowledgement = ack-nhfb,
articleno = "36",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Czyzowicz:2012:HMA,
author = "Jurek Czyzowicz and Andrzej Pelc and Arnaud Labourel",
title = "How to meet asynchronously (almost) everywhere",
journal = j-TALG,
volume = "8",
number = "4",
pages = "37:1--37:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2344422.2344427",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:02 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Two mobile agents (robots) with distinct labels have
to meet in an arbitrary, possibly infinite, unknown
connected graph or in an unknown connected terrain in
the plane. Agents are modeled as points, and the route
of each of them only depends on its label and on the
unknown environment. The actual walk of each agent also
depends on an asynchronous adversary that may
arbitrarily vary the speed of the agent, stop it, or
even move it back and forth, as long as the walk of the
agent is continuous, does not leave its route and
covers all of it. Meeting in a graph means that both
agents must be at the same time in some node or in some
point inside an edge of the graph, while meeting in a
terrain means that both agents must be at the same time
in some point of the terrain. Does there exist a
deterministic algorithm that allows any two agents to
meet in any unknown environment in spite of this very
powerful adversary? We give deterministic rendezvous
algorithms for agents starting at arbitrary nodes of
any anonymous connected graph (finite or infinite) and
for agents starting at any interior points with
rational coordinates in any closed region of the plane
with path-connected interior. In the geometric scenario
agents may have different compasses and different units
of length. While our algorithms work in a very general
setting --- agents can, indeed, meet almost everywhere
--- we show that none of these few limitations imposed
on the environment can be removed. On the other hand,
our algorithm also guarantees the following approximate
rendezvous for agents starting at arbitrary interior
points of a terrain, as previously stated: agents will
eventually get to within an arbitrarily small positive
distance from each other.",
acknowledgement = ack-nhfb,
articleno = "37",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Binkele-Raible:2012:KPN,
author = "Daniel Binkele-Raible and Henning Fernau and Fedor V.
Fomin and Daniel Lokshtanov and Saket Saurabh and Yngve
Villanger",
title = "Kernel(s) for problems with no kernel: On out-trees
with many leaves",
journal = j-TALG,
volume = "8",
number = "4",
pages = "38:1--38:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2344422.2344428",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:02 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The $k$-Leaf Out-Branching problem is to find an
out-branching, that is a rooted oriented spanning tree,
with at least k leaves in a given digraph. The problem
has recently received much attention from the viewpoint
of parameterized algorithms. Here, we take a
kernelization based approach to the
$k$-Leaf-Out-Branching problem. We give the first
polynomial kernel for Rooted $k$-Leaf-Out-Branching, a
variant of $k$-Leaf-Out-Branching where the root of the
tree searched for is also a part of the input. Our
kernel with O(k$^3$) vertices is obtained using
extremal combinatorics. For the $k$-Leaf-Out-Branching
problem, we show that no polynomial-sized kernel is
possible unless coNP is in NP/poly. However, our
positive results for Rooted $k$-Leaf-Out-Branching
immediately imply that the seemingly intractable
$k$-Leaf-Out-Branching problem admits a data reduction to
$n$ independent polynomial-sized kernels. These two
results, tractability and intractability side by side,
are the first ones separating Karp kernelization from
Turing kernelization. This answers affirmatively an
open problem regarding ``cheat kernelization'' raised
by Mike Fellows and Jiong Guo independently.",
acknowledgement = ack-nhfb,
articleno = "38",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Im:2012:OSA,
author = "Sungjin Im and Benjamin Moseley",
title = "An online scalable algorithm for average flow time in
broadcast scheduling",
journal = j-TALG,
volume = "8",
number = "4",
pages = "39:1--39:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2344422.2344429",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:02 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article, the online pull-based broadcast model
is considered. In this model, there are $n$ pages of
data stored at a server and requests arrive for pages
online. When the server broadcasts page p, all
outstanding requests for the same page p are
simultaneously satisfied. We consider the problem of
minimizing average (total) flow time online where all
pages are unit-sized. For this problem, there has been
a decade-long search for an online algorithm which is
scalable, that is, $ (1 + \epsilon) $-speed {$ O(1)
$}-competitive for any fixed {$ \epsilon > 0 $}. In
this article, we give the first analysis of an online
scalable algorithm.",
acknowledgement = ack-nhfb,
articleno = "39",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Karakostas:2012:FMT,
author = "George Karakostas and Stavros G. Kolliopoulos and Jing
Wang",
title = "An {FPTAS} for the minimum total weighted tardiness
problem with a fixed number of distinct due dates",
journal = j-TALG,
volume = "8",
number = "4",
pages = "40:1--40:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2344422.2344430",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:02 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Given a sequencing of jobs on a single machine, each
one with a weight, processing time, and a due date, the
tardiness of a job is the time needed for its
completion beyond its due date. We present an FPTAS for
the basic scheduling problem of minimizing the total
weighted tardiness when the number of distinct due
dates is fixed. Previously, an FPTAS was known only for
the case where all jobs have a common due date.",
acknowledgement = ack-nhfb,
articleno = "40",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Deshpande:2012:PPF,
author = "Amol Deshpande and Lisa Hellerstein",
title = "Parallel pipelined filter ordering with precedence
constraints",
journal = j-TALG,
volume = "8",
number = "4",
pages = "41:1--41:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2344422.2344431",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:02 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In the parallel pipelined filter ordering problem, we
are given a set of $n$ filters that run in parallel.
The filters need to be applied to a stream of elements,
to determine which elements pass all filters. Each
filter has a rate limit r$_i$ on the number of elements
it can process per unit time, and a selectivity p$_i$,
which is the probability that a random element will
pass the filter. The goal is to maximize throughput.
This problem appears naturally in a variety of
settings, including parallel query optimization in
databases and query processing over Web services. We
present an O(n$^3$) algorithm for this problem, given
tree-structured precedence constraints on the filters.
This extends work of Condon et al. [2009] and Kodialam
[2001], who presented algorithms for solving the
problem without precedence constraints. Our algorithm
is combinatorial and produces a sparse solution.
Motivated by join operators in database queries, we
also give algorithms for versions of the problem in
which ``filter'' selectivities may be greater than or
equal to 1. We prove a strong connection between the
more classical problem of minimizing total work in
sequential filter ordering (A), and the parallel
pipelined filter ordering problem (B). More precisely,
we prove that A is solvable in polynomial time for a
given class of precedence constraints if and only if B
is as well. This equivalence allows us to show that B
is NP-Hard in the presence of arbitrary precedence
constraints (since A is known to be NP-Hard in that
setting).",
acknowledgement = ack-nhfb,
articleno = "41",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{He:2012:SOT,
author = "Meng He and J. Ian Munro and Srinivasa Rao Satti",
title = "Succinct ordinal trees based on tree covering",
journal = j-TALG,
volume = "8",
number = "4",
pages = "42:1--42:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2344422.2344432",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:02 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Various methods have been used to represent a tree on
$n$ nodes in essentially the information-theoretic
minimum space while supporting various navigational
operations in constant time, but different
representations usually support different operations.
Our main contribution is a succinct representation of
ordinal trees, based on that of Geary et al. [2006],
that supports all the navigational operations supported
by various succinct tree representations while
requiring only 2 n + o (n) bits. It also supports
efficient level-order traversal, a useful ordering
previously supported only with a very limited set of
operations. Our second contribution expands on the
notion of a single succinct representation supporting
more than one traversal ordering, by showing that our
method supports two other encoding schemes as abstract
data types. In particular, it supports extracting a
word ({$ O(\lg n) $} bits) of the balanced parenthesis
sequence or depth first unary degree sequence in {$ O(f
(n)) $} time, using at most {$ n / f (n) + o (n) $}
additional bits, for any {$ f(n) $} in {$ O(\lg n) $}
and {$ \Omega (1) $}.",
acknowledgement = ack-nhfb,
articleno = "42",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Agarwal:2012:RSU,
author = "Pankaj K. Agarwal and Siu-Wing Cheng and Ke Yi",
title = "Range searching on uncertain data",
journal = j-TALG,
volume = "8",
number = "4",
pages = "43:1--43:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2344422.2344433",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:02 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Querying uncertain data has emerged as an important
problem in data management due to the imprecise nature
of many measurement data. In this article, we study
answering range queries over uncertain data.
Specifically, we are given a collection {$P$} of {$n$}
uncertain points in {$R$}, each represented by its
one-dimensional probability density function (pdf). The
goal is to build a data structure on {$P$} such that,
given a query interval {$I$} and a probability
threshold {$ \tau $}, we can quickly report all points
of {$P$} that lie in {$I$} with probability at least {$
\tau $}. We present various structures with linear or
near-linear space and (poly)logarithmic query time. Our
structures support pdf's that are either histograms or
more complex ones such as Gaussian or piecewise
algebraic.",
acknowledgement = ack-nhfb,
articleno = "43",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Andoni:2012:SCE,
author = "Alexandr Andoni and Robert Krauthgamer",
title = "The smoothed complexity of edit distance",
journal = j-TALG,
volume = "8",
number = "4",
pages = "44:1--44:??",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2344422.2344434",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:02 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We initiate the study of the smoothed complexity of
sequence alignment, by proposing a semi-random model of
edit distance between two input strings, generated as
follows: First, an adversary chooses two binary strings
of length d and a longest common subsequence A of them.
Then, every character is perturbed independently with
probability p, except that A is perturbed in exactly
the same way inside the two strings. We design two
efficient algorithms that compute the edit distance on
smoothed instances up to a constant factor
approximation. The first algorithm runs in near-linear
time, namely d$^{{1 + \epsilon }}$ for any fixed $
\epsilon > 0 $. The second one runs in time sublinear
in $d$, assuming the edit distance is not too small.
These approximation and runtime guarantees are
significantly better than the bounds that were known
for worst-case inputs. Our technical contribution is
twofold. First, we rely on finding matches between
substrings in the two strings, where two substrings are
considered a match if their edit distance is relatively
small, a prevailing technique in commonly used
heuristics, such as PatternHunter of Ma et al. [2002].
Second, we effectively reduce the smoothed edit
distance to a simpler variant of (worst-case) edit
distance, namely, edit distance on permutations (a.k.a.
Ulam's metric). We are thus able to build on algorithms
developed for the Ulam metric, whose much better
algorithmic guarantees usually do not carry over to
general edit distance.",
acknowledgement = ack-nhfb,
articleno = "44",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Nutov:2012:AMC,
author = "Zeev Nutov",
title = "Approximating minimum-cost connectivity problems via
uncrossable bifamilies",
journal = j-TALG,
volume = "9",
number = "1",
pages = "1:1--1:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2390176.2390177",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:04 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We give approximation algorithms for the Survivable
Network problem. The input consists of a graph $ G =
(V, E) $ with edge/node-costs, a node subset $ S
\subseteq V $, and connectivity requirements $ \{ r(s,
t) : s, t \in T \subseteq V \} $. The goal is to find a
minimum cost subgraph $H$ of $G$ that for all $ s, t
\in T $ contains $ r(s, t) $ pairwise edge-disjoint $ s
t $-paths such that no two of them have a node in $ S
\setminus \{ s, t \} $ in common. Three extensively studied
particular cases are: Edge-Connectivity Survivable
Network ($ S = \emptyset $), Node-Connectivity Survivable
Network ($ S = V $), and Element-Connectivity
Survivable Network ($ r(s, t) = 0 $ whenever $ s \in S
$ or $ t \in S $). Let $ k = \max_{s, t \in T}
r(s, t) $. In Rooted Survivable Network, there is $ s
\in T $ such that $ r(u, t) = 0 $ for all $ u \neq s $,
and in the Subset $k$-Connected Subgraph problem $ r(s,
t) = k $ for all $ s, t \in T $. For edge-costs, our
ratios are $ O(k \log k) $ for Rooted Survivable
Network and $ O(k^2 \log k) $ for Subset $k$-Connected
Subgraph. This improves the previous ratio $ O(k^2 \log
n) $, and for constant values of $k$ settles the
approximability of these problems to a constant. For
node-costs, our ratios are as follows. --- $ O(k \log |
T |) $ for Element-Connectivity Survivable Network,
matching the best known ratio for Edge-Connectivity
Survivable Network. --- $ O(k^2 \log | T |) $ for
Rooted Survivable Network and $ O(k^3 \log | T |) $ for
Subset $k$-Connected Subgraph, improving the ratio $
O(k^8 \log^2 | T |) $. --- $ O(k^4 \log^2 | T |) $ for
Survivable Network; this is the first nontrivial
approximation algorithm for the node-costs version of
the problem.",
acknowledgement = ack-nhfb,
articleno = "1",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Hajiaghayi:2012:PCS,
author = "Mohammadtaghi Hajiaghayi and Rohit Khandekar and Guy
Kortsarz and Zeev Nutov",
title = "Prize-collecting {Steiner} network problems",
journal = j-TALG,
volume = "9",
number = "1",
pages = "2:1--2:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2390176.2390178",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:04 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In the Steiner Network problem, we are given a graph
{$G$} with edge-costs and connectivity requirements {$
r_{u v} $} between node pairs {$ u, v $}. The goal is
to find a minimum-cost subgraph {$H$} of {$G$} that
contains {$ r_{uv} $} edge-disjoint paths for all {$ u,
v \in V $}. In Prize-Collecting Steiner Network
problems, we do not need to satisfy all requirements,
but are given a penalty function for violating the
connectivity requirements, and the goal is to find a
subgraph {$H$} that minimizes the cost plus the
penalty. The case when {$ r_{uv} \in \{ 0, 1 \} $} is
the classic Prize-Collecting Steiner Forest problem. In
this article, we present a novel linear programming
relaxation for the Prize-Collecting Steiner Network
problem, and by rounding it, obtain the first
constant-factor approximation algorithm for submodular
and monotone nondecreasing penalty functions. In
particular, our setting includes all-or-nothing penalty
functions, which charge the penalty even if the
connectivity requirement is slightly violated; this
resolves an open question posed by Nagarajan et al.
[2008]. We further generalize our results for
element-connectivity and node-connectivity.",
acknowledgement = ack-nhfb,
articleno = "2",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Awerbuch:2012:DAM,
author = "Baruch Awerbuch and Rohit Khandekar and Satish Rao",
title = "Distributed algorithms for multicommodity flow
problems via approximate steepest descent framework",
journal = j-TALG,
volume = "9",
number = "1",
pages = "3:1--3:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2390176.2390179",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:04 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider solutions for distributed multicommodity
flow problems, which are solved by multiple agents
operating in a cooperative but uncoordinated manner. We
show first distributed solutions that allow $ (1 +
\epsilon) $ approximation and whose convergence time is
essentially linear in the maximal path length, and is
independent of the number of commodities and the size
of the graph. Our algorithms use a very natural
approximate steepest descent framework, combined with a
blocking flow technique to speed up the convergence in
distributed and parallel environment. Previously known
solutions that achieved comparable convergence time and
approximation ratio required exponential computational
and space overhead per agent.",
acknowledgement = ack-nhfb,
articleno = "3",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Chen:2012:CRS,
author = "Wei Chen and Christian Sommer and Shang-Hua Teng and
Yajun Wang",
title = "A compact routing scheme and approximate distance
oracle for power-law graphs",
journal = j-TALG,
volume = "9",
number = "1",
pages = "4:1--4:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2390176.2390180",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:04 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Compact routing addresses the tradeoff between table
sizes and stretch, which is the worst-case ratio
between the length of the path a packet is routed
through by the scheme and the length of an actual
shortest path from source to destination. We adapt the
compact routing scheme by Thorup and Zwick [2001] to
optimize it for power-law graphs. We analyze our
adapted routing scheme based on the theory of
unweighted random power-law graphs with fixed expected
degree sequence by Aiello et al. [2000]. Our result is
the first analytical bound coupled to the parameter of
the power-law graph model for a compact routing scheme.
Let $n$ denote the number of nodes in the network. We
provide a labeled routing scheme that, after a
stretch--5 handshaking step (similar to DNS lookup in
TCP/IP), routes messages along stretch--3 paths. We
prove that, instead of routing tables with {$ \tilde
{O}(n^{1 / 2}) $} bits ({$ \tilde {O} $} suppresses
factors logarithmic in {$n$}) as in the general scheme
by Thorup and Zwick, expected sizes of {$ O(n^\gamma
\log n) $} bits are sufficient, and that all the
routing tables can be constructed at once in expected
time {$ O(n^{1 + \gamma } \log n) $}, with {$ \gamma =
(\tau - 2) / (2 \tau - 3) + \epsilon $}, where {$ \tau \in
(2, 3) $} is the power-law exponent and {$ \epsilon > 0
$} (which implies $ \epsilon < \gamma < 1 / 3 +
\epsilon $). Both bounds also hold with probability at
least $ 1 - 1 / n $ (independent of $ \epsilon $). The
routing scheme is a labeled scheme, requiring a
stretch-5 handshaking step. The scheme uses addresses
and message headers with {$ O(\log n \log \log n) $}
bits, with probability at least {$ 1 - o(1) $}. We
further demonstrate the effectiveness of our scheme by
simulations on real-world graphs as well as synthetic
power-law graphs. With the same techniques as for the
compact routing scheme, we also adapt the approximate
distance oracle by Thorup and Zwick [2001, 2005] for
stretch-3 and we obtain a new upper bound of expected
{$ \tilde {O}(n^{1 + \gamma }) $} for space and
preprocessing for random power-law graphs. Our distance
oracle is the first one optimized for power-law graphs.
Furthermore, we provide a linear-space data structure
that can answer 5-approximate distance queries in time
at most {$ \tilde {O}(n^{1 / 4 + \epsilon }) $}
(similar to {$ \gamma $}, the exponent actually depends
on {$ \tau $} and lies between {$ \epsilon $} and $ 1 /
4 + \epsilon $).",
acknowledgement = ack-nhfb,
articleno = "4",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Jez:2012:OSP,
author = "Lukasz Jez and Fei Li and Jay Sethuraman and Clifford
Stein",
title = "Online scheduling of packets with agreeable
deadlines",
journal = j-TALG,
volume = "9",
number = "1",
pages = "5:1--5:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2390176.2390181",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:04 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "This article concerns an online packet scheduling
problem that arises as a natural model for buffer
management at a network router. Packets arrive at a
router at integer time steps, and are buffered upon
arrival. Packets have non-negative weights and integer
deadlines that are (weakly) increasing in their arrival
times. In each integer time step, at most one packet
can be sent. The objective is to maximize the sum of
the weights of the packets that are sent by their
deadlines. The main results include an optimal $ (\phi
:= (1 + \sqrt 5) / 2 \approx 1.618) $-competitive
deterministic online algorithm, a $ (4 / 3 \approx
1.33) $-competitive randomized online algorithm against
an oblivious adversary, and a $2$-speed $1$-competitive
deterministic online algorithm. The analysis does not
use a potential function explicitly, but instead
modifies the adversary's buffer and credits the
adversary to account for these modifications.",
acknowledgement = ack-nhfb,
articleno = "5",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
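A minimal Python sketch of the classical greedy baseline for the buffer-management model described in the entry above: in every integer step, transmit the heaviest buffered packet that can still meet its deadline. This is not the phi-competitive algorithm of the paper; the function name and the (arrival, deadline, weight) input format are illustrative only.

import heapq

def greedy_total_weight(packets):
    # packets: nonempty list of (arrival, deadline, weight) triples with
    # integer arrival times and deadlines; a packet may be sent in any
    # integer step t with arrival <= t <= deadline, one packet per step.
    by_arrival = sorted(packets)
    horizon = max(deadline for _, deadline, _ in packets)
    heap, total, i = [], 0, 0          # max-heap on weight via negated keys
    for t in range(horizon + 1):
        while i < len(by_arrival) and by_arrival[i][0] <= t:
            _, deadline, weight = by_arrival[i]
            heapq.heappush(heap, (-weight, deadline))
            i += 1
        # Discard expired packets sitting at the top of the heap; since the
        # top is the globally heaviest buffered packet, the surviving top
        # (if any) is the heaviest packet that can still meet its deadline.
        while heap and heap[0][1] < t:
            heapq.heappop(heap)
        if heap:
            negweight, _ = heapq.heappop(heap)
            total -= negweight
    return total

# Deadlines are agreeable (weakly increasing in arrival time); the greedy
# sends the weight-5 packet, then the weight-4 packet, for a total of 9.
print(greedy_total_weight([(0, 1, 5), (0, 1, 3), (1, 2, 4)]))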
@Article{Bonifaci:2012:ACP,
author = "Vincenzo Bonifaci and Ho-Leung Chan and Alberto
Marchetti-Spaccamela and Nicole Megow",
title = "Algorithms and complexity for periodic real-time
scheduling",
journal = j-TALG,
volume = "9",
number = "1",
pages = "6:1--6:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2390176.2390182",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:04 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We investigate the preemptive scheduling of periodic
tasks with hard deadlines. We show that, even in the
uniprocessor case, no pseudopolynomial-time algorithm
can test the feasibility of a task system within a
constant speedup bound, unless P = NP. This result
contrasts with recent results for sporadic task
systems. For two special cases, synchronous task
systems and systems with a constant number of different
task types, we provide the first polynomial-time
constant-speedup feasibility tests for multiprocessor
platforms. Furthermore, we show that the problem of
testing feasibility is coNP-hard for synchronous
multiprocessor task systems. The complexity of some of
these problems has been open for a long time. We also
propose a weight maximization variant of the
feasibility problem, where every task has a nonnegative
weight, and the goal is to find a subset of tasks that
can be scheduled feasibly and has maximum weight. We
give the first constant-speed, constant-approximation
algorithm for the case of synchronous task systems,
together with related hardness results.",
acknowledgement = ack-nhfb,
articleno = "6",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Halldorsson:2012:WSP,
author = "Magn{\'u}s M. Halld{\'o}rsson",
title = "Wireless scheduling with power control",
journal = j-TALG,
volume = "9",
number = "1",
pages = "7:1--7:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2390176.2390183",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:04 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the scheduling of arbitrary wireless links
in the physical model of interference to minimize the
time for satisfying all requests. We study here the
combined problem of scheduling and power control, where
we seek both an assignment of power settings and a
partition of the links so that each set satisfies the
signal-to-interference-plus-noise (SINR) constraints.
We give an algorithm that attains an approximation
ratio of {$ O(\log n \cdot \log \log \Delta) $}, where
{$n$} is the number of links and {$ \Delta $} is the
ratio between the longest and the shortest link length.
Under the natural assumption that lengths are
represented in binary, this gives the first
approximation ratio that is polylogarithmic in the size
of the input. The algorithm has the desirable property
of using an oblivious power assignment, where the power
assigned to a sender depends only on the length of the
link. We give evidence that this dependence on {$
\Delta $} is unavoidable, showing that any reasonably
behaving oblivious power assignment results in an {$
\Omega (\log \log \Delta) $}-approximation. These
results hold also for the (weighted) capacity problem
of finding a maximum (weighted) subset of links that
can be scheduled in a single time slot. In addition, we
obtain improved approximation for a bidirectional
variant of the scheduling problem, give partial answers
to questions about the utility of graphs for modeling
physical interference, and generalize the setting from
the standard {$2$}-dimensional Euclidean plane to
doubling metrics. Finally, we explore the utility of
graph models in capturing wireless interference.",
acknowledgement = ack-nhfb,
articleno = "7",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
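To make the physical (SINR) interference model of the entry above concrete, the following Python fragment checks whether a fixed set of links with fixed powers can all transmit in one slot. It is only an illustration of the SINR constraint; the path-loss exponent alpha, threshold beta, noise term, and all names are illustrative choices, and none of the paper's scheduling or power-control results are implemented here.

def sinr_feasible(links, powers, alpha=3.0, beta=1.0, noise=0.1):
    # links: list of (sender_xy, receiver_xy) pairs in the plane;
    # powers: transmit power of each link.  Signal strength decays as
    # distance ** (-alpha); link i succeeds if its received signal is at
    # least beta times the noise plus the interference from other senders.
    def dist(p, q):
        return ((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2) ** 0.5
    for i, (sender_i, receiver_i) in enumerate(links):
        signal = powers[i] * dist(sender_i, receiver_i) ** (-alpha)
        interference = sum(powers[j] * dist(sender_j, receiver_i) ** (-alpha)
                           for j, (sender_j, _) in enumerate(links) if j != i)
        if signal < beta * (noise + interference):
            return False
    return True

# Two short, well-separated links coexist in a single slot.
print(sinr_feasible([((0, 0), (1, 0)), ((100, 0), (101, 0))], [1.0, 1.0]))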
@Article{Ebrahimi:2012:CAW,
author = "Javad B. Ebrahimi and Christina Fragouli",
title = "Combinatiorial algorithms for wireless information
flow",
journal = j-TALG,
volume = "9",
number = "1",
pages = "8:1--8:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2390176.2390184",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:04 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A long-standing open question in information theory is
to characterize the unicast capacity of a wireless
relay network. The difficulty arises due to the complex
signal interactions induced in the network, since the
wireless channel inherently broadcasts the signals and
there is interference among transmissions. Recently,
Avestimehr et al. [2007b] proposed a linear
deterministic model that takes into account the shared
nature of wireless channels, focusing on the signal
interactions rather than the background noise. They
generalized the min-cut max-flow theorem for graphs to
networks of deterministic channels and proved that the
capacity can be achieved using information theoretical
tools. They showed that the value of the minimum cut is
in this case the minimum rank of all the adjacency
matrices describing source-destination cuts. In this
article, we develop a polynomial-time algorithm that
discovers the relay encoding strategy to achieve the
min-cut value in linear deterministic (wireless)
networks, for the case of a unicast connection. Our
algorithm crucially uses a notion of linear
independence between channels to calculate the capacity
in polynomial time. Moreover, we can achieve the
capacity by using very simple one-symbol processing at
the intermediate nodes, thereby constructively yielding
finite-length strategies that achieve the unicast
capacity of the linear deterministic (wireless) relay
network.",
acknowledgement = ack-nhfb,
articleno = "8",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Chekuri:2012:SMP,
author = "Chandra Chekuri and Kenneth L. Clarkson and Sariel
Har-Peled",
title = "On the set multicover problem in geometric settings",
journal = j-TALG,
volume = "9",
number = "1",
pages = "9:1--9:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2390176.2390185",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:04 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider the set multicover problem in geometric
settings. Given a set of points {$P$} and a collection
of geometric shapes (or sets) {$F$}, we wish to find a
minimum cardinality subset of {$F$} such that each
point {$ p \in P $} is covered by (contained in) at
least {$ d(p) $} sets. Here, {$ d(p) $} is an integer
demand (requirement) for {$p$}. When the demands $ d(p)
= 1 $ for all $p$, this is the standard set cover
problem. The set cover problem in geometric settings
admits an approximation ratio that is better than that
for the general version. In this article, we show that
similar improvements can be obtained for the multicover
problem as well. In particular, we obtain an {$ O(\log
{\rm opt}) $} approximation for set systems of bounded
VC-dimension, and an {$ O(1) $} approximation for
covering points by half-spaces in three dimensions and
for some other classes of shapes.",
acknowledgement = ack-nhfb,
articleno = "9",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
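For contrast with the geometric bounds in the entry above, here is the standard greedy for general set multicover (repeatedly pick the set that satisfies the most still-unmet demand). It carries only the usual logarithmic guarantee and none of the geometric machinery of the paper; the function name and input format are illustrative.

def greedy_multicover(sets, demand):
    # sets: list of collections of points; demand: dict mapping each point p
    # to its required coverage d(p) >= 1.  Each set may be chosen at most once.
    residual = dict(demand)
    chosen, available = [], set(range(len(sets)))

    def gain(i):
        # number of points in sets[i] whose demand is not yet met
        return sum(1 for p in sets[i] if residual.get(p, 0) > 0)

    while any(r > 0 for r in residual.values()):
        best = max(available, key=gain)
        if gain(best) == 0:
            raise ValueError("remaining demands cannot be met")
        for p in sets[best]:
            if residual.get(p, 0) > 0:
                residual[p] -= 1
        chosen.append(best)
        available.remove(best)
    return chosen

# Cover point 'a' twice and point 'b' once: the greedy picks sets 0 and 1.
print(greedy_multicover([{'a', 'b'}, {'a'}, {'b'}], {'a': 2, 'b': 1}))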
@Article{Giesen:2012:APC,
author = "Joachim Giesen and Martin Jaggi and S{\"o}ren Laue",
title = "Approximating parameterized convex optimization
problems",
journal = j-TALG,
volume = "9",
number = "1",
pages = "10:1--10:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2390176.2390186",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:04 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We consider parameterized convex optimization problems
over the unit simplex that depend on one parameter. We
provide a simple and efficient scheme for maintaining
an $ \epsilon $-approximate solution (and a
corresponding $ \epsilon $-coreset) along the entire
parameter path. We prove correctness and optimality of
the method. Practically relevant instances of the
abstract parameterized optimization problem are for
example regularization paths of support vector
machines, multiple kernel learning, and minimum
enclosing balls of moving points.",
acknowledgement = ack-nhfb,
articleno = "10",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Philip:2012:PKD,
author = "Geevarghese Philip and Venkatesh Raman and Somnath
Sikdar",
title = "Polynomial kernels for dominating set in graphs of
bounded degeneracy and beyond",
journal = j-TALG,
volume = "9",
number = "1",
pages = "11:1--11:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2390176.2390187",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:04 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We show that for every fixed $ j \geq i \geq 1 $, the
$k$-Dominating Set problem restricted to graphs that do
not have {$ K_{i, j} $} (the complete bipartite graph on
{$ (i + j) $} vertices, where the two parts have {$i$}
and {$j$} vertices, respectively) as a subgraph is
fixed-parameter tractable (FPT) and has a polynomial
kernel. We describe a polynomial-time algorithm that,
given a {$ K_{i, j} $}-free graph {$G$} and a
nonnegative integer {$k$}, constructs a graph {$H$}
(the ``kernel'') and an integer {$ k' $} such that (1)
{$G$} has a dominating set of size at most {$k$} if and
only if {$H$} has a dominating set of size at most {$
k' $}, (2) {$H$} has {$ O((j + 1)^{i + 1} k^{i^2}) $}
vertices, and (3) {$ k' = O((j + 1)^{i + 1} k^{i^2})
$}. Since {$d$}-degenerate graphs do not have {$ K_{d +
1, d + 1} $} as a subgraph, this immediately yields a
polynomial kernel on {$ O((d + 2)^{d + 2} k^{(d + 1)^2})
$} vertices for the {$k$}-Dominating Set
problem on {$d$}-degenerate graphs, solving an open
problem posed by Alon and Gutner [Alon and Gutner 2008;
Gutner 2009]. The most general class of graphs for
which a polynomial kernel was previously known for
{$k$}-Dominating Set is the class of {$ K_h
$}-topological-minor-free graphs [Gutner 2009]. Graphs
of bounded degeneracy are the most general class of
graphs for which an FPT algorithm was previously known
for this problem. {$ K_h $}-topological-minor-free
graphs are {$ K_{i, j} $}-free for suitable values of
{$i$}, {$j$} (but not vice-versa), and so our results
show that {$k$}-Dominating Set has both FPT algorithms
and polynomial kernels in strictly more general classes
of graphs. Using the same techniques, we also obtain an
{$ O(j k^i) $} vertex-kernel for the {$k$}-Independent
Dominating Set problem on {$ K_{i, j} $}-free graphs.",
acknowledgement = ack-nhfb,
articleno = "11",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Bodlaender:2012:EAT,
author = "Hans L. Bodlaender and Fedor V. Fomin and Arie M. C.
A. Koster and Dieter Kratsch and Dimitrios M.
Thilikos",
title = "On exact algorithms for treewidth",
journal = j-TALG,
volume = "9",
number = "1",
pages = "12:1--12:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2390176.2390188",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:04 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We give experimental and theoretical results on the
problem of computing the treewidth of a graph by exact
exponential-time algorithms using exponential space or
using only polynomial space. We first report on an
implementation of a dynamic programming algorithm for
computing the treewidth of a graph with running time {$
O^*(2^n) $}. This algorithm is based on the old dynamic
programming method introduced by Held and Karp for the
Traveling Salesman problem. We use some optimizations
that do not affect the worst case running time but
improve on the running time on actual instances and can
be seen to be practical for small instances. We also
consider the problem of computing Treewidth under the
restriction that the space used is only polynomial and
give a simple {$ O^*(4^n) $} algorithm that requires
polynomial space. We also show that with a more
complicated algorithm using balanced separators,
Treewidth can be computed in {$ O^*(2.9512^n) $} time and
polynomial space.",
acknowledgement = ack-nhfb,
articleno = "12",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
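A compact Python rendering of the Held--Karp style subset recurrence that the entry above builds on, namely TW(S) = min over v in S of max(TW(S minus v), |Q(S minus v, v)|) with TW(empty) = -infinity, where Q(S, v) is the set of vertices outside S and v reachable from v through S. This is meant only to make the recurrence concrete on tiny graphs; it uses exponential time and space and reproduces none of the optimizations or the polynomial-space variants of the paper.

from itertools import combinations

def q_size(graph, s, v):
    # |Q(S, v)|: vertices outside S + {v} adjacent to the component of v
    # in the subgraph induced by S + {v}.  `graph` is an adjacency dict.
    seen, stack, q = {v}, [v], set()
    while stack:
        u = stack.pop()
        for w in graph[u]:
            if w in s and w not in seen:
                seen.add(w)
                stack.append(w)
            elif w not in s and w != v:
                q.add(w)
    return len(q)

def treewidth(graph):
    vertices = list(graph)
    tw = {frozenset(): float("-inf")}
    for size in range(1, len(vertices) + 1):
        for comb in combinations(vertices, size):
            s = frozenset(comb)
            tw[s] = min(max(tw[s - {v}], q_size(graph, s - {v}, v))
                        for v in s)
    return tw[frozenset(vertices)]

# A 4-cycle has treewidth 2.
print(treewidth({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}))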
@Article{Amir:2012:CDC,
author = "Amihood Amir and Estrella Eisenberg and Avivit Levy
and Ely Porat and Natalie Shapira",
title = "Cycle detection and correction",
journal = j-TALG,
volume = "9",
number = "1",
pages = "13:1--13:??",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2390176.2390189",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Sat Mar 2 10:10:04 MST 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Assume that a natural cyclic phenomenon has been
measured, but the data is corrupted by errors. The type
of corruption is application-dependent and may be
caused by measurement errors or natural features of
the phenomenon. We assume that an appropriate metric
exists, which measures the amount of corruption
experienced. This article studies the problem of
recovering the correct cycle from data corrupted by
various error models, formally defined as the period
recovery problem. Specifically, we define a metric
property which we call pseudolocality and study the
period recovery problem under pseudolocal metrics.
Examples of pseudolocal metrics are the Hamming
distance, the swap distance, and the interchange (or
Cayley) distance. We show that for pseudolocal metrics,
periodicity is a powerful property that allows detecting
the original cycle and correcting the data, under
suitable conditions. Some surprising features of our
algorithm are that we can efficiently identify the
period in the corrupted data, up to a number of
possibilities logarithmic in the length of the data
string, even for metrics whose calculation is NP-hard.
For the Hamming metric, we can reconstruct the
corrupted data in near-linear time even for unbounded
alphabets. This result is achieved using the property
of separation in the self-convolution vector and
Reed--Solomon codes. Finally, we employ our techniques
beyond the scope of pseudolocal metrics and give a
recovery algorithm for the non-pseudolocal Levenshtein
edit metric.",
acknowledgement = ack-nhfb,
articleno = "13",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
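A brute-force illustration of what period recovery under the Hamming metric asks for: find a small period p such that the string disagrees with its own shift by p in only a few positions. The paper's algorithm is near-linear time with provable guarantees; this quadratic check, its threshold, and its names are illustrative only.

def approximate_period(s, mismatch_ratio=0.25):
    # Return the smallest candidate period p such that s[i] != s[i + p]
    # for at most mismatch_ratio * (len(s) - p) positions i.
    n = len(s)
    for p in range(1, n):
        mismatches = sum(1 for i in range(n - p) if s[i] != s[i + p])
        if mismatches <= mismatch_ratio * (n - p):
            return p
    return n

# One corrupted symbol still leaves the string close to period 3.
print(approximate_period("abcabcabXabc"))   # -> 3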
@Article{Weimann:2013:RPD,
author = "Oren Weimann and Raphael Yuster",
title = "Replacement Paths and Distance Sensitivity Oracles via
Fast Matrix Multiplication",
journal = j-TALG,
volume = "9",
number = "2",
pages = "14:1--14:??",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2438645.2438646",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Mon Jun 24 09:39:37 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A distance sensitivity oracle of an $n$-vertex graph
{$ G = (V, E) $} is a data structure that can report
shortest paths when edges of the graph fail. A query
({$ u \in V $}, {$ v \in V $}, {$ S \subseteq E $}) to
this oracle returns a shortest $u$-to-$v$ path in the
graph {$ G' = (V, E \backslash S) $}. We present
randomized (Monte Carlo) algorithms for constructing a
distance sensitivity oracle of size {$ \tilde {O}(n^{3
- \alpha }) $} for {$ | S | = O(\lg n / \lg \lg n) $}
and any choice of $ 0 < \alpha < 1 $. For real
edge-lengths, the oracle is constructed in {$ O(n^{4 -
\alpha }) $} time and a query to this oracle takes {$
\tilde {O} (n^{2 - 2(1 - \alpha) / |S|}) $} time. For
integral edge-lengths in {$ \{ - M, \ldots {}, M \} $},
using the current $ \omega < 2.376 $ matrix
multiplication exponent, the oracle is constructed in
{$ O(M n^{3.376 - \alpha }) $} time with {$ \tilde
{O}({n^{2 - (1 - \alpha) / |S|}}) $} query, or
alternatively in {$ O(M^{0.681} n^{3.575 - \alpha }) $}
time with {$ \tilde {O}(n^{2 - 2(1 - \alpha) / |S|}) $}
query. Distance sensitivity oracles generalize the
replacement paths problem in which $u$ and $v$ are
known in advance and {$ | S | = 1 $}. In other words,
if {$P$} is a shortest path from $u$ to $v$ in {$G$},
then the replacement paths problem asks to compute, for
every edge $e$ on {$P$}, a shortest $u$-to-$v$ path
that avoids $e$. Our new technique for constructing
distance sensitivity oracles using fast matrix
multiplication also yields the first subcubic-time
algorithm for the replacement paths problem when the
edge-lengths are small integers. In particular, it
yields a randomized (Monte Carlo) {$ \tilde {O}(M
n^{2.376} + M^{2 / 3} n^{2.584}) $}-time algorithm for
the replacement paths problem assuming {$ M \leq
n^{0.624} $}. Finally, we mention that both our
replacement paths algorithm and our distance
sensitivity oracle can be made to work, in the same
time and space bounds, for the case of failed vertices
rather than edges, that is, when {$S$} is a set of
vertices and we seek a shortest $u$-to-$v$ path in the
graph obtained from {$G$} by removing all vertices in
{$S$} and their adjacent edges.",
acknowledgement = ack-nhfb,
articleno = "14",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
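To spell out the replacement paths problem from the entry above, here is the trivial baseline that reruns Dijkstra once per edge of the given shortest path. It is the naive method whose running time the paper's matrix-multiplication approach beats; the helper names and the adjacency format are illustrative.

import heapq

def dijkstra(adj, source, banned=frozenset()):
    # Standard Dijkstra on {u: [(v, w), ...]}, skipping edges in `banned`.
    dist, pq = {source: 0}, [(0, source)]
    while pq:
        d, u = heapq.heappop(pq)
        if d > dist.get(u, float("inf")):
            continue
        for v, w in adj.get(u, []):
            if (u, v) in banned:
                continue
            if d + w < dist.get(v, float("inf")):
                dist[v] = d + w
                heapq.heappush(pq, (d + w, v))
    return dist

def replacement_path_lengths(adj, path, target):
    # For each edge e on the shortest s-to-target `path` (list of vertices),
    # recompute the shortest s-to-target distance with e removed.
    source, result = path[0], {}
    for u, v in zip(path, path[1:]):
        dist = dijkstra(adj, source, banned={(u, v), (v, u)})
        result[(u, v)] = dist.get(target, float("inf"))
    return result

# Undirected square with weights; avoiding either edge of a-b-c costs 4.
adj = {'a': [('b', 1), ('d', 2)], 'b': [('a', 1), ('c', 1)],
       'c': [('b', 1), ('d', 2)], 'd': [('a', 2), ('c', 2)]}
print(replacement_path_lengths(adj, ['a', 'b', 'c'], 'c'))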
@Article{Roditty:2013:AG,
author = "Liam Roditty and Roei Tov",
title = "Approximating the Girth",
journal = j-TALG,
volume = "9",
number = "2",
pages = "15:1--15:??",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2438645.2438647",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Mon Jun 24 09:39:37 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "This article considers the problem of computing a
minimum weight cycle in weighted undirected graphs.
Given a weighted undirected graph {$ G = (V, E, w) $},
let {$C$} be a minimum weight cycle of {$G$}, let {$ w(C) $} be
the weight of {$C$}, and let {$ w_{\rm max}(C) $} be
the weight of the maximum edge of {$C$}. We obtain
three new approximation algorithms for the minimum
weight cycle problem: (1) for integral weights from the
range {$ [1, M] $}, an algorithm that reports a cycle
of weight at most {$ (4 / 3) w(C) $} in {$ O(n^2 \log n
(\log n + \log M)) $} time; (2) For integral weights
from the range {$ [1, M] $}, an algorithm that reports
a cycle of weight at most {$ w(C) + w_{\rm max}(C) $}
in {$ O(n^2 \log n (\log n + \log M)) $} time; (3) For
nonnegative real edge weights, an algorithm that for
any $ \epsilon > 0 $ reports a cycle of weight at most
{$ (4 / 3 + \epsilon) w(C) $} in {$ O((1 / \epsilon) n^2
\log n (\log \log n)) $} time. In a recent
breakthrough, Williams and Williams [2010] showed that
a subcubic algorithm, that computes the exact minimum
weight cycle in undirected graphs with integral weights
from the range {$ [1, M] $}, implies a subcubic
algorithm for computing all-pairs shortest paths in
directed graphs with integral weights from the range {$
[ - M, M] $}. This implies that in order to get a
subcubic algorithm for computing a minimum weight
cycle, we have to relax the problem and to consider an
approximated solution. Lingas and Lundell [2009] were
the first to consider approximation in the context of
minimum weight cycle in weighted graphs. They presented
a 2-approximation algorithm for integral weights with
{$ O(n^2 \log n (\log n + \log M)) $} running time.
They also posed, as an open problem, the question
whether it is possible to obtain a subcubic algorithm
with a $c$-approximation, where $ c < 2 $. The current
article answers this question in the affirmative, by
presenting an algorithm with a 4/3-approximation and the
same running time. Surprisingly, the approximation
factor of 4/3 is not accidental. We show, using the new
result of Williams and Williams [2010], that a subcubic
combinatorial algorithm with $ (4 / 3 - \epsilon)
$-approximation, where $ 0 < \epsilon \leq 1 / 3 $,
implies a subcubic combinatorial algorithm for
multiplying two boolean matrices.",
acknowledgement = ack-nhfb,
articleno = "15",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
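As a baseline for the entry above, an exact minimum weight cycle can be found by deleting each edge in turn and asking for the cheapest way to reconnect its endpoints. This is far slower than the approximation algorithms of the paper and is included only to fix ideas; names and the adjacency format are illustrative.

import heapq

def min_weight_cycle(adj):
    # adj: undirected graph as {u: [(v, w), ...]} with every edge listed in
    # both directions.  For each edge (u, v), remove it and add w(u, v) to
    # the shortest remaining u-to-v distance; the minimum over all edges is
    # the minimum weight cycle.
    def dijkstra(source, banned):
        dist, pq = {source: 0}, [(0, source)]
        while pq:
            d, x = heapq.heappop(pq)
            if d > dist.get(x, float("inf")):
                continue
            for y, w in adj[x]:
                if (x, y) in banned:
                    continue
                if d + w < dist.get(y, float("inf")):
                    dist[y] = d + w
                    heapq.heappush(pq, (d + w, y))
        return dist

    best, seen = float("inf"), set()
    for u in adj:
        for v, w in adj[u]:
            if (v, u) in seen:          # handle each undirected edge once
                continue
            seen.add((u, v))
            d = dijkstra(u, banned={(u, v), (v, u)}).get(v, float("inf"))
            best = min(best, d + w)
    return best

# A unit-weight triangle beats the heavier 4-cycle: the answer is 3.
adj = {'a': [('b', 1), ('c', 1), ('d', 5)], 'b': [('a', 1), ('c', 1)],
       'c': [('a', 1), ('b', 1), ('d', 5)], 'd': [('a', 5), ('c', 5)]}
print(min_weight_cycle(adj))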
@Article{Kawarabayashi:2013:LAA,
author = "Ken-Ichi Kawarabayashi and Yusuke Kobayashi",
title = "An {$ O(\log n) $}-Approximation Algorithm for the
Edge-Disjoint Paths Problem in {Eulerian} Planar
Graphs",
journal = j-TALG,
volume = "9",
number = "2",
pages = "16:1--16:??",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2438645.2438648",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Mon Jun 24 09:39:37 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "In this article, we study an approximation algorithm
for the maximum edge-disjoint paths problem. In this
problem, we are given a graph and a collection of pairs
of vertices, and the objective is to find the maximum
number of pairs that can be connected by edge-disjoint
paths. We give an {$ O(\log n) $}-approximation
algorithm for the maximum edge-disjoint paths problem
when an input graph is either 4-edge-connected planar
or Eulerian planar. This improves an {$ O(\log^2 n)
$}-approximation algorithm given by Kleinberg [2005]
for Eulerian planar graphs. Our result also generalizes
the result by Chekuri et al. [2004, 2005] who gave an
{$ O(\log n) $}-approximation algorithm for the maximum
edge-disjoint paths problem with congestion two when an
input graph is planar.",
acknowledgement = ack-nhfb,
articleno = "16",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Fraigniaud:2013:DIE,
author = "Pierre Fraigniaud and Andrzej Pelc",
title = "Delays Induce an Exponential Memory Gap for Rendezvous
in Trees",
journal = j-TALG,
volume = "9",
number = "2",
pages = "17:1--17:??",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2438645.2438649",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Mon Jun 24 09:39:37 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The aim of rendezvous in a graph is meeting of two
mobile agents at some node of an unknown anonymous
connected graph. In this article, we focus on
rendezvous in trees, and, analogously to the efforts
that have been made for solving the exploration problem
with compact automata, we study the size of memory of
mobile agents that permits solving the rendezvous
problem deterministically. We assume that the agents
are identical, and move in synchronous rounds. We first
show that if the delay between the starting times of
the agents is arbitrary, then the lower bound on memory
required for rendezvous is {$ \Omega (\log n) $} bits,
even for the line of length {$n$}. This lower bound meets a
previously known upper bound of {$ O(\log n) $} bits
for rendezvous in arbitrary graphs of size at most $n$.
Our main result is a proof that the amount of memory
needed for rendezvous with simultaneous start depends
essentially on the number $l$ of leaves of the tree,
and is exponentially less impacted by the number $n$ of
nodes. Indeed, we present two identical agents with {$
O(\log l + \log \log n) $} bits of memory that solve
the rendezvous problem in all trees with at most $n$
nodes and at most $l$ leaves. Hence, for the class of
trees with polylogarithmically many leaves, there is an
exponential gap in minimum memory size needed for
rendezvous between the scenario with arbitrary delay
and the scenario with delay zero. Moreover, we show
that our upper bound is optimal by proving that {$
\Omega (\log l + \log \log n) $} bits of memory are
required for rendezvous, even in the class of trees
with degrees bounded by 3.",
acknowledgement = ack-nhfb,
articleno = "17",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Bansal:2013:SSA,
author = "Nikhil Bansal and Ho-Leung Chan and Kirk Pruhs",
title = "Speed Scaling with an Arbitrary Power Function",
journal = j-TALG,
volume = "9",
number = "2",
pages = "18:1--18:??",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2438645.2438650",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Mon Jun 24 09:39:37 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "This article initiates a theoretical investigation
into online scheduling problems with speed scaling
where the allowable speeds may be discrete and the
power function may be arbitrary, and develops
algorithmic analysis techniques for this setting. We
show that a natural algorithm, which uses Shortest
Remaining Processing Time for scheduling and sets the
power to be one more than the number of unfinished
jobs, is 3-competitive for the objective of total flow
time plus energy. We also show that another natural
algorithm, which uses Highest Density First for
scheduling and sets the power to be the fractional
weight of the unfinished jobs, is a 2-competitive
algorithm for the objective of fractional weighted flow
time plus energy.",
acknowledgement = ack-nhfb,
articleno = "18",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
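A toy discrete-time simulation of the natural policy from the entry above: run Shortest Remaining Processing Time and invest power equal to one more than the number of unfinished jobs. The paper allows an arbitrary power function; this sketch assumes the common special case P(s) = s ** alpha so that the speed can be read off as (n + 1) ** (1 / alpha), and all names, step sizes, and defaults are illustrative.

def simulate_srpt_speed_scaling(jobs, alpha=3.0, dt=1e-3, horizon=50.0):
    # jobs: list of (release_time, work) pairs.  Returns (flow time, energy).
    remaining = {}                            # job id -> remaining work
    release = sorted(enumerate(jobs), key=lambda item: item[1][0])
    flow_time = energy = 0.0
    t, i = 0.0, 0
    while t < horizon and (i < len(release) or remaining):
        while i < len(release) and release[i][1][0] <= t:
            job_id, (_, work) = release[i]
            remaining[job_id] = work
            i += 1
        if remaining:
            n = len(remaining)
            power = n + 1                     # one more than unfinished jobs
            speed = power ** (1.0 / alpha)    # assumes P(s) = s ** alpha
            job_id = min(remaining, key=remaining.get)   # SRPT
            remaining[job_id] -= speed * dt
            if remaining[job_id] <= 0:
                del remaining[job_id]
            flow_time += n * dt               # integral of unfinished jobs
            energy += power * dt
        t += dt
    return flow_time, energy

# Two unit-work jobs released together.
print(simulate_srpt_speed_scaling([(0.0, 1.0), (0.0, 1.0)]))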
@Article{Hochbaum:2013:AAM,
author = "Dorit S. Hochbaum and Asaf Levin",
title = "Approximation Algorithms for a Minimization Variant of
the Order-Preserving Submatrices and for Biclustering
Problems",
journal = j-TALG,
volume = "9",
number = "2",
pages = "19:1--19:??",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2438645.2438651",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Mon Jun 24 09:39:37 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "Finding a largest Order-Preserving SubMatrix, OPSM, is
an important problem arising in the discovery of
patterns in gene expression. Ben-Dor et al. formulated
the problem in Ben-Dor et al. [2003]. They further
showed that the problem is NP-complete and provided a
greedy heuristic for the problem. The complement of the
OPSM problem, called MinOPSM, is to delete the least
number of entries in the matrix so that the remaining
submatrix is order preserving. We devise a
5-approximation algorithm for the MinOPSM based on a
formulation of the problem as a quadratic, nonseparable
set cover problem. An alternative formulation combined
with a primal-dual algorithm improves the approximation
factor to 3. The complexity of both algorithms for a
matrix of size $ m \times n $ is {$ O(m^2 n) $}. We
further comment on the related biclustering problem.",
acknowledgement = ack-nhfb,
articleno = "19",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Chawla:2013:FSI,
author = "Shuchi Chawla and Prasad Raghavendra and Dana
Randall",
title = "Foreword to the {Special Issue on SODA'11}",
journal = j-TALG,
volume = "9",
number = "3",
pages = "20:1--20:??",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2483699.2483700",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Mon Jun 24 09:39:46 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
acknowledgement = ack-nhfb,
articleno = "20",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Ailon:2013:AOU,
author = "Nir Ailon and Edo Liberty",
title = "An Almost Optimal Unrestricted Fast
{Johnson--Lindenstrauss Transform}",
journal = j-TALG,
volume = "9",
number = "3",
pages = "21:1--21:??",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2483699.2483701",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Mon Jun 24 09:39:46 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "The problems of random projections and sparse
reconstruction have much in common and individually
received much attention. Surprisingly, until now they
progressed in parallel and remained mostly separate.
Here, we employ new tools from probability in Banach
spaces that were successfully used in the context of
sparse reconstruction to advance on an open problem in
random projection. In particular, we generalize and use
an intricate result by Rudelson and Vershynin [2008] for
sparse reconstruction which uses Dudley's theorem for
bounding Gaussian processes. Our main result states
that any set of {$ N = \exp (\tilde {O}(n)) $} real
vectors in $n$-dimensional space can be linearly mapped
to a space of dimension {$ k = O(\log N \polylog (n))
$}, while (1) preserving the pairwise distances among
the vectors to within any constant distortion and (2)
being able to apply the transformation in time {$ O(n
\log n) $} on each vector. This improves on the best
known bound {$ N = \exp (\tilde {O}(n^{1 / 2})) $}
achieved by Ailon and Liberty [2009] and {$ N = \exp
(\tilde {O}(n^{1 / 3})) $} by Ailon and Chazelle
[2010]. The dependence in the distortion constant
however is suboptimal, and since the publication of an
early version of the work, the gap between upper and
lower bounds has been considerably tightened
by Krahmer and Ward [2011]. For constant distortion,
this settles the open question posed by these authors
up to a $ \polylog (n) $ factor while considerably
simplifying their constructions.",
acknowledgement = ack-nhfb,
articleno = "21",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
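Purely as background for the entry above, here is a numpy sketch of the classical subsampled randomized Hadamard transform, a map of the same fast H D x flavour that can be applied in O(n log n) time per vector. It is not the construction or the analysis of the paper; the function names, seeds, and parameters are illustrative.

import numpy as np

def fwht(x):
    # Unnormalized fast Walsh-Hadamard transform; len(x) must be a power of
    # two.  Returns a new array.
    x = x.astype(float)
    h = 1
    while h < len(x):
        for i in range(0, len(x), 2 * h):
            a = x[i:i + h].copy()
            b = x[i + h:i + 2 * h].copy()
            x[i:i + h] = a + b
            x[i + h:i + 2 * h] = a - b
        h *= 2
    return x

def make_srht(n, k, seed=0):
    # Build one random map from R^n to R^k (n a power of two): random sign
    # flips, a normalized Hadamard transform, then k sampled coordinates.
    rng = np.random.default_rng(seed)
    signs = rng.choice([-1.0, 1.0], size=n)
    rows = rng.choice(n, size=k, replace=False)

    def transform(x):
        y = fwht(signs * x) / np.sqrt(n)      # normalized H D x
        return np.sqrt(n / k) * y[rows]       # rescaled coordinate sample
    return transform

# One fixed map roughly preserves a pairwise distance.
rng = np.random.default_rng(1)
u, v = rng.normal(size=1024), rng.normal(size=1024)
T = make_srht(1024, 256)
print(np.linalg.norm(u - v), np.linalg.norm(T(u) - T(v)))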
@Article{Chan:2013:PPS,
author = "Timothy M. Chan",
title = "Persistent Predecessor Search and Orthogonal Point
Location on the Word {RAM}",
journal = j-TALG,
volume = "9",
number = "3",
pages = "22:1--22:??",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2483699.2483702",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Mon Jun 24 09:39:46 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We answer a basic data structuring question (e.g.,
raised by Dietz and Raman [1991]): Can van Emde Boas
trees be made persistent, without changing their
asymptotic query/update time? We present a (partially)
persistent data structure that supports predecessor
search in a set of integers in {$ \{ 1, \ldots {}, U \}
$} under an arbitrary sequence of {$n$} insertions and
deletions, with {$ O(\log \log U) $} expected query
time and expected amortized update time, and {$ O(n) $}
space. The query bound is optimal in {$U$} for
linear-space structures and improves previous near-{$
O((\log \log U)^2) $} methods. The same method solves a
fundamental problem from computational geometry: point
location in orthogonal planar subdivisions (where edges
are vertical or horizontal). We obtain the first static
data structure achieving {$ O(\log \log U) $}
worst-case query time and linear space. This result is
again optimal in {$U$} for linear-space structures and
improves the previous {$ O((\log \log U)^2) $} method
by de Berg et al. [1995]. The same result also holds
for higher-dimensional subdivisions that are orthogonal
binary space partitions, and for certain nonorthogonal
planar subdivisions such as triangulations without
small angles. Many geometric applications follow,
including improved query times for orthogonal range
reporting for dimensions $ \geq 3 $ on the RAM. Our key
technique is an interesting new van-Emde-Boas--style
recursion that alternates between two strategies, both
quite simple.",
acknowledgement = ack-nhfb,
articleno = "22",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Daskalakis:2013:CAN,
author = "Constantinos Daskalakis",
title = "On the Complexity of Approximating a {Nash}
Equilibrium",
journal = j-TALG,
volume = "9",
number = "3",
pages = "23:1--23:??",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2483699.2483703",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Mon Jun 24 09:39:46 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "We show that computing a relatively (i.e.,
multiplicatively as opposed to additively) approximate
Nash equilibrium in two-player games is PPAD-complete,
even for constant values of the approximation. Our
result is the first constant inapproximability result
for Nash equilibrium, since the original results on the
computational complexity of the problem [Daskalakis et
al. 2006a; Chen and Deng 2006]. Moreover, it provides
an apparent---assuming that PPAD is not contained in
TIME({$ n^{O(\log n)} $})---dichotomy between the
complexities of additive and relative approximations,
as for constant values of additive approximation a
quasi-polynomial-time algorithm is known [Lipton et al.
2003]. Such a dichotomy does not exist for values of
the approximation that scale inverse-polynomially with
the size of the game, where both relative and additive
approximations are PPAD-complete [Chen et al. 2006]. As
a byproduct, our proof shows that (unconditionally) the
sparse-support lemma [Lipton et al. 2003] cannot be
extended to relative notions of constant
approximation.",
acknowledgement = ack-nhfb,
articleno = "23",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
@Article{Eisenbrand:2013:PDP,
author = "Friedrich Eisenbrand and D{\"o}m{\"o}t{\"o}r
P{\'a}lv{\"o}lgyi and Thomas Rothvo{\ss}",
title = "Bin Packing via Discrepancy of Permutations",
journal = j-TALG,
volume = "9",
number = "3",
pages = "24:1--24:??",
month = jun,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2483699.2483704",
ISSN = "1549-6325 (print), 1549-6333 (electronic)",
ISSN-L = "1549-6325",
bibdate = "Mon Jun 24 09:39:46 MDT 2013",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/talg.bib",
abstract = "A well-studied special case of bin packing is the
3-partition problem, where {$n$} items of size {$ > 1 / 4 $} have
to be packed in a minimum number of bins of capacity
one. The famous Karmarkar--Karp algorithm transforms a
fractional solution of a suitable LP relaxation for
this problem into an integral solution that requires at
most {$ O(\log n) $} additional bins. The
three-permutations-problem of Beck is the following.
Given any three permutations on {$n$} symbols, color the
symbols red and blue, such that in any interval of any
of those permutations, the number of red and blue
symbols is roughly the same. The necessary difference
is called the discrepancy. We establish a surprising
connection between bin packing and Beck's problem: The
additive integrality gap of the 3-partition linear
programming relaxation can be bounded by the
discrepancy of three permutations. This connection
yields an alternative method to establish an {$ O(\log
n) $} bound on the additive integrality gap of the
3-partition. Conversely, making use of a recent example
of three permutations, for which a discrepancy of {$
\Omega (\log n) $} is necessary, we prove the
following: The {$ O(\log^2 n) $} upper bound on the
additive gap for bin packing with arbitrary item sizes
cannot be improved by any technique that is based on
rounding up items. This lower bound holds for a large
class of algorithms including the Karmarkar--Karp
procedure.",
acknowledgement = ack-nhfb,
articleno = "24",
fjournal = "ACM Transactions on Algorithms (TALG)",
journal-URL = "http://portal.acm.org/browse_dl.cfm?idx=J982",
}
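To make the discrepancy notion in the entry above concrete, the following Python fragment evaluates a given red/blue coloring against Beck's three-permutations problem: the discrepancy of the coloring is the largest red/blue imbalance over all intervals of all permutations, which per permutation equals the maximum prefix sum minus the minimum prefix sum. It only checks a coloring; it does not construct low-discrepancy colorings or reproduce the bin packing connection. Names are illustrative.

def coloring_discrepancy(perms, color):
    # perms: list of permutations of the same symbols;
    # color: dict mapping each symbol to +1 (red) or -1 (blue).
    worst = 0
    for perm in perms:
        prefix, lo, hi = 0, 0, 0
        for symbol in perm:
            prefix += color[symbol]
            lo, hi = min(lo, prefix), max(hi, prefix)
        worst = max(worst, hi - lo)     # largest |interval sum| in this perm
    return worst

# An alternating coloring keeps every interval of these permutations balanced.
perms = [[1, 2, 3, 4], [2, 1, 4, 3], [3, 4, 1, 2]]
color = {1: +1, 2: -1, 3: +1, 4: -1}
print(coloring_discrepancy(perms, color))   # -> 1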
@Article{Gawrychowski:2013:OPM,
author = "Pawel Gawrychowski",
title = "Optimal Pattern Matching in {LZW} Compressed Strings",
journal = j-TALG,
volume = "9",
number = "3",
pages