%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.27",
%%%     date            = "23 December 2023",
%%%     time            = "05:34:11 MST",
%%%     filename        = "jea.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     checksum        = "28343 14882 80596 758684",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "ACM Journal of Experimental Algorithmics;
%%%                        bibliography; BibTeX; JEA",
%%%     license         = "public domain",
%%%     supported       = "yes",
%%%     docstring       = "This is a COMPLETE bibliography of the
%%%                        ACM Journal of Experimental Algorithmics
%%%                        (CODEN none, ISSN 1084-6654).  The journal
%%%                        appears once a year, and has no separate
%%%                        issue numbers.
%%%
%%%                        At version 1.27, the COMPLETE year coverage
%%%                        looked like this:
%%%
%%%                             1996 (   4)    2006 (  17)    2016 (  18)
%%%                             1997 (   5)    2007 (   0)    2017 (  12)
%%%                             1998 (   9)    2008 (  29)    2018 (  14)
%%%                             1999 (   8)    2009 (  28)    2019 (  26)
%%%                             2000 (  17)    2010 (  12)    2020 (  14)
%%%                             2001 (  10)    2011 (  18)    2021 (  15)
%%%                             2002 (  12)    2012 (   2)    2022 (  16)
%%%                             2003 (   6)    2013 (  14)    2023 (  14)
%%%                             2004 (   6)    2014 (   0)
%%%                             2005 (  15)    2015 (  25)
%%%
%%%                             Article:        366
%%%
%%%                             Total entries:  366
%%%
%%%                        The author will be grateful for reports of
%%%                        any errors or omissions in this file; they
%%%                        will be corrected in future editions.
%%%
%%%                        Articles and letters or corrections that
%%%                        comment on them are cross-referenced in both
%%%                        directions, so that citation of one of them
%%%                        will automatically include the others.
%%%
%%%                        The ACM maintains Web pages for this journal at
%%%
%%%                            http://portal.acm.org/browse_dl.cfm?idx=J430
%%%
%%%                        That data has been automatically converted
%%%                        to BibTeX form, corrected for spelling and
%%%                        page number errors, and merged into this
%%%                        file.
%%%
%%%                        ACM copyrights explicitly permit abstracting
%%%                        with credit, so article abstracts, keywords,
%%%                        and subject classifications have been
%%%                        included in this bibliography wherever
%%%                        available.  Article reviews have been
%%%                        omitted, until their copyright status has
%%%                        been clarified.
%%%
%%%                        Spelling has been verified with the UNIX
%%%                        spell and GNU ispell programs using the
%%%                        exception dictionary stored in the companion
%%%                        file with extension .sok.
%%%
%%%                        BibTeX citation tags are uniformly chosen
%%%                        as name:year:abbrev, where name is the
%%%                        family name of the first author or editor,
%%%                        year is a 4-digit number, and abbrev is a
%%%                        3-letter condensation of important title
%%%                        words.  Citation tags were automatically
%%%                        generated by the biblabel software
%%%                        developed for the BibNet Project; an
%%%                        illustrative sketch of the tagging
%%%                        convention follows this header block.
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, with the help of
%%%                        ``bibsort -byvolume''.  The bibsort utility,
%%%                        and several related programs for
%%%                        bibliography maintenance, is available on
%%%                        ftp.math.utah.edu in /pub/tex/bib, and at
%%%                        other Internet sites which mirror it,
%%%                        including the Comprehensive TeX Archive
%%%                        Network (CTAN); the command `finger
%%%                        ctan<at>tug.org' will produce a list of
%%%                        CTAN hosts.
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.",
%%%  }
%%% ====================================================================
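%%% The following short Python sketch illustrates the name:year:abbrev
%%% citation-tag convention described in the header above.  It is an
%%% editorial illustration, not the actual biblabel program (which also
%%% handles accents, hyphenated names, and tag collisions); the function
%%% name and stopword list are invented for the example.

def make_tag(family_name, year, title):
    # Condense up to three significant title words into their initials.
    stopwords = {"a", "an", "and", "for", "in", "of", "on", "the"}
    words = [w for w in title.lower().split() if w not in stopwords]
    abbrev = "".join(w[0].upper() for w in words[:3])
    return "%s:%s:%s" % (family_name, year, abbrev)

# Example, matching the first entry below:
#   make_tag("Knuth", 1996, "Irredundant intervals")  -->  "Knuth:1996:II"
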
@Preamble{
   "\input path.sty"
 # "\hyphenation{ }"
 # "\ifx \undefined \mathbb \def \mathbb #1{{\bf #1}}\fi"
}

%%% ====================================================================
%%% Acknowledgement abbreviations:
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|https://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:
@String{j-ACM-J-EXP-ALGORITHMICS = "ACM Journal of Experimental Algorithmics"}

%%% ====================================================================
%%% Bibliography entries, sorted in publication order.
@Article{Knuth:1996:II,
  author =       "Donald E. Knuth",
  title =        "Irredundant intervals",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "1",
  pages =        "1:1--1:??",
  month =        "????",
  year =         "1996",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/235141.235146",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:01:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "This expository note presents simplifications of a
                 theorem due to Gy{\H{o}}ri and an algorithm due to
                 Franzblau and Kleitman: Given a family $F$ of $m$
                 intervals on a linearly ordered set of $n$ elements, we
                 can construct in $O((m + n)^2)$ steps an irredundant
                 subfamily having maximum cardinality, as well as a
                 generating family having minimum cardinality. The
                 algorithm is of special interest because it solves a
                 problem analogous to finding a maximum independent set,
                 but on a class of objects that is more general than a
                 matroid. This note is also a complete, runnable
                 computer program, which can be used for experiments in
                 conjunction with the public-domain software of The
                 Stanford GraphBase.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Gittleman:1996:PSS,
  author =       "Arthur Gittleman",
  title =        "Predicting string search speed",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "1",
  pages =        "2:1--2:??",
  month =        "????",
  year =         "1996",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/235141.235147",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:01:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "String search is fundamental in many text processing
                 applications. Sunday recently gave several algorithms
                 to find the first occurrence of a pattern string as a
                 substring of a text, providing experimental data from
                 searches in a text of about 200K characters to support
                 his claim that his algorithms are faster than the
                 standard Boyer--Moore algorithm. We present a
                 methodology for the average-case analysis of the
                 performance of string search algorithms---for such
                 algorithms, a worst-case analysis does not yield much
                 useful information, since the performance of the
                 algorithm is directly affected by such characteristics
                 as the size of the character set, the character
                 frequencies, and the structure of the text. Knuth
                 described a finite automaton which can be used to save
                 information about character comparisons. Baeza-Yates,
                 Gonnet, and R{\'e}gnier gave a probabilistic analysis of
                 the worst- and average-case behavior of a string search
                 algorithm based upon such an automaton. We construct
                 Knuth automata to model Sunday's algorithms and use the
                 methods of Baeza-Yates et al. to obtain an average-case
                 analysis which confirms Sunday's experimental data.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
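
%%% A minimal Python sketch of Sunday's quick-search algorithm, the
%%% Boyer--Moore variant discussed in the abstract above: on a mismatch,
%%% the shift is determined by the text character just beyond the
%%% current window.  Illustrative only; names are invented and this is
%%% not code from the paper.
def quick_search(text, pattern):
    m, n = len(pattern), len(text)
    if m == 0 or m > n:
        return -1
    # shift[c] = distance from the last occurrence of c to just past the
    # end of the pattern; characters not in the pattern shift by m + 1.
    shift = {c: m - i for i, c in enumerate(pattern)}
    i = 0
    while i <= n - m:
        if text[i:i + m] == pattern:
            return i
        if i + m >= n:
            break
        i += shift.get(text[i + m], m + 1)
    return -1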

@Article{Bader:1996:PPA,
  author =       "David A. Bader and David R. Helman and Joseph
                 J{\'a}J{\'a}",
  title =        "Practical parallel algorithms for personalized
                 communication and integer sorting",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "1",
  pages =        "3:1--3:??",
  month =        "????",
  year =         "1996",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/235141.235148",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:01:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A fundamental challenge for parallel computing is to
                 obtain high-level, architecture independent, algorithms
                 which efficiently execute on general-purpose parallel
                 machines. With the emergence of message passing
                 standards such as MPI, it has become easier to design
                 efficient and portable parallel algorithms by making
                 use of these communication primitives. While existing
                 primitives allow an assortment of collective
                 communication routines, they do not handle an important
                 communication event when most or all processors have
                 non-uniformly sized personalized messages to exchange
                 with each other. We focus in this paper on the
                 $h$-relation personalized communication whose efficient
                 implementation will allow high performance
                 implementations of a large class of algorithms. While
                 most previous $h$-relation algorithms use randomization,
                 this paper presents a new deterministic approach for
                 $h$-relation personalized communication with
                 asymptotically optimal complexity for $h > p^2$. As an
                 application, we present an efficient algorithm for
                 stable integer sorting. The algorithms presented in
                 this paper have been coded in Split-C and run on a
                 variety of platforms, including the Thinking Machines
                 CM-5, IBM SP-1 and SP-2, Cray Research T3D, Meiko
                 Scientific CS-2, and the Intel Paragon. Our
                 experimental results are consistent with the
                 theoretical analysis and illustrate the scalability and
                 efficiency of our algorithms across different
                 platforms. In fact, they seem to outperform all similar
                 algorithms known to the authors on these platforms.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{LaMarca:1996:ICP,
  author =       "Anthony LaMarca and Richard Ladner",
  title =        "The influence of caches on the performance of heaps",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "1",
  pages =        "4:1--4:??",
  month =        "????",
  year =         "1996",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/235141.235145",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:01:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "As memory access times grow larger relative to
                 processor cycle times, the cache performance of
                 algorithms has an increasingly large impact on overall
                 performance. Unfortunately, most commonly used
                 algorithms were not designed with cache performance in
                 mind. This paper investigates the cache performance of
                 implicit heaps. We present optimizations which
                 significantly reduce the cache misses that heaps incur
                 and improve their overall performance. We present an
                 analytical model called collective analysis that allows
                 cache performance to be predicted as a function of both
                 cache configuration and algorithm configuration. As
                 part of our investigation, we perform an approximate
                 analysis of the cache performance of both traditional
                 heaps and our improved heaps in our model. In addition
                 empirical data is given for five architectures to show
                 the impact our optimizations have on overall
                 performance. We also revisit a priority queue study
                 originally performed by Jones [25]. Due to the
                 increases in cache miss penalties, the relative
                 performance results we obtain on today's machines
                 differ greatly from the machines of only ten years ago.
                 We compare the performance of implicit heaps, skew
                 heaps and splay trees and discuss the difference
                 between our results and Jones's.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
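
%%% One cache-conscious idea in this line of work is to use an implicit
%%% d-ary heap with d chosen so that all children of a node share one
%%% cache block.  A minimal Python sketch of the sift-down step follows;
%%% it is illustrative only, not the authors' optimized code.
def sift_down(heap, i, d=4):
    # Children of node i live at d*i + 1 .. d*i + d in the array.
    n = len(heap)
    while True:
        first = d * i + 1
        if first >= n:
            return
        smallest = min(range(first, min(first + d, n)),
                       key=heap.__getitem__)
        if heap[i] <= heap[smallest]:
            return
        heap[i], heap[smallest] = heap[smallest], heap[i]
        i = smallest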

@Article{Buchsbaum:1997:AAS,
  author =       "Adam L. Buchsbaum and Raffaele Giancarlo",
  title =        "Algorithmic aspects in speech recognition: an
                 introduction",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "2",
  pages =        "1:1--1:??",
  month =        "????",
  year =         "1997",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/264216.264219",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:14 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Speech recognition is an area with a considerable
                 literature, but there is little discussion of the topic
                 within the computer science algorithms literature. Many
                 computer scientists, however, are interested in the
                 computational problems of speech recognition. This
                 paper presents the field of speech recognition and
                 describes some of its major open problems from an
                 algorithmic viewpoint. Our goal is to stimulate the
                 interest of algorithm designers and experimenters to
                 investigate the algorithmic problems of effective
                 automatic speech recognition.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "automata theory; graph searching",
}

@Article{Battiti:1997:RSH,
  author =       "Roberto Battiti and Marco Protasi",
  title =        "Reactive search, a history-sensitive heuristic for
                 {MAX}-{SAT}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "2",
  pages =        "2:1--2:??",
  month =        "????",
  year =         "1997",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/264216.264220",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:14 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The Reactive Search (RS) method proposes the
                 integration of a simple history-sensitive (machine
                 learning) scheme into local search for the on-line
                 determination of free parameters. In this paper a new
                 RS algorithm is proposed for the approximate solution
                 of the Maximum Satisfiability problem: a component
                 based on local search with temporary prohibitions (Tabu
                 Search) is complemented with a reactive scheme that
                 determines the appropriate value of the prohibition
                 parameter by monitoring the Hamming distance along the
                 search trajectory. The proposed algorithm (H-RTS) can
                 therefore be characterized as a dynamic version of Tabu
                 Search. In addition, the non-oblivious functions
                 recently introduced in the framework of approximation
                 algorithms are used to discover a better local optimum
                 in the initial part of the search. The algorithm is
                 developed in two phases. First the bias-diversification
                 properties of individual candidate components are
                 analyzed by extensive empirical evaluation, then a
                 reactive scheme is added to the winning component,
                 based on Tabu Search. The final tests on a benchmark of
                 random MAX-3-SAT and MAX-4-SAT problems demonstrate the
                 superiority of H-RTS with respect to alternative
                 heuristics.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
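
%%% A minimal Python sketch of the flip-based Tabu Search core that
%%% H-RTS builds on: a flipped variable is prohibited for `tenure'
%%% steps.  The reactive layer (adapting the tenure by monitoring
%%% Hamming distance) and the non-oblivious starting phase are omitted;
%%% names and the clause encoding (DIMACS-style signed integers) are
%%% choices made for this illustration.
import random

def tabu_maxsat(clauses, n_vars, steps=1000, tenure=10):
    assign = [random.random() < 0.5 for _ in range(n_vars)]
    last_flip = [-tenure - 1] * n_vars

    def num_sat(a):
        # A literal l refers to variable abs(l) - 1, positive if l > 0.
        return sum(any(a[abs(l) - 1] == (l > 0) for l in c) for c in clauses)

    def flipped_score(v):
        assign[v] = not assign[v]
        s = num_sat(assign)
        assign[v] = not assign[v]
        return s

    best = num_sat(assign)
    for t in range(steps):
        allowed = [v for v in range(n_vars) if t - last_flip[v] > tenure]
        if not allowed:
            continue
        v = max(allowed, key=flipped_score)   # best non-prohibited flip
        assign[v] = not assign[v]
        last_flip[v] = t
        best = max(best, num_sat(assign))
    return best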

@Article{Smith:1997:EHF,
  author =       "Bradley J. Smith and Gregory L. Heileman and Chaouki
                 Abdallah",
  title =        "The exponential hash function",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "2",
  pages =        "3:1--3:??",
  month =        "????",
  year =         "1997",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/264216.264221",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:14 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper an efficient open address hash function
                 called exponential hashing is developed. The motivation
                 for this hash function resulted from our ongoing
                 efforts to apply dynamical systems theory to the study
                 of hashing; however, the analysis conducted in this
                 paper is primarily based on traditional number theory.
                 Proofs of optimal table parameter choices are provided
                 for a number of hash functions. We also demonstrate
                 experimentally that exponential hashing essentially
                 matches the performance of a widely-used optimal double
                 hash function for uniform data distributions, and
                 performs significantly better for nonuniform data
                 distributions. We show that exponential hashing
                 exhibits a higher integer Lyapunov exponent and entropy
                 than double hashing for initial data probes, which
                 offers one explanation for its improved performance on
                 nonuniform data distributions.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "ADT; chaos; dynamic dictionary; dynamical systems
                 theory; exponential hashing; Lyapunov exponent; number
                 theory",
}
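
%%% The entry above compares exponential hashing against double hashing.
%%% The exact exponential probe function is not reproduced here; as a
%%% reference point, this is a minimal Python sketch of the classical
%%% double-hashing probe sequence (names invented for the example).
def double_hash_probes(key, table_size):
    # Probe sequence h1 + i*h2 (mod m); with a prime table size, h2 is
    # nonzero and coprime to m, so every slot is eventually visited.
    h1 = key % table_size
    h2 = 1 + (key % (table_size - 1))
    for i in range(table_size):
        yield (h1 + i * h2) % table_size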

@Article{Purchase:1997:ESB,
  author =       "H. C. Purchase and R. F. Cohen and M. I. James",
  title =        "An experimental study of the basis for graph drawing
                 algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "2",
  pages =        "4:1--4:??",
  month =        "????",
  year =         "1997",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/264216.264222",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:14 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Designers of graph drawing algorithms and systems
                 claim to illuminate application data by producing
                 layouts that optimise measurable aesthetic qualities.
                 Examples of these aesthetics include symmetry (where
                 possible, a symmetrical view of the graph should be
                 displayed), minimise arc crossings (the number of arc
                 crossings in the display should be minimised), and
                 minimise bends (the total number of bends in polyline
                 arcs should be minimised). The aim of this paper is to
                 describe our work to validate these claims by
                 performing empirical studies of human understanding of
                 graphs drawn using various layout aesthetics. This work
                 is important since it indicates to algorithm and
                 system designers which aesthetic qualities are most
                 important for aiding understanding, and consequently
                 helps them build more effective systems.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "human-computer interaction",
}

@Article{Alberts:1997:ESD,
  author =       "David Alberts and Giuseppe Cattaneo and Giuseppe F.
                 Italiano",
  title =        "An empirical study of dynamic graph algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "2",
  pages =        "5:1--5:??",
  month =        "????",
  year =         "1997",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/264216.264223",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:14 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The contributions of this paper are both of
                 theoretical and of experimental nature. From the
                 experimental point of view, we conduct an empirical
                 study on some dynamic connectivity algorithms which
                 were developed recently. In particular, the following
                 implementations were tested and compared with simple
                 algorithms: simple sparsification by Eppstein et al.
                 and the recent randomized algorithm by Henzinger and
                 King. In our experiments, we considered both random and
                 non-random inputs. Moreover, we present a simplified
                 variant of the algorithm by Henzinger and King, which
                 for random inputs was always faster than the original
                 implementation. For non-random inputs, simple
                 sparsification was the fastest algorithm for small
                 sequences of updates; for medium and large sequences of
                 updates, the original algorithm by Henzinger and King
                 was faster. From the theoretical point of view, we
                 analyze the average case running time of simple
                 sparsification and prove that for dynamic random graphs
                 its logarithmic overhead vanishes.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Hemaspaandra:1998:PBA,
  author =       "Lane A. Hemaspaandra and Kulathur S. Rajasethupathy
                 and Prasanna Sethupathy and Marius Zimand",
  title =        "Power balance and apportionment algorithms for the
                 {United States Congress}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "1:1--1:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/297096.297106",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We measure the performance, in the task of
                 apportioning the Congress of the United States, of an
                 algorithm combining a heuristic-driven (simulated
                 annealing) search with an exact-computation dynamic
                 programming evaluation of the apportionments visited in
                 the search. We compare this with the actual algorithm
                 currently used in the United States to apportion
                 Congress, and with a number of other algorithms that
                 have been proposed. We conclude that on every set of
                 census data in this country's history, the
                 heuristic-driven apportionment provably yields far
                 fairer apportionments than those of any of the other
                 algorithms considered, including the algorithm currently
                 used by the United States for Congressional
                 apportionment.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "apportionment algorithms; power indices; simulated
                 annealing",
}
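
%%% The "algorithm currently used by the United States" mentioned above
%%% is the Huntington--Hill method of equal proportions.  A minimal
%%% Python sketch follows (illustrative; names invented): after each
%%% state receives one seat, the next seat goes to the state with the
%%% largest priority value pop / sqrt(n * (n + 1)), where n is its
%%% current number of seats.
import heapq, math

def huntington_hill(populations, seats):
    # populations: dict state -> population; seats >= number of states.
    alloc = {s: 1 for s in populations}
    heap = [(-p / math.sqrt(2), s) for s, p in populations.items()]
    heapq.heapify(heap)
    for _ in range(seats - len(populations)):
        _, s = heapq.heappop(heap)
        alloc[s] += 1
        n = alloc[s]
        heapq.heappush(heap, (-populations[s] / math.sqrt(n * (n + 1)), s))
    return alloc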

@Article{Cho:1998:WBL,
  author =       "Seonghun Cho and Sartaj Sahni",
  title =        "Weight-biased leftist trees and modified skip lists",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "2:1--2:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/297096.297111",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We propose the weight biased leftist tree as an
                 alternative to traditional leftist trees [CRAN72] for
                 the representation of mergeable priority queues. A
                 modified version of skip lists [PUGH90] that uses fixed
                 size nodes is also proposed. Experimental results show
                 our modified skip list structure is faster than the
                 original skip list structure for the representation of
                 dictionaries. Experimental results comparing weight
                 biased leftist trees and competing priority queue
                 structures are presented.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dictionary; leftist trees; priority queues; skip
                 lists",
}
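
%%% A minimal Python sketch of the weight-biased leftist tree idea from
%%% the entry above: subtree weight (node count) replaces the s-value of
%%% ordinary leftist trees, and the heavier subtree is kept on the left
%%% after a merge.  The paper performs the merge in a single top-down
%%% pass; this recursive version only conveys the invariant.
def wblt_weight(t):
    return t.weight if t else 0

class WBLTNode:
    def __init__(self, key, left=None, right=None):
        self.key, self.left, self.right = key, left, right
        self.weight = 1 + wblt_weight(left) + wblt_weight(right)

def wblt_merge(a, b):
    if not a or not b:
        return a or b
    if b.key < a.key:          # min-heap order
        a, b = b, a
    a.right = wblt_merge(a.right, b)
    a.weight = 1 + wblt_weight(a.left) + wblt_weight(a.right)
    if wblt_weight(a.left) < wblt_weight(a.right):
        a.left, a.right = a.right, a.left
    return a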

@Article{Yan:1998:LBE,
  author =       "Yong Yan and Xiaodong Zhang",
  title =        "Lock bypassing: an efficient algorithm for
                 concurrently accessing priority heaps",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "3:1--3:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/297096.297116",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The heap representation of priority queues is one of
                 the most widely used data structures in the design of
                 parallel algorithms. Efficiently exploiting the
                 parallelism of a priority heap has significant
                 influence on the efficiency of a wide range of
                 applications and parallel algorithms. In this paper, we
                 propose an aggressive priority heap operating
                 algorithm, called the lock bypassing algorithm (LB) on
                 shared memory systems. This algorithm minimizes
                 interference of concurrent enqueue and dequeue
                 operations on priority heaps while keeping the strict
                 priority property: a dequeue always returns the minimum
                 of a heap. The unique idea that distinguishes the LB
                 algorithm from previous concurrent algorithms on
                 priority heaps is the use of locking-on-demand and
                 lock-bypassing techniques to minimize locking
                 granularity and to maximize parallelism. The LB
                 algorithm allows an enqueue operation to bypass the
                 locks along its insertion path until it reaches a
                 possible place where it can perform the insertion.
                 Meanwhile a dequeue operation also makes its locking
                 range and locking period as small as possible by
                 carefully tuning its execution procedure. The LB
                 algorithm is shown to be correct in terms of deadlock
                 freedom and heap consistency. The performance of the LB
                 algorithm was evaluated analytically and experimentally
                 in comparison with previous algorithms. Analytical
                 results show that the LB algorithm reduces by half the
                 number of locks waited for in the worst case by
                 previous algorithms. The experimental results show that
                 the LB algorithm outperforms previously designed
                 algorithms by up to a factor of 2 in hold time.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "aggressive locking; parallel algorithm; performance
                 evaluation; priority heap; shared-memory system",
}

@Article{Helman:1998:NDP,
  author =       "David R. Helman and Joseph J{\'a}J{\'a} and David A.
                 Bader",
  title =        "A new deterministic parallel sorting algorithm with an
                 experimental evaluation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "4:1--4:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/297096.297128",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We introduce a new deterministic parallel sorting
                 algorithm for distributed memory machines based on the
                 regular sampling approach. The algorithm uses only two
                 rounds of regular all-to-all personalized communication
                 in a scheme that yields very good load balancing with
                 virtually no overhead. Moreover, unlike previous
                 variations, our algorithm efficiently handles the
                 presence of duplicate values without the overhead of
                 tagging each element with a unique identifier. This
                 algorithm was implemented in SPLIT-C and run on a
                 variety of platforms, including the Thinking Machines
                 CM-5, the IBM SP-2-WN, and the Cray Research T3D. We
                 ran our code using widely different benchmarks to
                 examine the dependence of our algorithm on the input
                 distribution. Our experimental results illustrate the
                 efficiency and scalability of our algorithm across
                 different platforms. In fact, the performance compares
                 closely to that of our random sample sort algorithm,
                 which seems to outperform all similar algorithms known
                 to the authors on these platforms. Together, their
                 performance is nearly invariant over the set of input
                 distributions, unlike previous efficient algorithms.
                 However, unlike our randomized sorting algorithm, the
                 performance and memory requirements of our regular
                 sorting algorithm can be deterministically
                 guaranteed.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "generalized sorting; integer sorting; parallel
                 algorithms; parallel performance; sorting by regular
                 sampling",
}
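
%%% A sequential Python simulation of sorting by regular sampling, the
%%% scheme used by the entry above: each of p "processors" sorts a
%%% block, regular samples of the sorted blocks yield p - 1 pivots, and
%%% the pivots define the single all-to-all exchange.  Illustrative
%%% only; the authors' implementation is in Split-C.
import bisect

def regular_sample_sort(data, p):
    n = len(data)
    blocks = [sorted(data[i * n // p:(i + 1) * n // p]) for i in range(p)]
    samples = sorted(s for b in blocks
                     for s in b[::max(1, len(b) // p)][:p])
    pivots = [samples[(i + 1) * len(samples) // p] for i in range(p - 1)]
    buckets = [[] for _ in range(p)]
    for b in blocks:
        cuts = [0] + [bisect.bisect(b, piv) for piv in pivots] + [len(b)]
        for j in range(p):
            buckets[j].extend(b[cuts[j]:cuts[j + 1]])
    # Each bucket holds a disjoint key range, so the sorted buckets
    # concatenate into a fully sorted sequence.
    return [x for bucket in buckets for x in sorted(bucket)]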

@Article{Frigioni:1998:EAD,
  author =       "Daniele Frigioni and Mario Ioffreda and Umberto Nanni
                 and Giulio Pasqualone",
  title =        "Experimental analysis of dynamic algorithms for the
                 single",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "5:1--5:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/297096.297147",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper we propose the first experimental study
                 of the fully dynamic single-source shortest-paths
                 problem on directed graphs with positive real edge
                 weights. In particular, we perform an experimental
                 analysis of three different algorithms: Dijkstra's
                 algorithm, and the two output-bounded algorithms
                 proposed by Ramalingam and Reps in [30] and by
                 Frigioni, Marchetti-Spaccamela and Nanni in [18],
                 respectively. The main goal of this paper is to provide
                 a first experimental evidence for: (a) the
                 effectiveness of dynamic algorithms for shortest paths
                 with respect to a traditional static approach to this
                 problem; (b) the validity of the theoretical model of
                 output boundedness to analyze dynamic graph algorithms.
                 Besides randomly generated graphs, useful to capture
                 the ``asymptotic'' behavior of the algorithms, we also
                 developed experiments considering a widely used
                 graph from the real world, i.e., the Internet graph.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dynamic algorithms; experimental analysis of
                 algorithms; shortest paths",
}
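
%%% For reference, a minimal Python sketch of the static baseline used
%%% in the comparison above: Dijkstra's algorithm with a binary heap
%%% (graph maps u -> list of (v, w) pairs, w > 0).  Names invented.
import heapq

def dijkstra(graph, source):
    dist = {source: 0.0}
    pq = [(0.0, source)]
    while pq:
        d, u = heapq.heappop(pq)
        if d > dist.get(u, float("inf")):
            continue                  # stale queue entry
        for v, w in graph.get(u, ()):
            if d + w < dist.get(v, float("inf")):
                dist[v] = d + w
                heapq.heappush(pq, (dist[v], v))
    return dist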

@Article{Magun:1998:GMA,
  author =       "Jakob Magun",
  title =        "Greeding matching algorithms, an experimental study",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "6:1--6:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/297096.297131",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We conduct an experimental study of several greedy
                 algorithms for finding large matchings in graphs.
                 Further we propose a new graph reduction, called
                 $k$-Block Reduction, and present two novel algorithms
                 using extra heuristics in the matching step and
                 $k$-Block Reduction for $k = 3$. Greedy matching
                 algorithms can be used for finding a good approximation
                 of the maximum matching in a graph $G$ if no exact
                 solution is required, or as a fast preprocessing step
                 to some other matching algorithm. The studied greedy
                 algorithms run in $O(m)$. They are easy to implement
                 and their correctness and their running time are simple
                 to prove. Our experiments show that a good greedy
                 algorithm loses on average at most one edge on random
                 graphs from $G(n,p)$ with up to 10,000 vertices.
                 Furthermore the experiments show for which edge
                 densities in random graphs the maximum matching problem
                 is difficult to solve.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
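
%%% The simplest member of the algorithm family studied above, as a
%%% minimal Python sketch: scan the edges once and take an edge whenever
%%% both endpoints are still free, giving a maximal matching in O(m).
%%% The paper's variants add extra heuristics and k-Block Reduction.
def greedy_matching(edges):
    matched = set()
    matching = []
    for u, v in edges:
        if u not in matched and v not in matched:
            matching.append((u, v))
            matched.update((u, v))
    return matching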

@Article{Andersson:1998:IR,
  author =       "Arne Andersson and Stefan Nilsson",
  title =        "Implementing radixsort",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "7:1--7:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/297096.297136",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present and evaluate several optimization and
                 implementation techniques for string sorting. In
                 particular, we study a recently published radix sorting
                 algorithm, Forward radixsort, that has a provably good
                 worst-case behavior. Our experimental results indicate
                 that radix sorting is considerably faster (often more
                 than twice as fast) than comparison-based sorting
                 methods. This is true even for small input sequences.
                 We also show that it is possible to implement a
                 radixsort with good worst-case running time without
                 sacrificing average-case performance. Our
                 implementations are competitive with the best
                 previously published string sorting programs.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "adaptive radixsort; algorithms; forward radixsort;
                 radix sorting; sorting; string sorting",
}
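
%%% A minimal Python sketch of the basic most-significant-digit string
%%% radix sort underlying the entry above: bucket by the character at
%%% position pos, then recurse within each bucket.  Forward radixsort
%%% refines this scheme to obtain a good worst case; this sketch shows
%%% only the basic idea.
def msd_radix_sort(strings, pos=0):
    if len(strings) <= 1:
        return strings
    done = [s for s in strings if len(s) == pos]  # exhausted strings first
    buckets = {}
    for s in strings:
        if len(s) > pos:
            buckets.setdefault(s[pos], []).append(s)
    for c in sorted(buckets):
        done.extend(msd_radix_sort(buckets[c], pos + 1))
    return done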

@Article{Cherkassky:1998:APC,
  author =       "Boris V. Cherkassky and Andrew V. Goldberg and Paul
                 Martin and Jo{\~a}o C. Setubal and Jorge Stolfi",
  title =        "Augment or push: a computational study of bipartite
                 matching and unit-capacity flow algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "8:1--8:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/297096.297140",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We conduct a computational study of unit capacity flow
                 and bipartite matching algorithms. Our goal is to
                 determine which variant of the push-relabel method is
                 most efficient in practice and to compare push-relabel
                 algorithms with augmenting path algorithms. We have
                 implemented and compared three push-relabel algorithms,
                 three augmenting-path algorithms (one of which is new),
                 and one augment-relabel algorithm. The depth-first
                 search augmenting path algorithm was thought to be a
                 good choice for the bipartite matching problem, but our
                 study shows that it is not robust (meaning that it is
                 not consistently fast on all or most inputs). For the
                 problems we study, our implementations of the FIFO and
                 lowest-level selection push-relabel algorithms have the
                 most robust asymptotic rate of growth and work best
                 overall. Augmenting path algorithms, although not as
                 robust, on some problem classes are faster by a
                 moderate constant factor. Our study includes several
                 new problem families and input graphs with as many as
                 $5 \times 10^5$ vertices.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
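
%%% A minimal Python sketch of the augmenting-path approach to bipartite
%%% matching compared in the entry above (a DFS-based variant often
%%% attributed to Kuhn); the push-relabel codes the study finds more
%%% robust are substantially longer.  adj[u] lists the right-side
%%% neighbors of left vertex u; names invented for the example.
def bipartite_matching(adj, n_left):
    match_right = {}

    def try_augment(u, seen):
        for v in adj[u]:
            if v not in seen:
                seen.add(v)
                if v not in match_right or try_augment(match_right[v], seen):
                    match_right[v] = u
                    return True
        return False

    return sum(try_augment(u, set()) for u in range(n_left))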

@Article{Radzik:1998:IDT,
  author =       "Tomasz Radzik",
  title =        "Implementation of dynamic trees with in-subtree
                 operations",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "3",
  pages =        "9:1--9:??",
  month =        "????",
  year =         "1998",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/297096.297144",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We describe an implementation of dynamic trees with
                 'in-subtree' operations. Our implementation follows
                 Sleator and Tarjan's framework of dynamic-tree
                 implementations based on splay trees. We consider the
                 following two examples of 'in-subtree' operations. (a)
                 For a given node v, find a node with the minimum key in
                 the subtree rooted at v. (b) For a given node v, find a
                 random node with key X in the subtree rooted at v
                 (value X is fixed throughout the whole computation).
                 The first operation may provide support for edge
                 deletions in the dynamic minimum spanning tree problem.
                 The second one may be useful in local search methods
                 for degree-constrained minimum spanning tree problems.
                 We conducted experiments with our dynamic-tree
                 implementation within these two contexts, and the
                 results suggest that this implementation may lead to
                 considerably faster codes than straightforward
                 approaches do.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; design; dynamic minimum spanning tree;
                 dynamic trees; experimentation; performance; splay
                 trees",
}

@Article{Burke:1999:MAS,
  author =       "E. K. Burke and A. J. Smith",
  title =        "A memetic algorithm to schedule planned maintenance
                 for the national grid",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "1:1--1:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/347792.347801",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The combination of local search operators, problem
                 specific information and a genetic algorithm has
                 provided very good results in certain scheduling
                 problems, particularly in timetabling and maintenance
                 scheduling problems. The resulting algorithm from this
                 hybrid approach has been termed a Memetic Algorithm.
                 This paper investigates the use of such an algorithm
                 for the scheduling of transmission line maintenance for
                 a known problem that has been addressed in the
                 literature using a combination of a genetic algorithm
                 and greedy optimisers. This problem is concerned with
                 the scheduling of maintenance for an electricity
                 transmission network where every transmission line must
                 be maintained once within a specified time period. The
                 objective is to avoid situations where sections of the
                 network are disconnected, and to minimise the
                 overloading of lines which are in service. In this
                 paper we look at scheduling maintenance for the South
                 Wales region of the national transmission network. We
                 present and discuss, in some detail, a memetic
                 algorithm that incorporates local search operators
                 including tabu search and simulated annealing. A
                 comparison is made both with the results from previous
                 work, and against a selection of optimising techniques.
                 The approach presented in this paper shows a
                 significant improvement over previously published
                 results on previously tackled problems. We also present
                 results on another problem which has not been tackled
                 in the literature but which is closer to the real-world
                 maintenance scheduling problems faced by such companies
                 as The National Grid Company plc, again using the South
                 Wales region.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "heuristics; hill climbing; maintenance scheduling;
                 memetic algorithms; simulated annealing; tabu search",
}

@Article{Kim:1999:NSP,
  author =       "Sun Kim",
  title =        "A new string-pattern matching algorithm using
                 partitioning and hashing efficiently",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "2:1--2:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/347792.347803",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper, we present a new string-pattern
                 matching algorithm that partitions the text into
                 segments of the input pattern length and searches for
                 pattern occurrences using a simple hashing scheme.
                 Unlike the well known Boyer--Moore style algorithm, our
                 algorithm does not compute variable shift lengths, thus
                 providing a conceptually simpler way to search for
                 patterns. Empirical evaluation shows that our algorithm
                 runs significantly faster than Sunday's and Horspool's
                 extensions of the Boyer--Moore algorithm. The notion of
                 the non-occurrence heuristic used in our algorithm,
                 together with a text partitioning scheme, leads to a
                 simplified scheme for searching for pattern
                 occurrences, thus yielding better run time
                 performance.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Eiron:1999:MMC,
  author =       "N. Eiron and M. Rodeh and I. Steinwarts",
  title =        "Matrix multiplication: a case study of enhanced data
                 cache utilization",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "3:1--3:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/347792.347806",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Modern machines present two challenges to algorithm
                 engineers and compiler writers: They have superscalar,
                 super-pipelined structure, and they have elaborate
                 memory subsystems specifically designed to reduce
                 latency and increase bandwidth. Matrix multiplication
                 is a classical benchmark for experimenting with
                 techniques used to exploit machine architecture and to
                 overcome the limitations of contemporary memory
                 subsystems. This research aims at advancing the state
                 of the art of algorithm engineering by balancing
                 instruction level parallelism, two levels of data
                 tiling, copying to provably avoid any cache conflicts,
                 and prefetching in parallel to computational
                 operations, in order to fully exploit the memory
                 bandwidth. Measurements on IBM's RS/6000 43P
                 workstation show that the resultant matrix
                 multiplication algorithm outperforms IBM's ESSL by
                 6.8--31.8\%, is less sensitive to the size of the input
                 data, and scales better. In this paper we introduce a
                 cache aware algorithm for matrix multiplication. We
                 also suggest generic guidelines that may be applied to
                 compute-intensive algorithms to efficiently utilize the
                 data cache. We believe that some of our concepts may be
                 embodied in compilers.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; BLAS; blocking; cache; matrix
                 multiplication; performance; prefetching",
}
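
%%% One level of the data tiling discussed in the entry above, as a
%%% minimal Python sketch: the three loops are blocked so that tile x
%%% tile submatrices stay cache resident.  The paper combines two tiling
%%% levels with copying and prefetching; this shows only the core idea.
def tiled_matmul(A, B, n, tile=32):
    C = [[0.0] * n for _ in range(n)]
    for ii in range(0, n, tile):
        for kk in range(0, n, tile):
            for jj in range(0, n, tile):
                for i in range(ii, min(ii + tile, n)):
                    for k in range(kk, min(kk + tile, n)):
                        a = A[i][k]    # hoist the reused element
                        for j in range(jj, min(jj + tile, n)):
                            C[i][j] += a * B[k][j]
    return C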

@Article{Erlebach:1999:EIO,
  author =       "T. Erlebach and K. Jansen",
  title =        "Efficient implementation of an optimal greedy
                 algorithm for wavelength assignment in directed tree
                 networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "4:1--4:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/347792.347808",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In all-optical networks with wavelength-division
                 multiplexing several connections can share a physical
                 link if the signals are transmitted on different
                 wavelengths. As the number of available wavelengths is
                 limited in practice, it is important to find wavelength
                 assignments minimizing the number of different
                 wavelengths used. This path coloring problem is
                 NP-hard, and the best known polynomial-time
                 approximation algorithm for directed tree networks
                 achieves approximation ratio $5 / 3$, which is optimal
                 in the class of greedy algorithms for this problem. It
                 is shown how the algorithm can be modified in order to
                 improve its running-time to $O({\rm Tec}(N,L))$ for
                 sets of paths with maximum load $L$ in trees with $N$
                 nodes, where ${\rm Tec}(n, k)$ is the time for
                 edge-coloring a $k$-regular bipartite graph with n
                 nodes. An implementation of this efficient version of
                 the algorithm in C++ using the LEDA class library is
                 described, and experimental results regarding the
                 running-times and the number of wavelengths used are
                 reported. An additional heuristic that reduces the
                 number of wavelengths used in the average case while
                 maintaining the worst-case bound of $5 L / 3$ is
                 described.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; bipartite edge coloring; directed tree
                 networks; experimentation; path coloring",
}

@Article{Huson:1999:HTR,
  author =       "D. Huson and S. Nettles and K. Rice and T. Warnow and
                 S. Yooseph",
  title =        "Hybrid tree reconstruction methods",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "5:1--5:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/347792.347812",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A major computational problem in biology is the
                 reconstruction of evolutionary trees for species sets,
                 and accuracy is measured by comparing the topologies of
                 the reconstructed tree and the model tree. One of the
                 major debates in the field is whether large
                 evolutionary trees can be even approximately accurately
                 reconstructed from biomolecular sequences of
                 realistically bounded lengths (up to about 2000
                 nucleotides) using standard techniques (polynomial-time
                 distance methods, and heuristics for NP-hard
                 optimization problems). Using both analytical and
                 experimental techniques, we show that on large trees,
                 the two most popular methods in systematic biology,
                 Neighbor-Joining and Maximum Parsimony heuristics, as
                 well as two promising methods introduced by theoretical
                 computer scientists, are all likely to have significant
                 errors in the topology reconstruction of the model
                 tree. We also present a new general technique for
                 combining outputs of different methods (thus producing
                 hybrid methods), and show experimentally how one such
                 hybrid method has better performance than its
                 constituent parts.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Jacob:1999:CSR,
  author =       "R. Jacob and M. Marathe and K. Nagel",
  title =        "A computational study of routing algorithms for
                 realistic transportation networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "6:1--6:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/347792.347814",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We carry out an experimental analysis of a number of
                 shortest-path (routing) algorithms investigated in the
                 context of the TRANSIMS (TRansportation ANalysis and
                 SIMulation System) project. The main focus of the paper
                 is to study how various heuristic as well as exact
                 solutions and associated data structures affect the
                 computational performance of the software developed for
                 realistic transportation networks. For this purpose we
                 have used a road network representing, with a high
                 degree of resolution, the Dallas--Fort Worth urban area. We
                 discuss and experimentally analyze various one-to-one
                 shortest-path algorithms. These include classical exact
                 algorithms studied in the literature as well as
                 heuristic solutions that are designed to take into
                 account the geometric structure of the input instances.
                 Computational results are provided to compare
                 empirically the efficiency of various algorithms. Our
                 studies indicate that a modified Dijkstra's algorithm
                 is computationally fast and an excellent candidate for
                 use in various transportation planning applications as
                 well as ITS related technologies.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "design and analysis of algorithms; experimental
                 analysis; network design; shortest-paths algorithms;
                 transportation planning",
}
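
%%% As a concrete reference point for the one-to-one setting studied
%%% above, the following minimal Python sketch shows a plain
%%% binary-heap Dijkstra that stops as soon as the destination is
%%% settled.  It illustrates only the textbook baseline, not the
%%% TRANSIMS code or its geometric heuristics.
%%%
%%% import heapq
%%%
%%% def dijkstra_one_to_one(adj, source, target):
%%%     """adj maps a node to a list of (neighbor, nonnegative weight)
%%%     pairs; returns the shortest-path length from source to target."""
%%%     dist = {source: 0}
%%%     heap = [(0, source)]
%%%     while heap:
%%%         d, u = heapq.heappop(heap)
%%%         if u == target:                 # target settled: stop early
%%%             return d
%%%         if d > dist.get(u, float("inf")):
%%%             continue                    # stale heap entry
%%%         for v, w in adj[u]:
%%%             nd = d + w
%%%             if nd < dist.get(v, float("inf")):
%%%                 dist[v] = nd            # found a shorter path to v
%%%                 heapq.heappush(heap, (nd, v))
%%%     return float("inf")                 # target unreachable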

@Article{Muller-Hannemann:1999:IWM,
  author =       "M. M{\"u}ller-Hannemann and A. Schwartz",
  title =        "Implementing weighted $b$-matching algorithms: towards
                 a flexible software design",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "7:1--7:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/347792.347815",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present a case study on the design of an
                 implementation of a fundamental combinatorial
                 optimization problem: weighted b-matching. Although
                 this problem is well-understood in theory and efficient
                 algorithms are known, little experience with
                 implementations is available. This study was motivated
                 by the practical need for an efficient b-matching
                 solver as a subroutine in our approach to a mesh
                 refinement problem in computer-aided design (CAD). The
                 intent of this paper is to demonstrate the importance
                 of flexibility and adaptability in the design of
                 complex algorithms, but also to discuss how such goals
                 can be achieved for matching algorithms by the use of
                 design patterns. Starting from the basis of the famous
                 blossom algorithm we explain how to exploit in
                 different ways the flexibility of our software design
                 which allows an incremental improvement of efficiency
                 by exchanging subalgorithms and data structures. In a
                 comparison with a code by Miller and Pekny we also
                 demonstrate that our implementation is, even without
                 fine-tuning, very competitive. Our code is significantly
                 faster, with improvement factors ranging between 15 and
                 466 on TSPLIB instances.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; b-matching; blossom algorithm; design
                 patterns; experimentation; object-oriented design;
                 software design",
}

@Article{Schwerdt:1999:CWT,
  author =       "J. Schwerdt and M. Smid and J. Majhi and R. Janardan",
  title =        "Computing the width of a three-dimensional point set:
                 an experimental study",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "4",
  pages =        "8:1--8:??",
  month =        "????",
  year =         "1999",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/347792.347816",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:02:52 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We describe a robust, exact, and efficient
                 implementation of an algorithm that computes the width
                 of a three-dimensional point set. The algorithm is
                 based on efficient solutions to problems that are at
                 the heart of computational geometry: three-dimensional
                 convex hulls, point location in planar graphs, and
                 computing intersections between line segments. The
                 latter two problems have to be solved for planar graphs
                 and segments on the unit sphere, rather than in the
                 two-dimensional plane. The implementation is based on
                 LEDA, and the geometric objects are represented using
                 exact rational arithmetic.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "computational geometry; implementation; layered
                 manufacturing; spherical geometry",
}

@Article{Eppstein:2000:FHC,
  author =       "David Eppstein",
  title =        "Fast hierarchical clustering and other applications of
                 dynamic closest pairs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "1:1--1:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.351829",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We develop data structures for dynamic closest pair
                 problems with arbitrary distance functions, that do not
                 necessarily come from any geometric structure on the
                 objects. Based on a technique previously used by the
                 author for Euclidean closest pairs, we show how to
                 insert and delete objects from an n-object set,
                 maintaining the closest pair, in $O(n \log^2 n)$ time
                 per update and $O(n)$ space. With quadratic space, we
                 can instead use a quadtree-like structure to achieve an
                 optimal time bound, $O(n)$ per update. We apply these
                 data structures to hierarchical clustering, greedy
                 matching, and TSP heuristics, and discuss other
                 potential applications in machine learning, Gr{\"o}bner
                 bases, and local improvement algorithms for partition
                 and placement problems. Experiments show our new
                 methods to be faster in practice than previously used
                 heuristics.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "conga line data structure; matching; nearest-neighbor
                 heuristic; quadtree; TSP",
}
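
%%% The interface the paper's data structures implement can be seen in
%%% the following brute-force Python baseline, which keeps every pair
%%% in a lazily cleaned heap at O(n) distance evaluations per insert.
%%% It is only the naive yardstick such experiments compare against,
%%% not the paper's conga-line or quadtree-like structures.
%%%
%%% import heapq, itertools
%%%
%%% class NaiveDynamicClosestPair:
%%%     def __init__(self, dist):
%%%         self.dist = dist                # arbitrary distance function
%%%         self.points = {}                # id -> object
%%%         self.heap = []                  # (distance, id1, id2)
%%%         self.ids = itertools.count()
%%%
%%%     def insert(self, p):
%%%         i = next(self.ids)
%%%         for j, q in self.points.items():
%%%             heapq.heappush(self.heap, (self.dist(p, q), i, j))
%%%         self.points[i] = p
%%%         return i
%%%
%%%     def delete(self, i):
%%%         del self.points[i]              # heap entries cleaned lazily
%%%
%%%     def closest_pair(self):
%%%         while self.heap:
%%%             d, i, j = self.heap[0]
%%%             if i in self.points and j in self.points:
%%%                 return d, i, j          # both endpoints still alive
%%%             heapq.heappop(self.heap)    # discard stale pair
%%%         return None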

@Article{Chong:2000:CBD,
  author =       "Kyun-Rak Chong and Sartaj Sahni",
  title =        "Correspondence-based data structures for double-ended
                 priority queues",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "2:1--2:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.351828",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We describe three general methods--total, dual, and
                 leaf correspondence--that may be used to derive
                 efficient double-ended priority queues from
                 single-ended priority queues. These methods are
                 illustrated by developing double-ended priority queues
                 based on the classical heap. Experimental results
                 indicate that the leaf-correspondence method generally
                 leads to a faster double-ended priority queue than
                 either total or dual correspondence. On randomly
                 generated test sets, however, the splay tree
                 outperforms the tested correspondence-based
                 double-ended priority queues.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "correspondence-based data structures; double-ended
                 priority queues; heaps; leftist trees; runtime
                 efficiency; splay trees",
}
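
%%% The dual-structure idea behind such correspondence methods can be
%%% seen in miniature in this Python sketch: two single-ended heaps
%%% hold every element, with lazy deletion standing in for the explicit
%%% correspondence bookkeeping that the paper's total, dual, and leaf
%%% correspondence schemes maintain.
%%%
%%% import heapq, itertools
%%%
%%% class DoubleEndedPQ:
%%%     def __init__(self):
%%%         self._min = []                  # (key, id) min-heap
%%%         self._max = []                  # (-key, id) max-heap
%%%         self._alive = set()             # ids not yet deleted
%%%         self._ids = itertools.count()
%%%
%%%     def insert(self, key):
%%%         i = next(self._ids)
%%%         heapq.heappush(self._min, (key, i))
%%%         heapq.heappush(self._max, (-key, i))
%%%         self._alive.add(i)
%%%
%%%     def delete_min(self):
%%%         while True:                     # skip entries already removed
%%%             key, i = heapq.heappop(self._min)
%%%             if i in self._alive:
%%%                 self._alive.remove(i)
%%%                 return key
%%%
%%%     def delete_max(self):
%%%         while True:
%%%             negkey, i = heapq.heappop(self._max)
%%%             if i in self._alive:
%%%                 self._alive.remove(i)
%%%                 return -negkey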

@Article{Xiao:2000:IMP,
  author =       "Li Xiao and Xiaodong Zhang and Stefan A. Kubricht",
  title =        "Improving memory performance of sorting algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "3:1--3:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384245",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Memory hierarchy considerations during sorting
                 algorithm design and implementation play an important
                 role in significantly improving execution performance.
                 Existing algorithms mainly attempt to reduce capacity
                 misses on direct-mapped caches. To reduce other types
                 of cache misses that occur in the more common
                 set-associative caches and the TLB, we restructure the
                 mergesort and quicksort algorithms further by
                 integrating tiling, padding, and buffering techniques
                 and by repartitioning the data set. Our study shows
                 that substantial performance improvements can be
                 obtained using our new methods.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "caches; memory performance; mergesort; quicksort;
                 TLB",
}
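
%%% The tiling idea above, reduced to a few lines of Python: sort
%%% cache-sized tiles first (good locality), then multi-merge the
%%% sorted tiles.  The paper's padding and buffering refinements are
%%% not reproduced, and the tile size merely stands in for the cache
%%% capacity.
%%%
%%% import heapq
%%%
%%% def tiled_mergesort(a, tile=1 << 15):
%%%     # Phase 1: sort tiles small enough to stay cache-resident.
%%%     runs = [sorted(a[i:i + tile]) for i in range(0, len(a), tile)]
%%%     # Phase 2: one multi-way merge over the sorted tiles.
%%%     return list(heapq.merge(*runs))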

@Article{Navarro:2000:FFS,
  author =       "Gonzalo Navarro and Mathieu Raffinot",
  title =        "Fast and flexible string matching by combining
                 bit-parallelism and suffix automata",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "4:1--4:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384246",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The most important features of a string matching
                 algorithm are its efficiency and its flexibility.
                 Efficiency has traditionally received more attention,
                 while flexibility in the search pattern is becoming a
                 more and more important issue. Most classical string
                 matching algorithms are aimed at quickly finding an
                 exact pattern in a text, the most famous being
                 Knuth--Morris--Pratt (KMP) and the Boyer--Moore (BM)
                 family. A recent development uses deterministic 'suffix
                 automata' to design new optimal string matching
                 algorithms, e.g. BDM and TurboBDM. Flexibility has been
                 addressed quite separately by the use of
                 'bit-parallelism', which simulates automata in their
                 nondeterministic form by using bits and exploiting the
                 intrinsic parallelism inside the computer word, e.g.
                 the Shift-Or algorithm. Those algorithms are extended
                 to handle classes of characters and errors in the
                 pattern and/or in the text, their drawback being their
                 inability to skip text characters. In this paper we
                 merge bit-parallelism and suffix automata, so that a
                 nondeterministic suffix automaton is simulated using
                 bit-parallelism. The resulting algorithm, called BNDM,
                 obtains the best from both worlds. It is much simpler
                 to implement than BDM and nearly as simple as Shift-Or.
                 It inherits from Shift-Or the ability to handle
                 flexible patterns and from BDM the ability to skip
                 characters. BNDM is 30\%-40\% faster than BDM and up to
                 7 times faster than Shift-Or. When compared to the
                 fastest existing algorithms on exact patterns (which
                 belong to the BM family), BNDM is from 20\% slower to 3
                 times faster, depending on the alphabet size. With
                 respect to flexible pattern searching, BNDM is by far
                 the fastest technique to deal with classes of
                 characters and is competitive to search allowing
                 errors. In particular, BNDM seems very adequate for
                 computational biology applications, since it is the
                 fastest algorithm to search on DNA sequences and
                 flexible searching is an important problem in that
                 area. As a theoretical development related to flexible
                 pattern matching, we introduce a new automaton to
                 recognize suffixes of patterns with classes of
                 characters. To the best of our knowledge, this
                 automaton has not been studied before.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
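
%%% A minimal Python sketch of exact-matching BNDM as described above:
%%% bit i of B[c] records the positions of character c in the pattern,
%%% and each window is read backwards while the nondeterministic suffix
%%% automaton is simulated with masks and shifts.  Classes of
%%% characters and errors are omitted; a real implementation keeps the
%%% pattern within one machine word.
%%%
%%% def bndm(pattern, text):
%%%     m, n = len(pattern), len(text)
%%%     if m == 0 or m > n:
%%%         return []
%%%     B = {}
%%%     for j, c in enumerate(pattern):     # pattern[0] -> high bit
%%%         B[c] = B.get(c, 0) | (1 << (m - 1 - j))
%%%     full, high = (1 << m) - 1, 1 << (m - 1)
%%%     occ, pos = [], 0
%%%     while pos <= n - m:
%%%         j, last, D = m, m, full
%%%         while D:
%%%             D &= B.get(text[pos + j - 1], 0)
%%%             j -= 1
%%%             if D & high:                # a pattern prefix recognized
%%%                 if j > 0:
%%%                     last = j            # longest proper prefix seen
%%%                 else:
%%%                     occ.append(pos)     # whole window matches
%%%             D = (D << 1) & full
%%%         pos += last                     # safe shift past the window
%%%     return occ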

@Article{Caldwell:2000:DIM,
  author =       "Andrew E. Caldwell and Andrew B. Kahng and Igor L.
                 Markov",
  title =        "Design and implementation of move-based heuristics for
                 {VLSI} hypergraph partitioning",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "5:1--5:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384247",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We summarize the techniques of implementing move-based
                 hypergraph partitioning heuristics and evaluating their
                 performance in the context of VLSI design applications.
                 Our first contribution is a detailed software
                 architecture, consisting of seven reusable components,
                 that allows flexible, efficient and accurate assessment
                 of the practical implications of new move-based
                 algorithms and partitioning formulations. Our second
                 contribution is an assessment of the modern context for
                 hypergraph partitioning research for VLSI design
                 applications. In particular, we discuss the current
                 level of sophistication in implementation know-how and
                 experimental evaluation, and we note how requirements
                 for real-world partitioners --- if used as motivation
                 for research --- should affect the evaluation of
                 prospective contributions. Two 'implicit decisions' in
                 the implementation of the Fiduccia--Mattheyses heuristic
                 are used to illustrate the difficulty of achieving
                 meaningful experimental evaluation of new algorithmic
                 ideas.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; balanced min-cut hypergraph partitioning;
                 experimentation; measurement; performance; VLSI CAD",
}

@Article{Levine:2000:FRC,
  author =       "Matthew S. Levine",
  title =        "Finding the right cutting planes for the {TSP}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "6:1--6:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384248",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Given an instance of the Traveling Salesman Problem
                 (TSP), a reasonable way to get a lower bound on the
                 optimal answer is to solve a linear programming
                 relaxation of an integer programming formulation of the
                 problem. These linear programs typically have an
                 exponential number of constraints, but in theory they
                 can be solved efficiently with the ellipsoid method as
                 long as we have an algorithm that can take a solution
                 and either declare it feasible or find a violated
                 constraint. In practice, it is often the case that many
                 constraints are violated, which raises the question of
                 how to choose among them so as to improve performance.
                 For the simplest TSP formulation it is possible to
                 efficiently find all the violated constraints, which
                 gives us a good chance to try to answer this question
                 empirically. Looking at random two-dimensional
                 Euclidean instances and the large instances from
                 TSPLIB, we ran experiments to evaluate several
                 strategies for picking among the violated constraints.
                 We found some information about which constraints to
                 prefer, which resulted in modest gains, but were unable
                 to get large improvements in performance.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; combinatorial optimization; cutting plane;
                 experimentation; minimum cut; performance; traveling
                 salesman problem",
}

@Article{Sanders:2000:FPQ,
  author =       "Peter Sanders",
  title =        "Fast priority queues for cached memory",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "7:1--7:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384249",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The cache hierarchy prevalent in today's
                 high-performance processors has to be taken into
                 account in order to design algorithms that perform
                 well in practice. This paper advocates the adaptation
                 of external
                 memory algorithms to this purpose. This idea and the
                 practical issues involved are exemplified by
                 engineering a fast priority queue suited to external
                 memory and cached memory that is based on k-way
                 merging. It improves previous external memory
                 algorithms by constant factors crucial for transferring
                 it to cached memory. Running in the cache hierarchy of
                 a workstation, the algorithm is at least two times
                 faster than an optimized implementation of binary heaps
                 and 4-ary heaps for large inputs.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "cache; cache efficiency; data structure; external
                 memory; heap; implementation; multi way merging;
                 priority queue; secondary storage",
}
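
%%% The k-way-merging core of such sequence-based priority queues, as a
%%% minimal Python sketch.  The engineered structure in the paper adds
%%% merge hierarchies, insertion buffers, and cache-aligned layouts
%%% that are not reproduced here.
%%%
%%% import heapq
%%%
%%% def k_way_merge(runs):
%%%     """Merge k sorted runs with a heap holding one front element
%%%     per run; each output element costs O(log k) comparisons."""
%%%     heap = []
%%%     for r, run in enumerate(runs):
%%%         it = iter(run)
%%%         first = next(it, None)
%%%         if first is not None:
%%%             heap.append((first, r, it))  # r breaks ties, never it
%%%     heapq.heapify(heap)
%%%     out = []
%%%     while heap:
%%%         val, r, it = heapq.heappop(heap)
%%%         out.append(val)
%%%         nxt = next(it, None)
%%%         if nxt is not None:
%%%             heapq.heappush(heap, (nxt, r, it))
%%%     return out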

@Article{Muller-Hannemann:2000:IWM,
  author =       "Matthias M{\"u}ller-Hannemann and Alexander Schwartz",
  title =        "Implementing weighted $b$-matching algorithms:
                 insights from a computational study",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "8:1--8:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384250",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present an experimental study of an implementation
                 of weighted perfect b-matching based on the primal-dual
                 blossom algorithm. Although this problem is
                 well-understood in theory and efficient algorithms are
                 known, little experience with implementations is
                 available. In this paper several algorithmic variants
                 are compared on synthetic and application problem data
                 of very sparse graphs. This study was motivated by the
                 practical need for an efficient b-matching solver for
                 the latter application, namely as a subroutine in our
                 approach to a mesh refinement problem in computer-aided
                 design (CAD). Linear regression and operation counting
                 are used to analyze code variants. The experiments
                 confirm that a fractional jump-start speeds up the
                 algorithm, they indicate that a variant based on
                 pairing heaps is slightly superior to a k-heap variant,
                 and that scaling of large b-values is not necessary,
                 whereas a delayed blossom shrinking heuristic
                 significantly improves running times only for graphs
                 with average degree two. The fastest variant of our
                 implementation appears to be highly superior to a code
                 by Miller and Pekny (1995).",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "b-matching; blossom algorithm; operation counting",
}

@Article{Shibuya:2000:CSP,
  author =       "Tetsuo Shibuya",
  title =        "Computing the $n \times m$ shortest path efficiently",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "9:1--9:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384251",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Computation of all the shortest paths between multiple
                 sources and multiple destinations on various networks
                 is required in many problems, such as the traveling
                 salesperson problem (TSP) and the vehicle routing
                 problem (VRP). This paper proposes new algorithms that
                 compute the set of shortest paths efficiently by using
                 the A* algorithm. The efficiency and properties of
                 these algorithms are examined by using the results of
                 experiments on an actual road network.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "A*; algorithm; algorithms; experimentation; GIS;
                 $n \times m$ shortest paths",
}
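
%%% For a single source-destination pair, the A* building block used
%%% above reduces to Dijkstra's algorithm guided by an admissible lower
%%% bound; a minimal Python sketch follows, with hypothetical inputs
%%% (adjacency lists plus planar coordinates whose straight-line
%%% distances never exceed the edge weights, as on a road network).
%%%
%%% import heapq, math
%%%
%%% def a_star(adj, coord, source, target):
%%%     def h(u):                           # admissible heuristic
%%%         (x1, y1), (x2, y2) = coord[u], coord[target]
%%%         return math.hypot(x1 - x2, y1 - y2)
%%%     g = {source: 0.0}
%%%     heap = [(h(source), source)]        # ordered by g + h
%%%     done = set()
%%%     while heap:
%%%         f, u = heapq.heappop(heap)
%%%         if u == target:
%%%             return g[u]
%%%         if u in done:
%%%             continue
%%%         done.add(u)
%%%         for v, w in adj[u]:
%%%             ng = g[u] + w
%%%             if ng < g.get(v, math.inf):
%%%                 g[v] = ng
%%%                 heapq.heappush(heap, (ng + h(v), v))
%%%     return math.inf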

@Article{Vishkin:2000:ELR,
  author =       "Shlomit Dascal and Uzi Vishkin",
  title =        "Experiments with list ranking for explicit
                 multi-threaded {(XMT)} instruction parallelism",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "10:1--10:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384252",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Algorithms for the problem of list ranking are
                 empirically studied with respect to the Explicit
                 Multi-Threaded (XMT) platform for instruction-level
                 parallelism (ILP). The main goal of this study is to
                 understand the differences between XMT and more
                 traditional parallel computing implementation
                 platforms/models as they pertain to the well studied
                 list ranking problem. The main two findings are: (i)
                 good speedups for much smaller inputs are possible and
                 (ii) in part, the first finding is based on a new
                 variant of a 1984 algorithm, called the No-Cut
                 algorithm. The paper incorporates analytic
                 (non-asymptotic) performance analysis into experimental
                 performance analysis for relatively small inputs. This
                 provides an interesting example where experimental
                 research and theoretical analysis complement one
                 another. Explicit Multi-Threading (XMT) is a
                 fine-grained computation framework introduced in our
                 SPAA'98 paper. Building on some key ideas of parallel
                 computing, XMT covers the spectrum from algorithms
                 through architecture to implementation; the main
                 implementation related innovation in XMT was through
                 the incorporation of low-overhead hardware and software
                 mechanisms (for more effective fine-grained
                 parallelism). The reader is referred to that paper for
                 detail on these mechanisms. The XMT platform aims at
                 faster single-task completion time by way of ILP.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
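
%%% For reference, the textbook pointer-jumping list-ranking baseline
%%% (Wyllie's algorithm) in Python; each synchronous round below is
%%% what an XMT-style platform executes in parallel.  The paper's
%%% No-Cut variant and the XMT hardware mechanisms are not reproduced.
%%%
%%% def list_rank(succ):
%%%     """succ[i] is the next node of i; the tail points to itself.
%%%     Returns rank[i], the number of links from i to the tail, after
%%%     O(log n) pointer-jumping rounds."""
%%%     n = len(succ)
%%%     rank = [0 if succ[i] == i else 1 for i in range(n)]
%%%     nxt = list(succ)
%%%     while True:
%%%         # One synchronous round: add the successor's rank, then
%%%         # jump the pointer past it.
%%%         new_rank = [rank[i] + rank[nxt[i]] for i in range(n)]
%%%         new_nxt = [nxt[nxt[i]] for i in range(n)]
%%%         if new_nxt == nxt:              # everyone reaches the tail
%%%             return rank
%%%         rank, nxt = new_rank, new_nxt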

@Article{Werneck:2000:FMC,
  author =       "Renato Werneck and Jo{\~a}o Setubal and Arlindo da
                 Conceic{\~a}o",
  title =        "Finding minimum congestion spanning trees",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "11:1--11:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384253",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Given a weighted graph $G = (V, E)$, a positive
                 integer $k$, and a penalty function $w_p$, we want to
                 find $k$ spanning trees on $G$, not necessarily
                 disjoint, of minimum total weight, such that the weight
                 of each edge is subject to a penalty given by $w_p$ if
                 it belongs to more than one tree. The objective
                 function to be minimized is $\sum_{e \in E} W_e(i_e)$,
                 where $i_e$ is the number of times edge $e$ appears in
                 the solution and $W_e(i_e) = i_e w_p(e, i_e)$ is the
                 aggregate cost of using edge $e$ $i_e$ times. For the
                 case when $W_e$ is weakly convex, which should have
                 wide application in congestion problems, we present a
                 polynomial time algorithm; the algorithm's complexity
                 is quadratic in $k$. We also present two heuristics
                 with complexity linear in $k$. In an experimental study
                 we show that these heuristics are much faster than the
                 exact algorithm also in practice. These experiments
                 present a diverse combination of input families (four),
                 varying $k$ (up to 1000), and penalty functions (two).
                 In most inputs tested the solutions given by the
                 heuristics were within 1\% of optimal or much better,
                 especially for large $k$. The worst quality observed
                 was 3.2\% of optimal.",
  acknowledgement = ack-nhfb,
  articleno =    "11",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Schulz:2000:DAL,
  author =       "Frank Schulz and Dorothea Wagner and Karsten Weihe",
  title =        "{Dijkstra}'s algorithm on-line: an empirical case
                 study from public railroad transport",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "12:1--12:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384254",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Traffic information systems are among the most
                 prominent real-world applications of Dijkstra's
                 algorithm for shortest paths. We consider the scenario
                 of a central information server in the realm of public
                 railroad transport on wide-area networks. Such a system
                 has to process a large number of on-line queries for
                 optimal travel connections in real time. In practice,
                 this problem is usually solved by heuristic variations
                 of Dijkstra's algorithm, which do not guarantee an
                 optimal result. We report results from a pilot study,
                 in which we focused on the travel time as the only
                 optimization criterion. In this study, various speed-up
                 techniques for Dijkstra's algorithm were analysed
                 empirically. This analysis was based on the timetable
                 data of all German trains and on a 'snapshot' of half a
                 million customer queries.",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Flato:2000:DIP,
  author =       "Eyal Flato and Dan Halperin and Iddo Hanniel and Oren
                 Nechushtan and Eti Ezra",
  title =        "The design and implementation of planar maps in
                 {CGAL}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "13:1--13:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384255",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Planar maps are fundamental structures in
                 computational geometry. They are used to represent the
                 subdivision of the plane into regions and have numerous
                 applications. We describe the planar map package of
                 CGAL, the Computational Geometry Algorithms Library. We
                 discuss its modular design and implementation. In
                 particular we introduce the two main classes of the
                 design, planar maps and topological maps, which enable
                 the convenient separation between geometry and
                 topology. The modular design is implemented using a
                 generic programming approach. By switching a template
                 parameter (the geometric traits class), one can use the
                 same code for planar maps of different objects such as
                 line segments or circular arcs. More flexibility is
                 achieved by choosing a point location algorithm out of
                 three implemented algorithms or plugging in an
                 algorithm implemented by the user. The user of the
                 planar maps package can benefit both from its
                 flexibility and robustness. We present several examples
                 of geometric traits classes and point location
                 algorithms which demonstrate how the general package
                 can be adapted to specific needs.",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Rahman:2000:ACE,
  author =       "Naila Rahman and Rajeev Raman",
  title =        "Analysing cache effects in distribution sorting",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "14:1--14:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384256",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study cache effects in distribution sorting
                 algorithms for sorting keys drawn independently at
                 random from a uniform distribution (`uniform keys'). We
                 note that the performance of a recently-published
                 distribution sorting algorithm, Flashsort1, which sorts
                 $n$ uniform floating-point keys in $O(n)$ expected
                 time, does not scale well with the input size due to
                 poor cache utilisation. We present an approximate
                 analysis for distribution sorting uniform keys which,
                 as validated by simulation results, predicts the
                 expected cache misses of Flashsort1 quite well. Using
                 this analysis, we design a multiple-pass variant of
                 Flashsort1 which outperforms Flashsort1 and
                 comparison-based algorithms on uniform floating-point
                 keys for moderate to large values of $n$. Using
                 experimental results we also show that the integer
                 distribution sorting algorithm MSB radix sort performs
                 well on both uniform integer and uniform floating-point
                 keys.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "cache; efficient sorting algorithms; external-memory
                 algorithms; memory hierarchy",
}
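
%%% A compact Python sketch of MSB radix sort for nonnegative integer
%%% keys, the integer algorithm evaluated above.  The digit width (8
%%% bits here) trades the number of passes against the number of
%%% buckets, which is what drives the cache and TLB behaviour studied
%%% in the paper.
%%%
%%% def msb_radix_sort(keys, key_bits=32, digit_bits=8):
%%%     mask = (1 << digit_bits) - 1
%%%
%%%     def rec(bucket, shift):
%%%         if len(bucket) <= 1 or shift < 0:
%%%             return bucket               # nothing left to distinguish
%%%         buckets = [[] for _ in range(1 << digit_bits)]
%%%         for k in bucket:                # distribute on current digit
%%%             buckets[(k >> shift) & mask].append(k)
%%%         out = []
%%%         for b in buckets:               # refine on the next digit
%%%             out.extend(rec(b, shift - digit_bits))
%%%         return out
%%%
%%%     return rec(list(keys), key_bits - digit_bits)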

@Article{Bojesen:2000:PEC,
  author =       "Jesper Bojesen and Jyrki Katajainen and Maz Spork",
  title =        "Performance engineering case study: heap
                 construction",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "15:1--15:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384257",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The behaviour of three methods for constructing a
                 binary heap on a computer with a hierarchical memory is
                 studied. The methods considered are the original one
                 proposed by Williams [1964], in which elements are
                 repeatedly inserted into a single heap; the improvement
                 by Floyd [1964], in which small heaps are repeatedly
                 merged to bigger heaps; and a recent method proposed,
                 e.g., by Fadel et al. [1999] in which a heap is built
                 layerwise. Both the worst-case number of instructions
                 and that of cache misses are analysed. It is well-known
                 that Floyd's method has the best instruction count. Let
                 $N$ denote the size of the heap to be constructed, $B$
                 the number of elements that fit into a cache line, and
                 let $c$ and $d$ be some positive constants. Our
                 analysis shows that, under reasonable assumptions,
                 repeated insertion and layerwise construction both
                 incur at most $cN/B$ cache misses, whereas repeated
                 merging, as programmed by Floyd, can incur more than
                 $(dN \log_2 B)/B$ cache misses. However, for our
                 memory-tuned versions of repeated insertion and
                 repeated merging the number of cache misses incurred is
                 close to the optimal bound $N/B$. In addition to these
                 theoretical findings, we
                 communicate many practical experiences which we hope to
                 be valuable for others doing experimental algorithmic
                 work.",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; binary heaps; code tuning;
                 experimentation; memory tuning; performance; theory",
}
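
%%% The first two construction methods compared above, in minimal
%%% Python form for an array-based min-heap; the paper's memory-tuned
%%% versions are not reproduced.
%%%
%%% def build_heap_williams(items):
%%%     """Repeated insertion: sift each new element up."""
%%%     heap = []
%%%     for x in items:
%%%         heap.append(x)
%%%         i = len(heap) - 1
%%%         while i > 0 and heap[(i - 1) // 2] > heap[i]:
%%%             heap[i], heap[(i - 1) // 2] = heap[(i - 1) // 2], heap[i]
%%%             i = (i - 1) // 2
%%%     return heap
%%%
%%% def build_heap_floyd(items):
%%%     """Repeated merging: sift down from the last internal node.
%%%     O(N) instructions, but a less cache-friendly access pattern."""
%%%     heap = list(items)
%%%     n = len(heap)
%%%     for i in range(n // 2 - 1, -1, -1):
%%%         j = i
%%%         while True:
%%%             small = j
%%%             for c in (2 * j + 1, 2 * j + 2):
%%%                 if c < n and heap[c] < heap[small]:
%%%                     small = c
%%%             if small == j:
%%%                 break                   # heap property restored
%%%             heap[j], heap[small] = heap[small], heap[j]
%%%             j = small
%%%     return heap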

@Article{Boghossian:2000:RSP,
  author =       "N. P. Boghossian and O. Kohlbacher and H. P. Lenhof",
  title =        "Rapid software prototyping in molecular modeling using
                 the biochemical algorithms library {(BALL)}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "16:1--16:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384258",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In the next century, virtual laboratories will play a
                 key role in biotechnology. Computer experiments will
                 not only replace some of the time-consuming and
                 expensive real-world experiments, but they will also
                 provide insights that cannot be obtained using 'wet'
                 experiments. The field that deals with the modeling of
                 atoms, molecules, and their reactions is called
                 Molecular Modeling. The advent of Life Sciences gave
                 rise to numerous new developments in this area.
                 However, the implementation of new simulation tools is
                 extremely time-consuming. This is mainly due to the
                 large amount of supporting code that is required in
                 addition to the code necessary to implement the new
                 idea. The only way to reduce the development time is to
                 reuse reliable code, preferably using object-oriented
                 approaches. We have designed and implemented BALL, the
                 first object-oriented application framework for rapid
                 prototyping in Molecular Modeling. By the use of the
                 composite design pattern and polymorphism we were able
                 to model the multitude of complex biochemical concepts
                 in a well-structured and comprehensible class
                 hierarchy, the BALL kernel classes. The isomorphism
                 between the biochemical structures and the kernel
                 classes leads to an intuitive interface. Since BALL was
                 designed for rapid software prototyping, ease of use,
                 extensibility, and robustness were our principal design
                 goals. Besides the kernel classes, BALL provides
                 fundamental components for import/export of data in
                 various file formats, Molecular Mechanics simulations,
                 three-dimensional visualization, and more complex ones
                 like a numerical solver for the Poisson--Boltzmann
                 equation.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "libraries; molecular modeling; rapid software
                 prototyping",
}

@Article{Brengel:2000:ESP,
  author =       "Klaus Brengel and Andreas Crauser and Paolo Ferragina
                 and Ulrich Meyer",
  title =        "An experimental study of priority queues in external
                 memory",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "5",
  pages =        "17:1--17:??",
  month =        "????",
  year =         "2000",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/351827.384259",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:09 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper we compare the performance of eight
                 different priority queue implementations: four of them
                 are explicitly designed to work in an external-memory
                 setting, the others are standard internal-memory queues
                 available in the LEDA library [Mehlhorn and N{\"a}her
                 1999]. Two of the external-memory priority queues are
                 obtained by engineering known internal-memory priority
                 queues with the aim of achieving effective performance
                 on external storage devices (i.e., Radix heaps [Ahuja
                 et al. 1990] and array heaps [Thorup 1996]). Our
                 experimental framework includes some simple tests, like
                 random sequences of insert or delete-minimum
                 operations, as well as more advanced tests consisting
                 of intermixed sequences of update operations and
                 'application driven' update sequences originated by
                 simulations of Dijkstra's algorithm on large graph
                 instances. Our variegated spectrum of experimental
                 results gives a good picture of the features of these
                 priority queues, thus being helpful to anyone
                 interested in the use of such data structures on very
                 large data sets.",
  acknowledgement = ack-nhfb,
  articleno =    "17",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Arge:2001:EAP,
  author =       "Lars Arge and Laura Toma and Jeffrey Scott Vitter",
  title =        "{I/O}-Efficient Algorithms for Problems on Grid-Based
                 Terrains",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "1:1--1:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/945394.945395",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The potential and use of Geographic Information
                 Systems are rapidly increasing due to the increasing
                 availability of massive amounts of geospatial data from
                 projects like NASA's Mission to Planet Earth. However,
                 the use of these massive datasets also exposes
                 scalability problems with existing GIS algorithms.
                 These scalability problems are mainly due to the fact
                 that most GIS algorithms have been designed to minimize
                 internal computation time, while I/O communication
                 often is the bottleneck when processing massive amounts
                 of data. In this paper, we consider I/O-efficient
                 algorithms for problems on grid-based terrains.
                 Detailed grid-based terrain data is rapidly becoming
                 available for much of the Earth's surface. We describe
                 $O((N/B) \log_{M/B} (N/B))$ I/O algorithms for several
                 problems on $\sqrt{N} \times \sqrt{N}$ grids for which
                 only $O(N)$ algorithms were previously known. Here $M$
                 denotes the size of the main
                 memory and $B$ the size of a disk block. We demonstrate
                 the practical merits of our work by comparing the
                 empirical performance of our new algorithm for the {\em
                 flow accumulation\/} problem with that of the
                 previously best known algorithm. Flow accumulation,
                 which models flow of water through a terrain, is one of
                 the most basic hydrologic attributes of a terrain. We
                 present the results of an extensive set of experiments
                 on real-life terrain datasets of different sizes and
                 characteristics. Our experiments show that while our
                 new algorithm scales nicely with dataset size, the
                 previously known algorithm 'breaks down' once the size
                 of the dataset becomes bigger than the available main
                 memory. For example, while our algorithm computes the
                 flow accumulation for the Appalachian Mountains in
                 about three hours, the previously known algorithm takes
                 several weeks.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Demetrescu:2001:BCM,
  author =       "Camil Demetrescu and Irene Finocchi",
  title =        "Breaking cycles for minimizing crossings",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "2:1--2:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/945394.945396",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the one-sided crossing minimization
                 problem (CP): given a bipartite graph $G$ and a
                 permutation $x_0$ of the vertices on a layer, find a
                 permutation $x_1$ of the vertices on the other layer
                 which minimizes the number of edge crossings in any
                 straightline drawing of $G$ where vertices are placed
                 on two parallel lines and sorted according to $x_0$ and
                 $x_1$. Solving CP represents a fundamental step in the
                 construction of aesthetically pleasing layouts of
                 hierarchies and directed graphs, but unfortunately this
                 problem has been proved to be NP-complete.\par

                 In this paper we address the strong relation between CP
                 and the problem of computing minimum feedback arc sets
                 in directed graphs and we devise a new approximation
                 algorithm for CP, called PM, that exploits this
                 dependency. We experimentally and visually compare the
                 performance of PM with the performance of well-known
                 algorithms and of recent attractive strategies.
                 Experiments are carried out on different families of
                 randomly generated graphs, on pathological instances,
                 and on real test sets. Performance indicators include
                 both number of edge crossings and running time, as well
                 as structural measures of the problem instances. We
                 found CP to be a very interesting and rich problem from
                 a combinatorial point of view. Our results clearly
                 separate the behavior of the algorithms, proving the
                 effectiveness of PM on most test sets and showing
                 tradeoffs between quality of the solutions and running
                 time. However, if the visual complexity of the drawings
                 is considered, we found no clear winner. This confirms
                 the importance of optimizing also other aesthetic
                 criteria such as symmetry, edge length, and angular
                 resolution.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "bipartite graphs; crossing minimization; experimental
                 algorithms",
}
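
%%% The objective that CP minimizes can be evaluated directly: two
%%% edges cross exactly when their endpoints appear in opposite orders
%%% on the two layers.  A small Python sketch, quadratic in the number
%%% of edges for clarity:
%%%
%%% def count_crossings(edges, pos0, pos1):
%%%     """edges: (u, v) pairs between the layers; pos0/pos1 map the
%%%     fixed- and free-layer vertices to their positions."""
%%%     key = sorted((pos0[u], pos1[v]) for u, v in edges)
%%%     crossings = 0
%%%     for i in range(len(key)):
%%%         for j in range(i + 1, len(key)):
%%%             (a0, a1), (b0, b1) = key[i], key[j]
%%%             if a0 < b0 and a1 > b1:     # orders disagree: a crossing
%%%                 crossings += 1
%%%     return crossings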

@Article{Gabow:2001:NFB,
  author =       "Harold Gabow and Tadayoshi Kohno",
  title =        "A Network-Flow-Based Scheduler: Design, Performance
                 History, and Experimental Analysis",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "3:1--3:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/945394.945397",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We describe a program that schedules physician
                 attending teams at Denver Health Medical Center. The
                 program uses network flow techniques to prune an
                 exponentially sized search space. We describe the
                 program design, its performance history at the
                 hospital, and experiments on a simplified version of
                 the program.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "graphs; matroids; scheduling",
}

@Article{Iyer:2001:ESP,
  author =       "Raj Iyer and David Karger and Hariharan Rahul and
                 Mikkel Thorup",
  title =        "An Experimental Study of Polylogarithmic, Fully
                 Dynamic, Connectivity Algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "4:1--4:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/945394.945398",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present an experimental study of different variants
                 of the amortized $O(\log^2 n)$-time fully-dynamic
                 connectivity algorithm of Holm, de Lichtenberg, and
                 Thorup (STOC'98). The experiments build upon
                 experiments provided by Alberts, Cattaneo, and Italiano
                 (SODA'96) on the randomized amortized $O(\log^3 n)$
                 fully-dynamic connectivity algorithm of Henzinger and
                 King (STOC'95). Our experiments shed light upon
                 similarities and differences between the two
                 algorithms. We also present a slightly modified version
                 of the Henzinger--King algorithm that runs in $O(\log^2
                 n)$ time, which resulted from our experiments.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Liberatore:2001:CSB,
  author =       "Vincenzo Liberatore",
  title =        "Caching and Scheduling for Broadcast Disk Systems",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "5:1--5:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/945394.945399",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Unicast connections lead to performance and
                 scalability problems when a large client population
                 attempts to access the same data. Broadcast push and
                 broadcast disk technology address the problem by
                 broadcasting data items from a server to a large number
                 of clients. Broadcast disk performance depends mainly
                 on caching strategies at the client site and on how the
                 broadcast is scheduled at the server site. An on-line
                 broadcast disk paging strategy makes caching decisions
                 without knowing future page requests or access
                 probabilities. This paper gives new implementations of
                 existing on-line algorithms and reports on extensive
                 empirical investigations. The gray algorithm [Khanna
                 and Liberatore 2000] always outperformed other on-line
                 strategies on both synthetic and Web traces. Moreover,
                 caching limited the skewness of broadcast schedules,
                 and led to favor efficient caching algorithms over
                 refined scheduling strategies when the cache was
                 large.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "broadcast disk; caching; scheduling",
}

@Article{Narasimhan:2001:GMS,
  author =       "Giri Narasimhan and Martin Zachariasen",
  title =        "Geometric Minimum Spanning Trees via Well-Separated
                 Pair Decompositions",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "6:1--6:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/945394.945400",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Let $S$ be a set of $n$ points in $\Re^d$. We present
                 an algorithm that uses the well-separated pair
                 decomposition and computes the minimum spanning tree of
                 $S$ under any $L_p$ or polyhedral metric. A theoretical
                 analysis shows that it has an expected running time of
                 $O(n \log n)$ for uniform point distributions; this is
                 verified experimentally. Extensive experimental results
                 show that this approach is practical. Under a variety
                 of input distributions, the resulting implementation is
                 robust and performs well for points in higher
                 dimensional space.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
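
%%% Once a well-separated pair decomposition has supplied a sparse set of
%%% candidate edges (one closest pair per well-separated pair), the minimum
%%% spanning tree can be extracted with a standard Kruskal pass. A hedged
%%% Python sketch; the WSPD construction itself is omitted:
%%%
%%%   def mst_from_candidates(n, candidates):
%%%       # candidates: (weight, u, v) triples over vertices 0..n-1
%%%       parent = list(range(n))
%%%       def find(x):                        # union-find with path halving
%%%           while parent[x] != x:
%%%               parent[x] = parent[parent[x]]
%%%               x = parent[x]
%%%           return x
%%%       tree = []
%%%       for w, u, v in sorted(candidates):
%%%           ru, rv = find(u), find(v)
%%%           if ru != rv:
%%%               parent[ru] = rv
%%%               tree.append((u, v, w))
%%%       return tree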

@Article{Rahman:2001:ARS,
  author =       "Naila Rahman and Rajeev Raman",
  title =        "Adapting Radix Sort to the Memory Hierarchy",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "7:1--7:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/945394.945401",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We demonstrate the importance of reducing misses in
                 the translation-lookaside buffer (TLB) for obtaining
                 good performance on modern computer architectures. We
                 focus on least-significant bit first (LSB) radix sort,
                 standard implementations of which make many TLB misses.
                 We give three techniques which simultaneously reduce
                 cache and TLB misses for LSB radix sort: reducing
                 working set size, explicit block transfer and
                  pre-sorting. We note the following: (1) All the techniques
                 above yield algorithms whose implementations outperform
                 optimised cache-tuned implementations of LSB radix sort
                 and comparison-based sorting algorithms. The fastest
                 running times are obtained by the pre-sorting approach
                 and these are over twice as fast as optimised
                 cache-tuned implementations of LSB radix sort and
                 quicksort. Even the simplest optimisation, using the
                 TLB size to guide the choice of radix in standard
                 implementations of LSB radix sort, gives good
                  improvements over cache-tuned algorithms. (2) One of
                 the pre-sorting algorithms and explicit block transfer
                 make few cache and TLB misses in the worst case. This
                 is not true of standard implementations of LSB radix
                 sort. We also apply these techniques to the problem of
                 permuting an array of integers, and obtain gains of
                 over 30\% relative to the naive algorithm by using
                 explicit block transfer.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "cache; efficient sorting algorithms; external-memory
                 algorithms; locality of reference; memory hierarchy;
                 radix sort; translation-lookaside buffer (TLB)",
}
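
%%% The simplest optimisation named above, using the TLB size to guide the
%%% choice of radix, fits in a few lines. A Python sketch of LSB radix sort
%%% in which radix_bits is a placeholder for a machine-derived value, not a
%%% constant measured in the paper:
%%%
%%%   def lsb_radix_sort(keys, radix_bits=11, key_bits=32):
%%%       # keep the 2**radix_bits buckets small enough that each pass
%%%       # stays within the pages the TLB can map
%%%       mask = (1 << radix_bits) - 1
%%%       for shift in range(0, key_bits, radix_bits):
%%%           buckets = [[] for _ in range(1 << radix_bits)]
%%%           for k in keys:
%%%               buckets[(k >> shift) & mask].append(k)  # stable distribute
%%%           keys = [k for b in buckets for k in b]      # collect
%%%       return keys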

@Article{Stallmann:2001:HES,
  author =       "Matthias Stallmann and Franc Brglez and Debabrata
                 Ghosh",
  title =        "Heuristics, Experimental Subjects, and Treatment
                 Evaluation in Bigraph Crossing Minimization",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "8:1--8:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/945394.945402",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The bigraph crossing problem, embedding the two node
                 sets of a bipartite graph along two parallel lines so
                 that edge crossings are minimized, has applications to
                 circuit layout and graph drawing. Experimental results
                 for several previously known and two new heuristics
                 suggest continued exploration of the problem,
                 particularly sparse instances. We emphasize careful
                 design of experimental subject classes and present
                 novel views of the results. All source code, data, and
                  scripts are available on-line.
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "crossing number; design of experiments; graph drawing;
                 graph embedding; graph equivalence classes; layout",
}

@Article{Frigioni:2001:ESD,
  author =       "Daniele Frigioni and Tobias Miller and Christos
                 Zaroliagis",
  title =        "An Experimental Study of Dynamic Algorithms for
                 Transitive Closure",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "9:1--9:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/945394.945403",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We perform an extensive experimental study of several
                 dynamic algorithms for transitive closure. In
                 particular, we implemented algorithms given by
                 Italiano, Yellin, Cicerone et al., and two recent
                 randomized algorithms by Henzinger and King. We propose
                 a fine-tuned version of Italiano's algorithms as well
                 as a new variant of them, both of which were always
                 faster than any of the other implementations of the
                 dynamic algorithms. We also considered simple-minded
                 algorithms that were easy to implement and likely to be
                  fast in practice. We tested and compared the above
                 implementations on random inputs, on non-random inputs
                 that are worst-case inputs for the dynamic algorithms,
                 and on an input motivated by a real-world graph.",
  acknowledgement = ack-nhfb,
  articleno =    "9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dynamic algorithm; experimentation; transitive
                 closure",
}
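
%%% The "simple-minded" end of the spectrum tested here can be as small as
%%% a fresh graph search per reachability query, with nothing maintained
%%% across updates. A Python sketch (ours, for illustration):
%%%
%%%   from collections import deque
%%%
%%%   def reachable(adj, s, t):
%%%       seen, q = {s}, deque([s])
%%%       while q:
%%%           u = q.popleft()
%%%           if u == t:
%%%               return True
%%%           for v in adj[u]:
%%%               if v not in seen:
%%%                   seen.add(v)
%%%                   q.append(v)
%%%       return False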

@Article{Matias:2001:EFP,
  author =       "Yossi Matias and Nasir Rajpoot and Cenk Sahinalp",
  title =        "The Effect of Flexible Parsing for Dynamic
                 Dictionary-Based Data Compression",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "6",
  pages =        "10:1--10:??",
  month =        "????",
  year =         "2001",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/945394.945404",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:03:55 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We report on the performance evaluation of greedy
                 parsing with a single step lookahead (which we call
                  flexible parsing or {\em FP\/}) as an alternative to
                 the commonly used greedy parsing (with no-lookaheads)
                 scheme. Greedy parsing is the basis of most popular
                 compression programs including UNIX {\tt compress} and
                  {\tt gzip}; however, it usually results in far from
                 optimal parsing\slash compression with regard to the
                 dictionary construction scheme in use. Flexible
                  parsing, however, is optimal [MS99], i.e., it partitions
                  any given input into the smallest number of phrases
                 possible, for dictionary construction schemes which
                 satisfy the prefix property throughout their
                 execution.\par

                 We focus on the application of {\em FP\/} in the
                 context of the LZW variant of the Lempel--Ziv'78
                 dictionary construction method [Wel84, ZL78], which is
                 of considerable practical interest. We implement two
                 compression algorithms which use (1) {\em FP\/} with
                 LZW dictionary (LZW-{\em FP\/}), and (2) {\em FP\/}
                 with an alternative flexible dictionary (FPA as
                 introduced in [Hor95]). Our implementations are based
                 on novel on-line data structures enabling us to use
                 linear time and space. We test our implementations on a
                 collection of input sequences which includes textual
                 files, DNA sequences, medical images, and pseudorandom
                 binary files, and compare our results with two of the
                 most popular compression programs UNIX {\tt compress}
                 and {\tt gzip}. Our results demonstrate that flexible
                 parsing is especially useful for non-textual data, on
                 which it improves over the compression rates of {\tt
                 compress} and {\tt gzip} by up to 20\% and 35\%,
                 respectively.",
  acknowledgement = ack-nhfb,
  articleno =    "10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
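
%%% The difference between greedy parsing and one-step-lookahead flexible
%%% parsing is easiest to see over a static, prefix-closed dictionary (the
%%% real algorithms grow an LZW dictionary online; that simplification and
%%% all names below are ours):
%%%
%%%   def longest_at(s, i, dictionary):
%%%       # length of the longest dictionary phrase starting at s[i:];
%%%       # assumes every single symbol is in the dictionary, as in LZW
%%%       j = i + 1
%%%       while j <= len(s) and s[i:j] in dictionary:
%%%           j += 1
%%%       return j - 1 - i
%%%
%%%   def greedy_parse(s, dictionary):
%%%       out, i = [], 0
%%%       while i < len(s):
%%%           l = longest_at(s, i, dictionary)
%%%           out.append(s[i:i + l]); i += l
%%%       return out
%%%
%%%   def flexible_parse(s, dictionary):
%%%       # pick the phrase that reaches farthest after the *next* phrase too
%%%       out, i = [], 0
%%%       while i < len(s):
%%%           lmax = longest_at(s, i, dictionary)
%%%           l = max(range(1, lmax + 1),
%%%                   key=lambda l: l + longest_at(s, i + l, dictionary))
%%%           out.append(s[i:i + l]); i += l
%%%       return out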

@Article{Backes:2002:HLB,
  author =       "Werner Backes and Susanne Wetzel",
  title =        "Heuristics on lattice basis reduction in practice",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "1--1",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/944618.944619",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper we provide a survey on LLL lattice basis
                 reduction in practice. We introduce several new
                  heuristics to speed up known lattice basis reduction
                 methods and improve the quality of the computed reduced
                 lattice basis in practice. We analyze substantial
                  experimental data and, to our knowledge, we are the
                 first to present general heuristics for determining
                 which variant of the reduction algorithm, for varied
                 parameter choices, yields the most efficient reduction
                 strategy for reducing a particular problem instance.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dynamic approximation; general reduction heuristics;
                 lattice basis reduction; modular and iterative
                 heuristics",
}
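
%%% For orientation, the unoptimised textbook LLL loop that heuristics of
%%% this kind start from (exact rational arithmetic, Gram--Schmidt
%%% recomputed from scratch; the paper's variants improve on precisely
%%% this sort of baseline):
%%%
%%%   from fractions import Fraction
%%%
%%%   def lll(basis, delta=Fraction(3, 4)):
%%%       b = [[Fraction(x) for x in v] for v in basis]
%%%       n = len(b)
%%%       dot = lambda u, v: sum(x * y for x, y in zip(u, v))
%%%       def gram_schmidt():
%%%           bstar, mu = [], [[Fraction(0)] * n for _ in range(n)]
%%%           for i in range(n):
%%%               v = b[i][:]
%%%               for j in range(i):
%%%                   mu[i][j] = dot(b[i], bstar[j]) / dot(bstar[j], bstar[j])
%%%                   v = [x - mu[i][j] * y for x, y in zip(v, bstar[j])]
%%%               bstar.append(v)
%%%           return bstar, mu
%%%       bstar, mu = gram_schmidt()
%%%       k = 1
%%%       while k < n:
%%%           for j in range(k - 1, -1, -1):          # size-reduce b_k
%%%               q = round(mu[k][j])
%%%               if q:
%%%                   b[k] = [x - q * y for x, y in zip(b[k], b[j])]
%%%                   bstar, mu = gram_schmidt()
%%%           lhs = dot(bstar[k], bstar[k])
%%%           rhs = (delta - mu[k][k - 1] ** 2) * dot(bstar[k - 1], bstar[k - 1])
%%%           if lhs >= rhs:
%%%               k += 1                              # Lovasz condition holds
%%%           else:
%%%               b[k - 1], b[k] = b[k], b[k - 1]     # swap and step back
%%%               bstar, mu = gram_schmidt()
%%%               k = max(k - 1, 1)
%%%       return b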

@Article{Iwama:2002:PLS,
  author =       "Kazuo Iwama and Daisuke Kawai and Shuichi Miyazaki and
                 Yasuo Okabe and Jun Umemoto",
  title =        "Parallelizing local search for {CNF} satisfiability
                 using vectorization and {PVM}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "2--2",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/944618.944620",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The purpose of this paper is to speed up the local
                 search algorithm for the CNF Satisfiability problem.
                  Our basic strategy is to run some $10^5$ independent
                 search paths simultaneously using PVM on a vector
                 supercomputer VPP800, which consists of 40 vector
                 processors. Using the above parallelization and
                 vectorization together with some improvement of data
                  structure, we obtained a 600-fold speedup in terms of
                 the number of flips the local search can make per
                 second, compared to the original GSAT by Selman and
                 Kautz. We ran our parallel GSAT for benchmark instances
                 and compared the running time with those of existing
                 SAT programs. We could observe an apparent benefit of
                  parallelization: in particular, we were able to solve two
                  instances that had never been solved before this
                  paper. We also tested parallel local search for the SAT
                 encoding of the class scheduling problem. Again we were
                 able to get almost the best answer in reasonable
                 time.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; CNF Satisfiability; distributed computing;
                 experimentation; local search algorithms;
                 parallelization; PVM; vector supercomputer;
                 vectorization",
}
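
%%% Each of the ~10^5 parallel search paths runs the same sequential GSAT
%%% core: start from a random assignment and repeatedly flip the variable
%%% whose flip maximizes the number of satisfied clauses. A compact Python
%%% rendering of that core (parallelization and vectorization omitted):
%%%
%%%   import random
%%%
%%%   def gsat(clauses, n_vars, max_flips=10000):
%%%       # clauses: lists of nonzero ints; literal v>0 means x_v, v<0 means not x_v
%%%       sat = lambda a: sum(any((l > 0) == a[abs(l)] for l in c) for c in clauses)
%%%       a = {v: random.random() < 0.5 for v in range(1, n_vars + 1)}
%%%       for _ in range(max_flips):
%%%           if sat(a) == len(clauses):
%%%               return a
%%%           best, best_score = None, -1
%%%           for v in a:                      # greedy: score every flip
%%%               a[v] = not a[v]
%%%               s = sat(a)
%%%               a[v] = not a[v]
%%%               if s > best_score:
%%%                   best, best_score = v, s
%%%           a[best] = not a[best]
%%%       return None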

@Article{Albers:2002:ESO,
  author =       "Susanne Albers and Bianca Schr{\"o}der",
  title =        "An experimental study of online scheduling
                 algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "3--3",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/944618.944621",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present the first comprehensive experimental study
                 of online algorithms for Graham's scheduling problem.
                 Graham's scheduling problem is a fundamental problem in
                 scheduling theory where a sequence of jobs has to be
                 scheduled on $m$ identical parallel machines so as to
                 minimize the makespan. Graham gave an elegant algorithm
                 that is $(2 - 1 / m)$-competitive. Recently a number of
                 new online algorithms were developed that achieve
                 competitive ratios around 1.9. Since competitive
                 analysis can only capture the worst case behavior of an
                  algorithm, a question often asked is: Are these new
                 algorithms geared only towards a pathological case or
                  do they perform better in practice, too? We address this
                 question by analyzing the algorithms on various job
                 sequences. In our actual tests, we analyzed the
                 algorithms (1) on real world jobs and (2) on jobs
                 generated by probability distributions. It turns out
                 that the performance of the algorithms depends heavily
                 on the characteristics of the respective work load. On
                 job sequences that are generated by standard
                 probability distributions, Graham's strategy is clearly
                 the best. However, on the real world jobs the new
                 algorithms often outperform Graham's strategy. Our
                 experimental study confirms theoretical results in the
                 sense that there are also job sequences in practice on
                 which the new online algorithms perform better. Our
                 study can help to inform practitioners about the new
                 scheduling strategies as an alternative to Graham's
                 algorithm.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; experimentation; online algorithms;
                 performance; scheduling",
}
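
%%% Graham's strategy itself is a one-step rule: place each arriving job
%%% on the currently least-loaded machine. A Python sketch:
%%%
%%%   import heapq
%%%
%%%   def graham(jobs, m):
%%%       # jobs arrive online as processing times; returns the makespan
%%%       loads = [(0.0, i) for i in range(m)]
%%%       heapq.heapify(loads)
%%%       for p in jobs:
%%%           load, i = heapq.heappop(loads)   # least-loaded machine
%%%           heapq.heappush(loads, (load + p, i))
%%%       return max(load for load, _ in loads)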

@Article{Mehlhorn:2002:IWM,
  author =       "Kurt Mehlhorn and Guido Sch{\"a}fer",
  title =        "Implementation of {$O(nm \log n)$} weighted matchings
                 in general graphs: the power of data structures",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "4--4",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/944618.944622",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We describe the implementation of an algorithm which
                 solves the weighted matching problem in general graphs
                 with $n$ vertices and $m$ edges in time $O(nm \log n)$.
                 Our algorithm is a variant of the algorithm of Galil,
                 Micali and Gabow [Galil et al. 1986] and extensively
                 uses sophisticated data structures, in particular {\em
                 concatenable priority queues}, so as to reduce the time
                 needed to perform dual adjustments and to find tight
                 edges in Edmonds' blossom-shrinking algorithm. We
                 compare our implementation to the experimentally
                 fastest implementation, named {\em Blossom IV}, due to
                 Cook and Rohe [Cook and Rohe 1997]. Blossom IV requires
                 only very simple data structures and has an asymptotic
                 running time of $O(n^2 m)$. Our experiments show that
                 our new implementation is superior to Blossom IV. A
                 closer inspection reveals that the running time of
                 Edmonds' blossom-shrinking algorithm in practice
                 heavily depends on the time spent to perform dual
                 adjustments and to find tight edges. Therefore,
                 optimizing these operations, as is done in our
                 implementation, indeed speeds-up the practical
                 performance of implementations of Edmonds' algorithm.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Edelkamp:2002:IHQ,
  author =       "Stefan Edelkamp and Patrick Stiegeler",
  title =        "Implementing {{\em HEAPSORT\/}} with $(n \log n - 0.9
                  n)$ and {{\em QUICKSORT\/}} with $(n \log n + 0.2 n)$
                 comparisons",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "5--5",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/944618.944623",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "With refinements to the {\em WEAK-HEAPSORT\/}
                  algorithm, we establish the general and practically
                  relevant sequential sorting algorithm {\em
                 INDEX-WEAK-HEAPSORT\/} with exactly $n \lceil \log n
                  \rceil - 2^{\lceil \log n \rceil} + 1 \leq n \log n - 0.9
                 n$ comparisons and at most $n \log n + 0.1 n$
                 transpositions on any given input. It comprises an
                 integer array of size $n$ and is best used to generate
                 an index for the data set. With {\em
                 RELAXED-WEAK-HEAPSORT\/} and {\em
                 GREEDY-WEAK-HEAPSORT\/} we discuss modifications for a
                 smaller set of pending element transpositions. If extra
                 space to create an index is not available, with {\em
                 QUICK-WEAK-HEAPSORT\/} we propose an efficient {\em
                 QUICKSORT\/} variant with $n \log n + 0.2 n + o(n)$
                 comparisons on the average. Furthermore, we present
                 data showing that {\em WEAK-HEAPSORT,
                 INDEX-WEAK-HEAPSORT\/} and {\em QUICK-WEAK-HEAPSORT\/}
                 compete with other performant {\em QUICKSORT\/} and
                 {\em HEAPSORT\/} variants.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Erlebach:2002:IAA,
  author =       "Thomas Erlebach and Klaus Jansen",
  title =        "Implementation of approximation algorithms for
                 weighted and unweighted edge-disjoint paths in
                 bidirected trees",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "6--6",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/944618.944624",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Given a set of weighted directed paths in a bidirected
                 tree, the maximum weight edge-disjoint paths problem
                 (MWEDP) is to select a subset of the given paths such
                 that the selected paths are edge-disjoint and the total
                 weight of the selected paths is maximized. MWEDP is
                 {\em NP\/}-hard for bidirected trees of unbounded
                 degree, even if all weights are the same (the
                 unweighted case). Three different approximation
                 algorithms are implemented: a known combinatorial $(5/3
                 + \epsilon)$-approximation algorithm $A_1$ for the
                 unweighted case, a new combinatorial 2-approximation
                 algorithm $A_2$ for the weighted case, and a known $(5
                 / 3 + \epsilon)$-approximation algorithm $A_3$ for the
                 weighted case that is based on linear programming. For
                 algorithm $A_1$, it is shown how efficient data
                 structures can be used to obtain a worst-case
                 running-time of $O(m + n + 4^{1/\epsilon} \sqrt n c m)$
                 for instances consisting of $m$ paths in a tree with
                 $n$ nodes. Experimental results regarding the
                 running-times and the quality of the solutions obtained
                 by the three approximation algorithms are reported.
                 Where possible, the approximate solutions are compared
                 to the optimal solutions, which were computed by
                 running CPLEX on an integer linear programming
                 formulation of MWEDP.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; combinatorial optimization;
                 experimentation; linear programming",
}

@Article{Lassous:2002:PLR,
  author =       "Isabelle Gu{\'e}rin Lassous and Jens Gustedt",
  title =        "Portable list ranking: an experimental study",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "7--7",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/944618.944625",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present and analyze two portable algorithms for the
                 List Ranking Problem in the Coarse Grained
                 Multicomputer model (CGM). We report on implementations
                 of these algorithms and experiments that were done with
                 these on a variety of parallel and distributed
                 architectures, ranging from PC clusters to a mainframe
                 parallel machine. With these experiments, we validate
                 the chosen CGM model, and also show the possible gains
                 and limits of such algorithms.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
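
%%% The classic shared-memory kernel underlying list ranking is pointer
%%% jumping; the CGM algorithms of the paper are coarse-grained relatives
%%% of it. A sequential Python simulation (ours, for orientation only):
%%%
%%%   def list_rank(succ):
%%%       # succ[i]: next node in the list, with succ[tail] == tail
%%%       n = len(succ)
%%%       rank = [0 if succ[i] == i else 1 for i in range(n)]
%%%       nxt = succ[:]
%%%       for _ in range(max(1, n.bit_length())):    # O(log n) rounds
%%%           rank = [rank[i] + rank[nxt[i]] for i in range(n)]
%%%           nxt = [nxt[nxt[i]] for i in range(n)]
%%%       return rank                                # distance to the tail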

@Article{Vahrenhold:2002:PPL,
  author =       "Jan Vahrenhold and Klaus H. Hinrichs",
  title =        "Planar point location for large data sets: to seek or
                 not to seek",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "8--8",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/944618.944626",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present an algorithm for external memory planar
                 point location that is both effective and easy to
                 implement. The base algorithm is an external memory
                 variant of the bucket method by Edahiro, Kokubo and
                 Asano that is combined with Lee and Yang's batched
                 internal memory algorithm for planar point location.
                 Although our algorithm is not optimal in terms of its
                 worst-case behavior, we show its efficiency for both
                 batched and single-shot queries by experiments with
                 real-world data. The experiments show that the
                 algorithm benefits from the mainly sequential disk
                 access pattern and significantly outperforms the
                 fastest algorithm for internal memory. Due to its
                 simple concept, the algorithm can take advantage of
                 multiple disks and processors in a rather
                 straightforward yet efficient way.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Wickremesinghe:2002:ESU,
  author =       "Rajiv Wickremesinghe and Lars Arge and Jeffrey S.
                 Chase and Jeffrey Scott Vitter",
  title =        "Efficient sorting using registers and caches",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "9--9",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/944618.944627",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Modern computer systems have increasingly complex
                 memory systems. Common machine models for algorithm
                 analysis do not reflect many of the features of these
                 systems, e.g., large register sets, lockup-free caches,
                 cache hierarchies, associativity, cache line fetching,
                 and streaming behavior. Inadequate models lead to poor
                 algorithmic choices and an incomplete understanding of
                 algorithm behavior on real machines. A key step toward
                 developing better models is to quantify the performance
                 effects of features not reflected in the models. This
                 paper explores the effect of memory system features on
                 sorting performance. We introduce a new cache-conscious
                 sorting algorithm, R-MERGE, which achieves better
                 performance in practice over algorithms that are
                 superior in the theoretical models. R-MERGE is designed
                 to minimize memory stall cycles rather than cache
                 misses by considering features common to many system
                 designs.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Herrmann:2002:FCN,
  author =       "Francine Herrmann and Alain Hertz",
  title =        "Finding the chromatic number by means of critical
                 graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "10--10",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/944618.944628",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We propose a new exact algorithm for finding the
                 chromatic number of a graph $G$. The algorithm attempts
                 to determine the smallest possible induced subgraph
                 $G'$ of $G$ which has the same chromatic number as $G$.
                  Such a subgraph is said to be critical since all proper
                  induced subgraphs of $G'$ have a chromatic number
                  strictly smaller than that of $G'$. The proposed method is
                 particularly helpful when a $k$-coloring of a
                 non-critical graph is known, and it has to be proved
                 that no $(k - 1)$-coloring of $G$ exists. Computational
                 experiments on random graphs and on DIMACS benchmark
                  problems demonstrate that the newly proposed algorithm
                  can solve larger problems than previously known exact
                  methods.
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; experimentation; performance",
}

@Article{Fekete:2002:SHP,
  author =       "S{\'a}ndor P. Fekete and Henk Meijer and Andr{\'e}
                 Rohe and Walter Tietze",
  title =        "Solving a 'Hard' problem to approximate an 'Easy' one:
                 heuristics for maximum matchings and maximum traveling
                 salesman problems",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "11--11",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/944618.944629",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider geometric instances of the Maximum
                 Weighted Matching Problem (MWMP) and the Maximum
                 Traveling Salesman Problem (MTSP) with up to 3,000,000
                 vertices. Making use of a geometric duality
                 relationship between MWMP, MTSP, and the
                  Fermat--Weber Problem (FWP), we develop a heuristic
                  approach that yields, in near-linear time, solutions as
                 well as upper bounds. Using various computational
                 tools, we get solutions within considerably less than
                 1\% of the optimum. An interesting feature of our
                 approach is that, even though an FWP is hard to compute
                 in theory and Edmonds' algorithm for maximum weighted
                 matching yields a polynomial solution for the MWMP, the
                 practical behavior is just the opposite, and we can
                 solve the FWP with high accuracy in order to find a
                 good heuristic solution for the MWMP.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "approximation; Fermat--Weber problem; geometric
                 optimization; geometric problems; heuristics; maximum
                 traveling salesman problem (MTSP); maximum weighted
                 matching; near-linear algorithms",
}
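
%%% The abstract does not name its FWP solver; the standard fixed-point
%%% scheme for the Fermat--Weber point is Weiszfeld's iteration, sketched
%%% here in Python for the planar case:
%%%
%%%   import math
%%%
%%%   def weiszfeld(points, iters=100, eps=1e-9):
%%%       x = [sum(p[0] for p in points) / len(points),
%%%            sum(p[1] for p in points) / len(points)]   # start at centroid
%%%       for _ in range(iters):
%%%           nx, ny, den = 0.0, 0.0, 0.0
%%%           for px, py in points:
%%%               d = math.hypot(x[0] - px, x[1] - py)
%%%               if d < eps:                  # iterate landed on a data point
%%%                   return (px, py)
%%%               nx += px / d; ny += py / d; den += 1.0 / d
%%%           x = [nx / den, ny / den]
%%%       return tuple(x)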

@Article{Neri:2002:RCL,
  author =       "Filippo Neri",
  title =        "Relational concept learning by cooperative evolution",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "7",
  pages =        "12--12",
  month =        "????",
  year =         "2002",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/944618.944630",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Concept learning is a computationally demanding task
                 that involves searching large hypothesis spaces
                 containing candidate descriptions. Stochastic search
                 combined with parallel processing provide a promising
                 approach to successfully deal with such computationally
                 intensive tasks. Learning systems based on distributed
                 genetic algorithms (GA) were able to find concept
                 descriptions as accurate as the ones found by
                 state-of-the-art learning systems based on alternative
                 approaches. However, genetic algorithms' exploitation
                 has the drawback of being computationally demanding. We
                 show how a suitable architectural choice, named
                  cooperative evolution, allows GA-based learning systems
                  to solve complex applications within an acceptable user
                  waiting time and at a reasonable computational cost,
                  thanks to the effective exploitation
                 of distributed computation. A variety of experimental
                 settings is analyzed and an explanation for the
                 empirical observations is proposed.",
  acknowledgement = ack-nhfb,
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "distributed genetic algorithm; first order logic
                 concept learning; relational concept learning",
}

@Article{Kumar:2003:AME,
  author =       "Piyush Kumar and Joseph S. B. Mitchell and E. Alper
                 Yildirim",
  title =        "Approximate minimum enclosing balls in high dimensions
                 using core-sets",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "8",
  pages =        "1.1:1--1.1:??",
  month =        "????",
  year =         "2003",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/996546.996548",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:56 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study the minimum enclosing ball (MEB) problem for
                 sets of points or balls in high dimensions. Using
                 techniques of second-order cone programming and
                 'core-sets', we have developed $(1 +
                 \epsilon)$-approximation algorithms that perform well
                 in practice, especially for very high dimensions, in
                 addition to having provable guarantees. We prove the
                 existence of core-sets of size $O(1/\epsilon)$,
                 improving the previous bound of $O(1/\epsilon^2)$, and
                 we study empirically how the core-set size grows with
                 dimension. We show that our algorithm, which is simple
                 to implement, results in fast computation of nearly
                 optimal solutions for point sets in much higher
                 dimension than previously computable using exact
                 techniques.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "approximation algorithms; minimum enclosing ball;
                 second-order cone programming",
}
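
%%% The core-set idea admits a very small approximation loop. This is the
%%% Badoiu--Clarkson style farthest-point iteration, not the second-order
%%% cone programming machinery used in the paper:
%%%
%%%   import math
%%%
%%%   def approx_meb(points, eps=0.1):
%%%       c = list(points[0])
%%%       for i in range(1, math.ceil(1.0 / eps ** 2) + 1):
%%%           far = max(points,
%%%                     key=lambda p: sum((a - b) ** 2 for a, b in zip(c, p)))
%%%           c = [a + (b - a) / (i + 1) for a, b in zip(c, far)]
%%%       r = max(math.sqrt(sum((a - b) ** 2 for a, b in zip(c, p)))
%%%               for p in points)
%%%       return c, r      # center and (1+eps)-approximate radius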

@Article{Arge:2003:EPL,
  author =       "Lars Arge and Andrew Danner and Sha-Mayn Teh",
  title =        "{I/O}-efficient point location using persistent
                 {B}-trees",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "8",
  pages =        "1.2:1--1.2:??",
  month =        "????",
  year =         "2003",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/996546.996549",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:56 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present an external planar point location data
                 structure that is I/O-efficient both in theory and
                 practice. The developed structure uses linear space and
                 answers a query in optimal $O(\log B N)$ I/Os, where
                 $B$ is the disk block size. It is based on a persistent
                 B-tree, and all previously developed such structures
                 assume a total order on the elements in the structure.
                 As a theoretical result of independent interest, we
                 show how to remove this assumption. Most previous
                 theoretical I/O-efficient planar point location
                 structures are relatively complicated and have not been
                 implemented. Based on a bucket approach, Vahrenhold and
                 Hinrichs therefore developed a simple and practical,
                 but theoretically non-optimal, heuristic structure. We
                 present an extensive experimental evaluation that shows
                 that, on a range of real-world Geographic Information
                 Systems (GIS) data, our structure uses a similar number
                 of I/Os as the structure of Vahrenhold and Hinrichs to
                 answer a query. On a synthetically generated worst-case
                 dataset our structure uses significantly fewer I/Os.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Buchsbaum:2003:FPM,
  author =       "Adam L. Buchsbaum and Glenn S. Fowler and Balachander
                 Krishnamurthy and Kiem-Phong Vo and Jia Wang",
  title =        "Fast prefix matching of bounded strings",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "8",
  pages =        "1.3:1--1.3:??",
  month =        "????",
  year =         "2003",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/996546.996550",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:56 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Longest Prefix Matching (LPM) is the problem of
                 finding which string from a given set is the longest
                 prefix of another, given string. LPM is a core problem
                 in many applications, including IP routing, network
                 data clustering, and telephone network management.
                 These applications typically require very fast matching
                 of bounded strings, i.e., strings that are short and
                 based on small alphabets. We note a simple
                 correspondence between bounded strings and natural
                 numbers that maps prefixes to nested intervals so that
                 computing the longest prefix matching a string is
                 equivalent to finding the shortest interval containing
                 its corresponding integer value. We then present {\em
                 retries}, a fast and compact data structure for LPM on
                 general alphabets. Performance results show that
                 retries often outperform previously published data
                 structures for IP look-up. By extending LPM to general
                 alphabets, retries admit new applications that could
                 not exploit prior LPM solutions designed for IP
                 look-ups.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "IP routing; prefix matching; table look-up; tries",
}
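
%%% The string-to-interval correspondence is concrete enough to state in a
%%% few lines: a prefix over an alphabet of size sigma covers the integer
%%% interval of all its length-L extensions, and LPM becomes "shortest
%%% interval containing the query's value". A linear-scan Python sketch
%%% (the paper's retries answer the same query far faster):
%%%
%%%   def interval(prefix, alphabet, L):
%%%       sigma, v = len(alphabet), 0
%%%       for ch in prefix:
%%%           v = v * sigma + alphabet.index(ch)
%%%       span = sigma ** (L - len(prefix))
%%%       return v * span, (v + 1) * span        # half-open [lo, hi)
%%%
%%%   def longest_prefix_match(prefixes, query, alphabet, L):
%%%       q, _ = interval(query, alphabet, L)    # query has full length L
%%%       best = None
%%%       for p in prefixes:
%%%           lo, hi = interval(p, alphabet, L)
%%%           if lo <= q < hi and (best is None or hi - lo < best[1]):
%%%               best = (p, hi - lo)            # shorter = longer prefix
%%%       return best[0] if best else None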

@Article{Breimer:2003:LAL,
  author =       "Eric A. Breimer and Mark K. Goldberg and Darren T.
                 Lim",
  title =        "A learning algorithm for the longest common
                 subsequence problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "8",
  pages =        "2.1:1--2.1:??",
  month =        "????",
  year =         "2003",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/996546.996552",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:56 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present an experimental study of a learning
                 algorithm for the longest common subsequence problem,
                 {\em LCS}. Given an arbitrary input domain, the
                 algorithm learns an {\em LCS\/}-procedure tailored to
                 that domain. The learning is done with the help of an
                 oracle, which can be any {\em LCS\/}-algorithm. After
                 solving a limited number of training inputs using an
                 oracle, the learning algorithm outputs a new {\em
                 LCS\/}-procedure. Our experiments demonstrate that, by
                 allowing a slight loss of optimality, learning yields a
                 procedure which is significantly faster than the
                 oracle. The oracle used for the experiments is the {\em
                 np\/}-procedure by Wu {\em et al.}, a modification of
                 Myers' classical {\em LCS\/}-algorithm. We show how to
                 scale up the results of learning on small inputs to
                 inputs of arbitrary lengths. For the domain of two
                 random 2-symbol inputs of length $n$, learning yields a
                 program with 0.999 expected accuracy, which runs in
                 $O(n^{1.41})$-time, in contrast with $O(n^2 \log n)$
                 running time of the fastest theoretical algorithm that
                 produces optimal solutions. For the domain of random
                 2-symbol inputs of length 100,000, the program runs
                 10.5 times faster than the {\em np\/}-procedure,
                  producing 0.999-accurate outputs. The scaled version
                 of the evolved algorithm applied to random inputs of
                 length 1 million runs approximately 30 times faster
                  than the {\em np\/}-procedure while constructing
                  0.999-accurate solutions. We apply the evolved algorithm to
                 DNA sequences of various lengths by training on random
                 4-symbol sequences of up to length 10,000. The evolved
                 algorithm, scaled up to the lengths of up to 1.8
                  million, produces solutions with 0.998 accuracy in
                 a fraction of the time used by the {\em np}.",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Venkataraman:2003:BAP,
  author =       "Gayathri Venkataraman and Sartaj Sahni and Srabani
                 Mukhopadhyaya",
  title =        "A blocked all-pairs shortest-paths algorithm",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "8",
  pages =        "2.2:1--2.2:??",
  month =        "????",
  year =         "2003",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/996546.996553",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:56 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We propose a blocked version of Floyd's all-pairs
                 shortest-paths algorithm. The blocked algorithm makes
                 better utilization of cache than does Floyd's original
                 algorithm. Experiments indicate that the blocked
                 algorithm delivers a speedup (relative to the unblocked
                 Floyd's algorithm) between 1.6 and 1.9 on a Sun Ultra
                 Enterprise 4000/5000 for graphs that have between 480
                 and 3200 vertices. The measured speedup on an SGI O2
                 for graphs with between 240 and 1200 vertices is
                 between 1.6 and 2.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "all pairs shortest paths; blocking; cache; speedup",
}
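
%%% The blocking is a reordering of Floyd's triple loop into cache-sized
%%% tiles: for each block index kb, first the diagonal tile, then the
%%% tiles in its row and column, then the rest. A Python sketch with an
%%% illustrative tile size B:
%%%
%%%   def blocked_floyd(D, B):
%%%       n = len(D)
%%%       def fw(ib, jb, kb):      # relax tile (ib,jb) via k in tile kb
%%%           for k in range(kb * B, min((kb + 1) * B, n)):
%%%               for i in range(ib * B, min((ib + 1) * B, n)):
%%%                   dik = D[i][k]
%%%                   for j in range(jb * B, min((jb + 1) * B, n)):
%%%                       if dik + D[k][j] < D[i][j]:
%%%                           D[i][j] = dik + D[k][j]
%%%       nb = (n + B - 1) // B
%%%       for kb in range(nb):
%%%           fw(kb, kb, kb)                                # diagonal tile
%%%           for jb in range(nb):
%%%               if jb != kb: fw(kb, jb, kb)               # kb-th row
%%%           for ib in range(nb):
%%%               if ib != kb: fw(ib, kb, kb)               # kb-th column
%%%           for ib in range(nb):
%%%               for jb in range(nb):
%%%                   if ib != kb and jb != kb: fw(ib, jb, kb)
%%%       return D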

@Article{Petit:2003:EML,
  author =       "Jordi Petit",
  title =        "Experiments on the minimum linear arrangement
                 problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "8",
  pages =        "2.3:1--2.3:??",
  month =        "????",
  year =         "2003",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/996546.996554",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:04:56 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "This paper deals with the Minimum Linear Arrangement
                 problem from an experimental point of view. Using a
                 testsuite of sparse graphs, we experimentally compare
                 several algorithms to obtain upper and lower bounds for
                 this problem. The algorithms considered include
                 Successive Augmentation heuristics, Local Search
                 heuristics and Spectral Sequencing. The testsuite is
                 based on two random models and 'real life' graphs. As a
                 consequence of this study, two main conclusions can be
                 drawn: On one hand, the best approximations are usually
                 obtained using Simulated Annealing, which involves a
                 large amount of computation time. Solutions found with
                 Spectral Sequencing are close to the ones found with
                 Simulated Annealing and can be obtained in
                 significantly less time. On the other hand, we notice
                 that there exists a big gap between the best obtained
                 upper bounds and the best obtained lower bounds. These
                 two facts together show that, in practice, finding
                 lower and upper bounds for the Minimum Linear
                 Arrangement problem is hard.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
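
%%% The objective all the compared heuristics share is simple to state:
%%% the cost of an arrangement is the sum of the stretches
%%% |pos(u) - pos(v)| over all edges, and local search perturbs the
%%% permutation. In Python:
%%%
%%%   import random
%%%
%%%   def minla_cost(edges, arrangement):
%%%       pos = {v: i for i, v in enumerate(arrangement)}
%%%       return sum(abs(pos[u] - pos[v]) for u, v in edges)
%%%
%%%   def swap_neighbor(arrangement):
%%%       a = arrangement[:]                # one local-search move: swap two
%%%       i, j = random.sample(range(len(a)), 2)
%%%       a[i], a[j] = a[j], a[i]
%%%       return a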

@Article{Brandes:2004:GNC,
  author =       "Ulrik Brandes and Frank Schulz and Dorothea Wagner and
                 Thomas Willhalm",
  title =        "Generating node coordinates for shortest-path
                 computations in transportation networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "9",
  pages =        "1.1:1--1.1:??",
  month =        "????",
  year =         "2004",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1005813.1005815",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:22 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Speed-up techniques that exploit given node
                 coordinates have proven useful for shortest-path
                 computations in transportation networks and geographic
                 information systems. To facilitate the use of such
                 techniques when coordinates are missing from some, or
                 even all, of the nodes in a network we generate
                 artificial coordinates using methods from graph
                 drawing. Experiments on a large set of German train
                 timetables indicate that the speed-up achieved with
                 coordinates from our drawings is close to that achieved
                 with the true coordinates---and in some special cases
                 even better.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "graph drawing; shortest paths; transportation
                 networks; travel planning",
}
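
%%% The kind of coordinate-based speed-up the generated drawings enable is
%%% goal-directed search: Dijkstra with the straight-line distance to the
%%% target as a potential (valid when edge lengths dominate it). A Python
%%% sketch, ours rather than the paper's code:
%%%
%%%   import heapq, math
%%%
%%%   def astar(adj, coords, s, t):
%%%       h = lambda v: math.hypot(coords[v][0] - coords[t][0],
%%%                                coords[v][1] - coords[t][1])
%%%       dist, done = {s: 0.0}, set()
%%%       pq = [(h(s), s)]
%%%       while pq:
%%%           _, u = heapq.heappop(pq)
%%%           if u == t:
%%%               return dist[u]
%%%           if u in done:
%%%               continue
%%%           done.add(u)
%%%           for v, w in adj[u]:
%%%               nd = dist[u] + w
%%%               if nd < dist.get(v, float("inf")):
%%%                   dist[v] = nd
%%%                   heapq.heappush(pq, (nd + h(v), v))
%%%       return float("inf")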

@Article{Niewiadomski:2004:PSD,
  author =       "Robert Niewiadomski and Jos{\'e} Nelson Amaral and
                 Robert C. Holte",
  title =        "A performance study of data layout techniques for
                 improving data locality in refinement-based
                 pathfinding",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "9",
  pages =        "1.2:1--1.2:??",
  month =        "????",
  year =         "2004",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1005813.1041511",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:22 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The widening gap between processor speed and memory
                 latency increases the importance of crafting data
                 structures and algorithms to exploit temporal and
                 spatial locality. Refinement-based pathfinding
                 algorithms, such as Classic Refinement (CR), find
                 quality paths in very large sparse graphs where
                 traditional search techniques fail to generate paths in
                 acceptable time. In this paper, we present a
                 performance evaluation study of three simple data
                 structure transformations aimed at improving the data
                 reference locality of CR. These transformations are
                 robust to changes in computer architecture and the
                 degree of compiler optimization. We test our
                 alternative designs on four contemporary architectures,
                 using two compilers for each machine. In our
                 experiments, the application of these techniques
                 results in performance improvements of up to 67\% with
                 consistent improvements above 15\%. Analysis reveals
                 that these improvements stem from improved data
                 reference locality at the page level and to a lesser
                 extent at the cache line level.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "cache-conscious algorithms; classical refinement;
                 pathfinding",
}

@Article{Marathe:2004:ESS,
  author =       "Madhav V. Marathe and Alessandro Panconesi and Larry
                 D. {Risinger, Jr.}",
  title =        "An experimental study of a simple, distributed
                 edge-coloring algorithm",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "9",
  pages =        "1.3:1--1.3:??",
  month =        "????",
  year =         "2004",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1005813.1041515",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:22 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We conduct an experimental analysis of a distributed
                 randomized algorithm for edge coloring simple
                 undirected graphs. The algorithm is extremely simple
                 yet, according to the probabilistic analysis, it
                 computes nearly optimal colorings very quickly [Grable
                 and Panconesi 1997]. We test the algorithm on a number
                 of random as well as nonrandom graph families. The test
                 cases were chosen based on two objectives: (i) to
                 provide insights into the worst-case behavior (in terms
                 of time and quality) of the algorithm and (ii) to test
                 the performance of the algorithm with instances that
                 are likely to arise in practice. Our main results
                  include the following: (1) The empirical results
                 obtained compare very well with the recent empirical
                 results reported by other researchers [Durand et al.
                  1994, 1998; Jain and Werth 1995]. (2) The empirical
                 results confirm the bounds on the running time and the
                 solution quality as claimed in the theoretical paper.
                 Our results show that for certain classes of graphs the
                 algorithm is likely to perform much better than the
                  analysis suggests. (3) The results demonstrate that the
                 algorithm might be well suited (from a theoretical as
                 well as practical standpoint) for edge coloring graphs
                 quickly and efficiently in a distributed setting. Based
                 on our empirical study, we propose a simple
                 modification of the original algorithm with
                 substantially improved performance in practice.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "distributed algorithms; edge coloring; experimental
                 analysis of algorithms; high performance computing;
                 randomized algorithms; scheduling",
}
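
%%% A minimal Python sketch of the synchronous round structure used
%%% by distributed randomized edge coloring: each uncolored edge
%%% tentatively picks a random palette color and keeps it only if no
%%% adjacent edge picked the same one. The conflict handling and
%%% palette size here are illustrative; the algorithm analyzed by
%%% Grable and Panconesi works with palettes of size (1 + eps) times
%%% the maximum degree and a sharper analysis.
%%%
%%%     import random
%%%     from collections import defaultdict
%%%
%%%     def random_edge_coloring(edges, colors):
%%%         # with >= 2*Delta - 1 colors, no palette can empty
%%%         color, uncolored = {}, set(edges)
%%%         adj = defaultdict(set)
%%%         for u, v in edges:
%%%             adj[u].add((u, v)); adj[v].add((u, v))
%%%         palette = {e: set(colors) for e in edges}
%%%         while uncolored:
%%%             pick = {e: random.choice(tuple(palette[e]))
%%%                     for e in uncolored}
%%%             # decide every edge against the same snapshot of picks
%%%             winners = [e for e in uncolored
%%%                        if all(pick[f] != pick[e]
%%%                               for f in (adj[e[0]] | adj[e[1]]) - {e})]
%%%             for e in winners:
%%%                 u, v = e
%%%                 color[e] = pick[e]
%%%                 uncolored.discard(e)
%%%                 adj[u].discard(e); adj[v].discard(e)
%%%                 for f in adj[u] | adj[v]:   # uncolored neighbors
%%%                     palette[f].discard(pick[e])
%%%         return color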

@Article{Fredriksson:2004:AOS,
  author =       "Kimmo Fredriksson and Gonzalo Navarro",
  title =        "Average-optimal single and multiple approximate string
                 matching",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "9",
  pages =        "1.4:1--1.4:??",
  month =        "????",
  year =         "2004",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1005813.1041513",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:22 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present a new algorithm for multiple approximate
                 string matching. It is based on reading backwards
                  enough $\ell$-grams from text windows so as to prove that no
                 occurrence can contain the part of the window read, and
                 then shifting the window. We show analytically that our
                 algorithm is optimal on average. Hence our first
                 contribution is to fill an important gap in the area,
                 since no average-optimal algorithm existed for multiple
                 approximate string matching. We consider several
                 variants and practical improvements to our algorithm,
                 and show experimentally that they are resistant to the
                 number of patterns and the fastest for low difference
                 ratios, displacing the long-standing best algorithms.
                 Hence our second contribution is to give a practical
                 algorithm for this problem, by far better than any
                 existing alternative in many cases of interest. On
                 real-life texts, our algorithm is especially
                 interesting for computational biology applications. In
                 particular, we show that our algorithm can be
                 successfully used to search for one pattern, where many
                 more competing algorithms exist. Our algorithm is also
                 average-optimal in this case, being the second after
                 that of Chang and Marr. However, our algorithm permits
                 higher difference ratios than Chang and Marr, and this
                 is our third contribution. In practice, our algorithm
                 is competitive in this scenario too, being the fastest
                 for low difference ratios and moderate alphabet sizes.
                 This is our fourth contribution, which also answers
                 affirmatively the question of whether a practical
                 average-optimal approximate string-matching algorithm
                 existed.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; approximate string matching; biological
                 sequences; multiple string matching; optimality",
}
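
%%% A Python sketch of the backward-gram filtration idea from the
%%% abstract, restricted to the mismatches (Hamming) case for
%%% clarity; the paper handles full edit distance, where the shifts
%%% and bounds are more delicate, and proves average-optimality. The
%%% function names are illustrative.
%%%
%%%     def pattern_grams(patterns, ell):
%%%         return {p[i:i + ell]
%%%                 for p in patterns
%%%                 for i in range(len(p) - ell + 1)}
%%%
%%%     def filter_candidates(text, patterns, k, ell):
%%%         m = min(len(p) for p in patterns)
%%%         grams = pattern_grams(patterns, ell)
%%%         pos, out = 0, []
%%%         while pos + m <= len(text):
%%%             bad, j = 0, pos + m
%%%             # read non-overlapping ell-grams right to left; each
%%%             # gram absent from every pattern forces one mismatch
%%%             while j - ell >= pos and bad <= k:
%%%                 if text[j - ell:j] not in grams:
%%%                     bad += 1
%%%                 j -= ell
%%%             if bad <= k:
%%%                 out.append(pos)   # candidate; verify separately
%%%                 pos += 1
%%%             else:
%%%                 # every window starting in [pos, j] contains all
%%%                 # grams read, hence > k mismatches: skip them all
%%%                 pos = j + 1
%%%         return out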

@Article{Sinha:2004:CCS,
  author =       "Ranjan Sinha and Justin Zobel",
  title =        "Cache-conscious sorting of large sets of strings with
                 dynamic tries",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "9",
  pages =        "1.5:1--1.5:??",
  month =        "????",
  year =         "2004",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1005813.1041517",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:22 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Ongoing changes in computer architecture are affecting
                 the efficiency of string-sorting algorithms. The size
                 of main memory in typical computers continues to grow
                 but memory accesses require increasing numbers of
                 instruction cycles, which is a problem for the most
                 efficient of the existing string-sorting algorithms as
                 they do not utilize cache well for large data sets. We
                 propose a new sorting algorithm for strings, burstsort,
                 based on dynamic construction of a compact trie in
                 which strings are kept in buckets. It is simple, fast,
                 and efficient. We experimentally explore key
                 implementation options and compare burstsort to
                 existing string-sorting algorithms on large and small
                 sets of strings with a range of characteristics. These
                 experiments show that, for large sets of strings,
                 burstsort is almost twice as fast as any previous
                  algorithm, primarily due to a lower rate of cache
                  misses.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
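
%%% A compact Python sketch of the bucket-bursting idea described
%%% above: a trie over leading characters whose leaves are unsorted
%%% buckets, a bucket being "burst" into a new trie node when it
%%% outgrows a threshold. The threshold and representation are
%%% illustrative stand-ins for the implementation options the paper
%%% explores.
%%%
%%%     def burstsort(strings, threshold=32):
%%%         END = ""                    # bucket for exhausted strings
%%%         root = {}                   # char -> subtrie or bucket list
%%%
%%%         def insert(node, s, depth):
%%%             c = s[depth] if depth < len(s) else END
%%%             child = node.setdefault(c, [])
%%%             if isinstance(child, dict):
%%%                 insert(child, s, depth + 1)
%%%                 return
%%%             child.append(s)
%%%             if c != END and len(child) > threshold:
%%%                 node[c] = new = {}  # burst one level deeper
%%%                 for t in child:
%%%                     insert(new, t, depth + 1)
%%%
%%%         for s in strings:
%%%             insert(root, s, 0)
%%%
%%%         out = []
%%%         def traverse(node, depth):
%%%             for c in sorted(node):  # END ("") sorts first
%%%                 child = node[c]
%%%                 if isinstance(child, dict):
%%%                     traverse(child, depth + 1)
%%%                 elif c == END:
%%%                     out.extend(child)   # identical strings
%%%                 else:
%%%                     # bucket shares prefix s[:depth+1]; sort tails
%%%                     out.extend(sorted(child, key=lambda t: t[depth + 1:]))
%%%         traverse(root, 0)
%%%         return out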

@Article{Goh:2004:TAP,
  author =       "Rick Siow Mong Goh and Ian Li-Jin Thng",
  title =        "Twol-amalgamated priority queues",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "9",
  pages =        "1.6:1--1.6:??",
  month =        "????",
  year =         "2004",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1005813.1057625",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:22 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Priority queues are essential function blocks in
                 numerous applications such as discrete event
                 simulations. This paper describes and exemplifies the
                 ease of obtaining high performance priority queues
                 using a two-tier list-based structure. This new
                 implementation, called the {\em Twol\/} structure, is
                  amalgamated with three priority queues, namely
                  Henriksen's queue, the splay tree, and the skew
                  heap, to enhance
                 the efficiency of these {\em basal\/} priority queue
                 structures. Using a model that combines traditional
                 average case and amortized complexity analysis,
                 Twol-amalgamated priority queues that maintain $N$
                 active events are theoretically proven to offer $O(1)$
                 {\em expected amortized complexity\/} under reasonable
                 assumptions. They are also demonstrated empirically to
                 offer stable near $O(1)$ performance for widely varying
                 priority increment distributions and for queue sizes
                 ranging from 10 to 10 million. Extensive empirical
                 results show that the Twol-amalgamated priority queues
                 consistently outperform those basal structures (i.e.,
                 without the Twol structure) with an average speedup of
                 about three to five times on widely different hardware
                 architectures. These results provide testimony that the
                 Twol-amalgamated priority queues are suitable for
                 implementation in sizable application scenarios such
                 as, but not limited to, large-scale discrete event
                 simulation.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithm analysis; calendar queue; discrete event
                 simulation; future event list; Henriksen's; pending
                 event set; priority queue; simulator; skew heap; splay
                 tree; tree",
}
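
%%% A minimal Python sketch of the two-tier idea described above: a
%%% small sorted "near" tier answers dequeues while arbitrary inserts
%%% go to an unsorted "far" tier in O(1), migrated in bulk when the
%%% near tier drains. A binary heap stands in for the basal priority
%%% queue, and the fixed horizon is illustrative; the Twol structure
%%% manages its boundary adaptively.
%%%
%%%     import heapq
%%%
%%%     class TwoTierPQ:
%%%         def __init__(self, horizon):
%%%             self.horizon = horizon  # width of the near window
%%%             self.split = horizon    # near/far boundary priority
%%%             self.near = []          # heap of (priority, item)
%%%             self.far = []           # unsorted (priority, item)
%%%
%%%         def push(self, prio, item):
%%%             if prio < self.split:
%%%                 heapq.heappush(self.near, (prio, item))
%%%             else:
%%%                 self.far.append((prio, item))
%%%
%%%         def pop(self):
%%%             while not self.near and self.far:
%%%                 # advance the boundary, migrate now-near events
%%%                 self.split += self.horizon
%%%                 near_now = [e for e in self.far if e[0] < self.split]
%%%                 self.far = [e for e in self.far if e[0] >= self.split]
%%%                 for e in near_now:
%%%                     heapq.heappush(self.near, e)
%%%             return heapq.heappop(self.near)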

@Article{Ioannidis:2005:ADS,
  author =       "Ioannis Ioannidis and Ananth Grama and Mikhail
                 Atallah",
  title =        "Adaptive data structures for {IP} lookups",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "1.1:1--1.1:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1064548",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The problem of efficient data structures for IP
                 lookups has been well studied in the literature.
                 Techniques such as LC tries and extensible hashing are
                 commonly used. In this paper, we address the problem of
                 generalizing LC tries, based on traces of past lookups,
                 to provide performance guarantees for memory suboptimal
                 structures. As a specific example, if a memory-optimal
                 (LC) trie takes 6 MB and the total memory at the router
                 is 8 MB, how should the trie be modified to make best
                 use of the 2 MB of excess memory? We present a greedy
                 algorithm for this problem and prove that, if for the
                 optimal data structure there are $b$ fewer memory
                 accesses on average for each lookup compared with the
                 original trie, the solution produced by the greedy
                 algorithm will have at least $9 \times b /11$ fewer
                 memory accesses on average (compared to the original
                 trie). An efficient implementation of this algorithm
                 presents significant additional challenges. We describe
                 an implementation with a time complexity of $O(\xi(d) n
                 \log n)$ and a space complexity of $O(n)$, where $n$ is
                 the number of nodes of the trie and $d$ its depth. The
                 depth of a trie is fixed for a given version of the
                 Internet protocol and is typically $O(\log n)$. In this
                 case, $\xi(d) = O(\log^2 n)$. We also demonstrate
                 experimentally the performance and scalability of the
                 algorithm on actual routing data.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "IP lookups; level compression",
}

@Article{Lesh:2005:NHI,
  author =       "N. Lesh and J. Marks and A. McMahon and M.
                 Mitzenmacher",
  title =        "New heuristic and interactive approaches to {$2$D}
                 rectangular strip packing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "1.2:1--1.2:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1083322",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper, we consider the two-dimensional
                 rectangular strip packing problem. A standard simple
                 heuristic, Bottom-Left-Decreasing (BLD), has been shown
                 to perform quite well in practice. We introduce and
                 demonstrate the effectiveness of BLD*, a stochastic
                 search variation of BLD. While BLD places the
                 rectangles in decreasing order of height, width, area,
                 and perimeter, BLD* successively tries random
                 orderings, chosen from a distribution determined by
                 their Kendall-tau distance from one of these fixed
                 orderings. Our experiments on benchmark problems show
                 that BLD* produces significantly better packings than
                 BLD after only 1 min of computation. Furthermore, we
                 also show that BLD* outperforms recently reported
                  metaheuristics. Moreover, we observe that people
                 seem able to reason about packing problems extremely
                 well. We incorporate our new algorithms in an
                 interactive system that combines the advantages of
                 computer speed and human reasoning. Using the
                 interactive system, we are able to quickly produce
                 significantly better solutions than BLD* by itself.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "2D rectangular strip packing; cutting stock/trim;
                 interactive methods",
}
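
%%% A Python sketch of the BLD* search loop described above, with the
%%% bottom-left placement routine left abstract: `place_height(order)`
%%% is assumed to pack the rectangles in the given order and return
%%% the strip height. The paper samples orderings from a distribution
%%% over Kendall-tau distances and perturbs all four base orderings;
%%% adjacent transpositions of the height-decreasing ordering stand
%%% in for that here.
%%%
%%%     import random
%%%
%%%     def bld_star(rects, place_height, iters=1000, max_swaps=5):
%%%         # rects: list of (width, height), at least two of them
%%%         base = sorted(rects, key=lambda r: -r[1])
%%%         best_order, best_h = base, place_height(base)
%%%         for _ in range(iters):
%%%             order = list(base)
%%%             # a few adjacent transpositions keep the Kendall-tau
%%%             # distance from the base ordering small
%%%             for _ in range(random.randint(1, max_swaps)):
%%%                 i = random.randrange(len(order) - 1)
%%%                 order[i], order[i + 1] = order[i + 1], order[i]
%%%             h = place_height(order)
%%%             if h < best_h:
%%%                 best_order, best_h = order, h
%%%         return best_order, best_h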

@Article{Wagner:2005:GCE,
  author =       "Dorothea Wagner and Thomas Willhalm and Christos
                 Zaroliagis",
  title =        "Geometric containers for efficient shortest-path
                 computation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "1.3:1--1.3:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1103378",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A fundamental approach in finding efficiently best
                 routes or optimal itineraries in traffic information
                 systems is to reduce the search space (part of graph
                 visited) of the most commonly used shortest path
                 routine (Dijkstra's algorithm) on a suitably defined
                 graph. We investigate reduction of the search space
                 while simultaneously retaining data structures, created
                 during a preprocessing phase, of size linear (i.e.,
                  optimal) in the size of the graph. We show that the
                 search space of Dijkstra's algorithm can be
                 significantly reduced by extracting geometric
                 information from a given layout of the graph and by
                 encapsulating precomputed shortest-path information in
                  the resulting geometric objects (containers). We present an
                 extensive experimental study comparing the impact of
                 different types of geometric containers using test data
                 from real-world traffic networks. We also present new
                 algorithms as well as an empirical study for the
                 dynamic case of this problem, where edge weights are
                 subject to change and the geometric containers have to
                  be updated, and show that our new methods are two to
                 three times faster than recomputing everything from
                 scratch. Finally, in an appendix, we discuss the
                 software framework that we developed to realize the
                 implementations of all of our variants of Dijkstra's
                 algorithm. Such a framework is not trivial to achieve
                 as our goal was to maintain a common code base that is,
                 at the same time, small, efficient, and flexible, as we
                 wanted to enhance and combine several variants in any
                 possible way.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "data structures and algorithms; Dijkstra's algorithm;
                 geometric container; graph algorithms; shortest path;
                 traffic network",
}
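
%%% A Python sketch of container pruning in Dijkstra's algorithm as
%%% described above, using axis-parallel bounding boxes (one of the
%%% container shapes the paper compares). The containers `box[(u, v)]`
%%% are assumed precomputed: each must enclose every node whose
%%% shortest path from u starts with edge (u, v).
%%%
%%%     import heapq
%%%
%%%     def dijkstra_with_containers(graph, source, target, box, pos):
%%%         # graph: u -> [(v, weight)]; pos: node -> (x, y)
%%%         def inside(b, p):
%%%             x1, y1, x2, y2 = b
%%%             return x1 <= p[0] <= x2 and y1 <= p[1] <= y2
%%%
%%%         dist, heap = {source: 0.0}, [(0.0, source)]
%%%         while heap:
%%%             d, u = heapq.heappop(heap)
%%%             if u == target:
%%%                 return d
%%%             if d > dist.get(u, float("inf")):
%%%                 continue            # stale heap entry
%%%             for v, w in graph[u]:
%%%                 # container says the target is not reached via a
%%%                 # shortest path through this edge: prune it
%%%                 if not inside(box[(u, v)], pos[target]):
%%%                     continue
%%%                 nd = d + w
%%%                 if nd < dist.get(v, float("inf")):
%%%                     dist[v] = nd
%%%                     heapq.heappush(heap, (nd, v))
%%%         return float("inf")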

@Article{Lopez-Ortiz:2005:FSS,
  author =       "Alejandro L{\'o}pez-Ortiz and Mehdi Mirzazadeh and
                 Mohammad Ali Safari and Hossein Sheikhattar",
  title =        "Fast string sorting using order-preserving
                 compression",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "1.4:1--1.4:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1180611",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We give experimental evidence for the benefits of
                 order-preserving compression in sorting algorithms.
                 While, in general, any algorithm might benefit from
                 compressed data because of reduced paging requirements,
                 we identified two natural candidates that would further
                 benefit from order-preserving compression, namely
                 string-oriented sorting algorithms and word-RAM
                 algorithms for keys of bounded length. The word-RAM
                 model has some of the fastest known sorting algorithms
                 in practice. These algorithms are designed for keys of
                 bounded length, usually 32 or 64 bits, which limits
                 their direct applicability for strings. One possibility
                 is to use an order-preserving compression scheme, so
                 that a bounded-key-length algorithm can be applied. For
                 the case of standard algorithms, we took what is
                  considered to be among the fastest non-word-RAM
                 string sorting algorithms, Fast MKQSort, and measured
                 its performance on compressed data. The Fast MKQSort
                 algorithm of Bentley and Sedgewick is optimized to
                 handle text strings. Our experiments show that
                  order-preserving compression results in savings of
                 approximately 15\% over the same algorithm on
                 noncompressed data. For the word-RAM, we modified
                 Andersson's sorting algorithm to handle variable-length
                 keys. The resulting algorithm is faster than the
                  standard Unix sort by a factor of $1.5 \times$. Last, we
                 used an order-preserving scheme that is within a
                 constant additive term of the optimal Hu--Tucker, but
                 requires linear time rather than $O(m \log m)$, where
                 $m = |\Sigma|$ is the size of the alphabet.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "order-preserving compression; sorting; unit-cost RAM;
                 word-RAM",
}
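
%%% A Python sketch of the simplest order-preserving compression, for
%%% intuition only: recode the alphabet actually used with dense
%%% fixed-width codes and pack each string into one integer, so that
%%% integer comparison agrees with string comparison (0 pads short
%%% strings, so prefixes sort first). The paper instead uses a
%%% near-optimal variable-length, Hu--Tucker-style code computed in
%%% linear time, which compresses far better.
%%%
%%%     def order_preserving_sort(strings):
%%%         alphabet = sorted(set("".join(strings)))
%%%         code = {c: i + 1 for i, c in enumerate(alphabet)}
%%%         bits = len(alphabet).bit_length()   # width per symbol
%%%         width = max(map(len, strings))
%%%
%%%         def pack(s):
%%%             key = 0
%%%             for i in range(width):
%%%                 key = (key << bits) | (code[s[i]] if i < len(s) else 0)
%%%             return key
%%%
%%%         return sorted(strings, key=pack)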

@Article{Ribeiro:2005:P,
  author =       "Celso C. Ribeiro and Simone L. Martins",
  title =        "Preface",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.1:1--2.1:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1180620",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Sinha:2005:URS,
  author =       "Ranjan Sinha and Justin Zobel",
  title =        "Using random sampling to build approximate tries for
                 efficient string sorting",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.10:1--2.10:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1180622",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Algorithms for sorting large datasets can be made more
                 efficient with careful use of memory hierarchies and
                 reduction in the number of costly memory accesses. In
                 earlier work, we introduced burstsort, a new
                 string-sorting algorithm that on large sets of strings
                 is almost twice as fast as previous algorithms,
                 primarily because it is more cache efficient. Burstsort
                 dynamically builds a small trie that is used to rapidly
                 allocate each string to a bucket. In this paper, we
                 introduce new variants of our algorithm: SR-burstsort,
                 DR-burstsort, and DRL-burstsort. These algorithms use a
                 random sample of the strings to construct an
                 approximation to the trie prior to sorting. Our
                 experimental results with sets of over 30 million
                  strings show that the new variants reduce cache
                  misses by up to 37\% beyond the original burstsort,
                  while simultaneously reducing instruction
                 counts by up to 24\%. In pathological cases, even
                 further savings can be obtained.",
  acknowledgement = ack-nhfb,
  articleno =    "2.10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "cache-aware; cache-conscious; data structure;
                 in-memory; sorting; string",
}

@Article{Bracht:2005:GAA,
  author =       "Evandro C. Bracht and Luis and A. A. Meira and F. K.
                 Miyazawa",
  title =        "A greedy approximation algorithm for the uniform
                 metric labeling problem analyzed by a primal-dual
                 technique",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.11:1--2.11:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1180623",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the uniform metric labeling problem. This
                 NP-hard problem considers how to assign objects to
                 labels respecting assignment and separation costs. The
                 known approximation algorithms are based on solutions
                 of large linear programs and are impractical for
                  moderate- and large-size instances. We present an
                  $8 \log n$-approximation algorithm that can be applied to
                 large-size instances. The algorithm is greedy and is
                 analyzed by a primal-dual technique. We implemented the
                 presented algorithm and two known approximation
                  algorithms and compared them on randomized instances.
                  The time gain was considerable, with small error
                 ratios. We also show that the analysis is tight, up to
                 a constant factor.",
  acknowledgement = ack-nhfb,
  articleno =    "2.11",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "approximation algorithms; graph labeling",
}

@Article{deSouza:2005:DMP,
  author =       "Cid C. de Souza and Andre M. Lima and Guido Araujo and
                 Nahri B. Moreano",
  title =        "The datapath merging problem in reconfigurable
                 systems: {Complexity}, dual bounds and heuristic
                 evaluation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.2:1--2.2:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1180613",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper, we investigate the data path merging
                 problem (DPM) in reconfigurable systems. DPM is modeled
                 as a graph optimization problem and is shown to be {\em
                 NP\/}-hard. An Integer Programming (IP) formulation of
                 the problem is presented and some valid inequalities
                 for the convex hull of integer solutions are
                 introduced. These inequalities form the basis of a
                 branch-and-cut algorithm that we implemented. This
                 algorithm was used to compute lower bounds for a set of
                 DPM instances, allowing us to assess the performance of
                 two heuristics proposed earlier in the literature for
                 the problem. Moreover, the branch-and-cut algorithm
                 also was proved to be a valuable tool to solve
                 small-sized DPM instances to optimality.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "data path merging; heuristics; lower bounds;
                 reconfigurable systems",
}

@Article{Du:2005:IAA,
  author =       "Jingde Du and Stavros G. Kolliopoulos",
  title =        "Implementing approximation algorithms for the
                 single-source unsplittable flow problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.3:1--2.3:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1180614",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In the {\em single-source unsplittable flow\/}
                 problem, commodities must be routed simultaneously from
                 a common source vertex to certain sinks in a given
                 graph with edge capacities. The demand of each
                 commodity must be routed along a single path so that
                  the total flow through any edge is at most its
                 capacity. This problem was introduced by Kleinberg
                 [1996a] and generalizes several NP-complete problems. A
                 cost value per unit of flow may also be defined for
                 every edge. In this paper, we implement the
                 2-approximation algorithm of Dinitz et al. [1999] for
                 congestion, which is the best known, and the (3,
                 1)-approximation algorithm of Skutella [2002] for
                 congestion and cost, which is the best known bicriteria
                 approximation. We experimentally study the quality of
                 approximation achieved by the algorithms and the effect
                 of heuristics on their performance. We also compare
                 these algorithms against the previous best ones by
                 Kolliopoulos and Stein [1999].",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "approximation algorithms; network flow; unsplittable
                 flow",
}

@Article{Duch:2005:IPM,
  author =       "Amalia Duch and Conrado Mart{\'\i}nez",
  title =        "Improving the performance of multidimensional search
                 using fingers",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.4:1--2.4:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1180615",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We propose two variants of $K$-d trees where {\em
                 fingers\/} are used to improve the performance of
                 orthogonal range search and nearest neighbor queries
                 when they exhibit locality of reference. The
                 experiments show that the second alternative yields
                 significant savings. Although it yields more modest
                  improvements, the first variant does so with much
                  lower memory requirements and great simplicity, which makes
                 it more attractive on practical grounds.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "experimental algorithmics; Finger search; K-d trees;
                 locality; multidimensional data structures;
                 nearest-neighbors searching; orthogonal range
                 searching",
}

@Article{Holzer:2005:CST,
  author =       "Martin Holzer and Frank Schulz and Dorothea Wagner and
                 Thomas Willhalm",
  title =        "Combining speed-up techniques for shortest-path
                 computations",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.5:1--2.5:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1180616",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In practice, computing a shortest path from one node
                 to another in a directed graph is a very common task.
                 This problem is classically solved by Dijkstra's
                 algorithm. Many techniques are known to speed up this
                 algorithm heuristically, while optimality of the
                 solution can still be guaranteed. In most studies, such
                 techniques are considered individually. The focus of
                 our work is {\em combination\/} of speed-up techniques
                 for Dijkstra's algorithm. We consider all possible
                 combinations of four known techniques, namely, {\em
                 goal-directed search}, {\em bidirectional search}, {\em
                 multilevel approach}, and {\em shortest-path
                 containers}, and show how these can be implemented. In
                 an extensive experimental study, we compare the
                 performance of the various combinations and analyze how
                 the techniques harmonize when jointly applied. Several
                 real-world graphs from road maps and public transport
                 and three types of generated random graphs are taken
                 into account.",
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "combination; Dijkstra's algorithm; shortest path;
                 speed-up",
}

@Article{Hyyro:2005:IBP,
  author =       "Heikki Hyyr{\"o} and Kimmo Fredriksson and Gonzalo
                 Navarro",
  title =        "Increased bit-parallelism for approximate and multiple
                 string matching",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.6:1--2.6:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1180617",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Bit-parallelism permits executing several operations
                 simultaneously over a set of bits or numbers stored in
                 a single computer word. This technique permits
                 searching for the approximate occurrences of a pattern
                 of length $m$ in a text of length $n$ in time $O(\lceil
                 m / w \rceil n)$, where $w$ is the number of bits in
                 the computer word. Although this is asymptotically the
                 optimal bit-parallel speedup over the basic $O(mn)$
                 time algorithm, it wastes bit-parallelism's power in
                 the common case where $m$ is much smaller than $w$,
                 since $w - m$ bits in the computer words are unused. In
                 this paper, we explore different ways to increase the
                 bit-parallelism when the search pattern is short.
                 First, we show how multiple patterns can be packed into
                  a single computer word so as to search for all of them
                 simultaneously. Instead of spending $O(rn)$ time to
                 search for $r$ patterns of length $m \leq w / 2$, we
                 need $O(\lceil rm / w \rceil n)$ time. Second, we show
                 how the mechanism permits boosting the search for a
                 single pattern of length $m \leq w / 2$, which can be
                 searched for in $O(\lceil n / \lfloor w / m \rfloor
                 \rceil)$ bit-parallel steps instead of $O(n)$. Third,
                 we show how to extend these algorithms so that the time
                 bounds essentially depend on $k$ instead of $m$, where
                 $k$ is the maximum number of differences permitted.
                 Finally, we show how the ideas can be applied to other
                 problems such as multiple exact string matching and
                 one-against-all computation of edit distance and
                 longest common subsequences. Our experimental results
                 show that the new algorithms work well in practice,
                 obtaining significant speedups over the best existing
                 alternatives, especially on short patterns and moderate
                 number of differences allowed. This work fills an
                 important gap in the field, where little work has
                 focused on very short patterns.",
  acknowledgement = ack-nhfb,
  articleno =    "2.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "approximate string matching; bit-parallelism; multiple
                 string matching",
}
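
%%% A Python sketch of the packing idea from the abstract for the
%%% exact-matching case: r patterns of common length m occupy
%%% disjoint m-bit segments of one state word, so a single shift-and
%%% step advances all pattern automata at once. Python integers stand
%%% in for the w-bit word, so the r*m <= w constraint is not enforced
%%% here; the approximate-matching variants are beyond this sketch.
%%%
%%%     def packed_shift_and(text, patterns):
%%%         m, r = len(patterns[0]), len(patterns)   # equal lengths
%%%         B = {}                       # char -> combined match mask
%%%         for j, p in enumerate(patterns):
%%%             for i, c in enumerate(p):
%%%                 B[c] = B.get(c, 0) | 1 << (j * m + i)
%%%         init = sum(1 << (j * m) for j in range(r))
%%%         final = init << (m - 1)      # accepting bit per segment
%%%         state = 0
%%%         for pos, c in enumerate(text):
%%%             state = ((state << 1) | init) & B.get(c, 0)
%%%             if state & final:
%%%                 for j in range(r):
%%%                     if state & 1 << (j * m + m - 1):
%%%                         yield pos - m + 1, patterns[j]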

@Article{Nikolov:2005:SEH,
  author =       "Nikola S. Nikolov and Alexandre Tarassov and
                 J{\"u}rgen Branke",
  title =        "In search for efficient heuristics for minimum-width
                 graph layering with consideration of dummy nodes",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.7:1--2.7:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1180618",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We propose two fast heuristics for solving the NP-hard
                 problem of graph layering with the minimum width and
                 consideration of dummy nodes. Our heuristics can be
                 used at the layer-assignment phase of the Sugiyama
                  method for drawing directed graphs. We evaluate our
                 heuristics by comparing them to the widely used
                 fast-layering algorithms in an extensive computational
                 study with nearly 6000 input graphs. We also
                 demonstrate how the well-known longest-path and
                 Coffman--Graham algorithms can be used for finding
                 narrow layerings with acceptable aesthetic
                 properties.",
  acknowledgement = ack-nhfb,
  articleno =    "2.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dummy vertices; hierarchical graph drawing; layer
                 assignment; layered graphs; layering",
}
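
%%% A Python sketch of the classical longest-path layering that the
%%% paper compares against: every node is placed one layer below its
%%% deepest predecessor, which minimizes height but can be very wide
%%% once the dummy nodes that subdivide long edges are counted. Input
%%% is assumed acyclic, as in layer assignment.
%%%
%%%     from collections import defaultdict
%%%
%%%     def longest_path_layering(nodes, edges):
%%%         preds = defaultdict(list)
%%%         for u, v in edges:
%%%             preds[v].append(u)
%%%         layer = {}
%%%         def assign(v):
%%%             if v not in layer:
%%%                 layer[v] = 1 + max((assign(u) for u in preds[v]),
%%%                                    default=0)
%%%             return layer[v]
%%%         for v in nodes:
%%%             assign(v)
%%%         # an edge spanning s layers creates s - 1 dummy nodes
%%%         dummies = sum(layer[v] - layer[u] - 1 for u, v in edges)
%%%         return layer, dummies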

@Article{Pemmaraju:2005:AIC,
  author =       "Sriram V. Pemmaraju and Sriram Penumatcha and Rajiv
                 Raman",
  title =        "Approximating interval coloring and max-coloring in
                 chordal graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.8:1--2.8:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1180619",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider two coloring problems: interval coloring
                 and max-coloring for chordal graphs. Given a graph $G =
                 (V, E)$ and positive-integral vertex weights $w: V
                 \rightarrow N$, the {\em interval-coloring\/} problem
                 seeks to find an assignment of a real interval $I(u)$
                 to each vertex $u \in V$, such that two constraints are
                 satisfied: (i) for every vertex $u \in V$, $|I(u)| =
                 w(u)$ and (ii) for every pair of adjacent vertices $u$
                 and $v$, $I(u) \cap I(v) = \emptyset$. The goal is to
                 minimize the {\em span\/} $|\cup_{v \in V} I(v)|$. The
                 {\em max-coloring problem\/} seeks to find a proper
                 vertex coloring of $G$ whose color classes $C_1$,
                 $C_2$, \ldots{}, $C_k$, minimize the sum of the weights
                 of the heaviest vertices in the color classes, that is,
                  $\sum_{i=1}^{k} \max_{v \in C_i} w(v)$. Both
                 problems arise in efficient memory allocation for
                 programs. The interval-coloring problem models the
                 compile-time memory allocation problem and has a rich
                 history dating back at least to the 1970s. The
                 max-coloring problem arises in minimizing the total
                 buffer size needed by a dedicated memory manager for
                 programs. In another application, this problem models
                 scheduling of conflicting jobs in batches to minimize
                 the {\em makespan}. Both problems are NP-complete even
                 for interval graphs, although there are constant-factor
                 approximation algorithms for both problems on interval
                 graphs. In this paper, we consider these problems for
                 {\em chordal graphs}, a subclass of perfect graphs.
                 These graphs naturally generalize interval graphs and
                 can be defined as the class of graphs that have no
                 induced cycle of length $> 3$. Recently, a
                 4-approximation algorithm (which we call GeomFit) has
                 been presented for the max-coloring problem on perfect
                 graphs (Pemmaraju and Raman 2005). This algorithm can
                 be used to obtain an interval coloring as well, but
                 without the constant-factor approximation guarantee. In
                 fact, there is no known constant-factor approximation
                 algorithm for the interval-coloring problem on perfect
                 graphs. We study the performance of GeomFit and several
                 simple $O(\log(n))$-factor approximation algorithms for
                 both problems. We experimentally evaluate and compare
                 four simple heuristics: first-fit, best-fit, GeomFit,
                 and a heuristic based on partitioning the graph into
                 vertex sets of similar weight. Both for max-coloring
                 and for interval coloring, GeomFit deviates from OPT by
                 about 1.5\%, on average. The performance of first-fit
                  comes a close second, deviating from OPT by less than
                  6\%, on average, for both problems. Best-fit comes
                  third and the graph-partitioning heuristic comes a distant
                 last. Our basic data comes from about 10,000 runs of
                 each of the heuristics for each of the two problems on
                 randomly generated chordal graphs of various sizes,
                 sparsity, and structure.",
  acknowledgement = ack-nhfb,
  articleno =    "2.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "chordal graphs; dynamic storage allocation; graph
                 coloring; perfect graphs",
}
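
%%% A Python sketch of the first-fit heuristic evaluated above for
%%% max-coloring: process vertices by decreasing weight, give each
%%% the smallest color class with no neighbor in it, and pay the
%%% heaviest vertex of every class. The graph representation is
%%% illustrative.
%%%
%%%     def first_fit_max_coloring(graph, weight):
%%%         # graph: v -> set of neighbors of v
%%%         color = {}
%%%         for v in sorted(graph, key=lambda v: -weight[v]):
%%%             used = {color[u] for u in graph[v] if u in color}
%%%             c = 0
%%%             while c in used:
%%%                 c += 1
%%%             color[v] = c
%%%         classes = {}
%%%         for v, c in color.items():
%%%             classes.setdefault(c, []).append(v)
%%%         cost = sum(max(weight[v] for v in vs)
%%%                    for vs in classes.values())
%%%         return color, cost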

@Article{Santos:2005:TSH,
  author =       "Haroldo G. Santos and Luiz S. Ochi and Marcone J. F.
                 Souza",
  title =        "A {Tabu} search heuristic with efficient
                 diversification strategies for the class\slash teacher
                 timetabling problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "10",
  pages =        "2.9:1--2.9:??",
  month =        "????",
  year =         "2005",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1064546.1180621",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:05:40 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The Class/Teacher Timetabling Problem (CTTP) deals
                 with the weekly scheduling of encounters between
                 teachers and classes of an educational institution.
                  Since CTTP is an NP-hard problem for nearly all of its
                 variants, the use of heuristic methods for its
                 resolution is justified. This paper presents an
                  efficient Tabu Search (TS) heuristic with two
                  different memory-based diversification strategies for
                  CTTP. Results obtained by applying the method to a set
                  of real-world problems show that it produces better
                  solutions than a previously proposed TS from the
                  literature, and that it reaches good-quality solutions
                  faster.",
  acknowledgement = ack-nhfb,
  articleno =    "2.9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "metaheuristics; tabu search; timetabling",
}

@Article{Salmela:2006:MSM,
  author =       "Leena Salmela and Jorma Tarhio and Jari Kyt{\"o}joki",
  title =        "Multipattern string matching with $q$-grams",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "1.1:1--1.1:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1187438",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present three algorithms for exact string matching
                 of multiple patterns. Our algorithms are filtering
                 methods, which apply $q$-grams and bit parallelism. We
                 ran extensive experiments with them and compared them
                 with various versions of earlier algorithms, e.g.,
                 different trie implementations of the Aho--Corasick
                 algorithm. All of our algorithms appeared to be
                 substantially faster than earlier solutions for sets of
                  1,000--10,000 patterns, and the good performance of
                  two of them continues to 100,000 patterns. The gain is
                  due to the improved filtering efficiency provided by
                 $q$-grams.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "content scanning; intrusion detection; multiple string
                 matching",
}
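
%%% A Python sketch of plain q-gram filtration, the principle behind
%%% the algorithms above (which combine it with bit parallelism): a
%%% text window can only contain an occurrence if all its q-grams
%%% occur somewhere in the pattern set, so windows with a foreign
%%% q-gram are discarded and only survivors are verified. Assumes q
%%% is at most the shortest pattern length.
%%%
%%%     def qgram_filter_search(text, patterns, q=3):
%%%         m = min(len(p) for p in patterns)
%%%         grams = {p[i:i + q]
%%%                  for p in patterns
%%%                  for i in range(len(p) - q + 1)}
%%%         for s in range(len(text) - m + 1):
%%%             w = text[s:s + m]
%%%             if all(w[i:i + q] in grams for i in range(m - q + 1)):
%%%                 # candidate position: verify each pattern exactly
%%%                 for p in patterns:
%%%                     if text[s:s + len(p)] == p:
%%%                         yield s, p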

@Article{Sinha:2006:CES,
  author =       "Ranjan Sinha and Justin Zobel and David Ring",
  title =        "Cache-efficient string sorting using copying",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "1.2:1--1.2:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1187439",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Burstsort is a cache-oriented sorting technique that
                 uses a dynamic trie to efficiently divide large sets of
                 string keys into related subsets small enough to sort
                 in cache. In our original burstsort, string keys
                 sharing a common prefix were managed via a bucket of
                 pointers represented as a list or array; this approach
                 was found to be up to twice as fast as the previous
                 best string sorts, mostly because of a sharp reduction
                 in out-of-cache references. In this paper, we introduce
                 C-burstsort, which copies the unexamined tail of each
                 key to the bucket and discards the original key to
                 improve data locality. On both Intel and PowerPC
                 architectures, and on a wide range of string types, we
                 show that sorting is typically twice as fast as our
                 original burstsort and four to five times faster than
                 multikey quicksort and previous radixsorts. A variant
                 that copies both suffixes and record pointers to
                 buckets, CP-burstsort, uses more memory, but provides
                 stable sorting. In current computers, where performance
                 is limited by memory access latencies, these new
                 algorithms can dramatically reduce the time needed for
                 internal sorting of large numbers of strings.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; cache; experimental algorithms; sorting;
                 string management; tries",
}

@Article{Penner:2006:CFI,
  author =       "Michael Penner and Viktor K. Prasanna",
  title =        "Cache-Friendly implementations of transitive closure",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "1.3:1--1.3:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1210586",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The topic of cache performance has been well studied
                 in recent years. Compiler optimizations exist and
                 optimizations have been done for many problems. Much of
                 this work has focused on dense linear algebra problems.
                 At first glance, the Floyd--Warshall algorithm appears
                 to fall into this category. In this paper, we begin by
                 applying two standard cache-friendly optimizations to
                 the Floyd--Warshall algorithm and show limited
                 performance improvements. We then discuss the
                  unidirectional space-time representation (USTR). We
                 show analytically that the USTR can be used to reduce
                 the amount of processor-memory traffic by a factor of
                 $O(\sqrt C)$, where $C$ is the cache size, for a large
                 class of algorithms. Since the USTR leads to a tiled
                 implementation, we develop a tile size selection
                 heuristic to intelligently narrow the search space for
                 the tile size that minimizes total execution time.
                 Using the USTR, we develop a cache-friendly
                 implementation of the Floyd--Warshall algorithm. We
                 show experimentally that this implementation minimizes
                 the level-1 and level-2 cache misses and TLB misses
                 and, therefore, exhibits the best overall performance.
                 Using this implementation, we show a $2 \times$
                 improvement in performance over the best compiler
                 optimized implementation on three different
                 architectures. Finally, we show analytically that our
                 implementation of the Floyd--Warshall algorithm is
                 asymptotically optimal with respect to processor-memory
                 traffic. We show experimental results for the Pentium
                 III, Alpha, and MIPS R12000 machines using problem
                 sizes between 1024 and 2048 vertices. We demonstrate
                 improved cache performance using the Simplescalar
                 simulator.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "data structures; Floyd--Warshall algorithm; systolic
                 array algorithms",
}
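
%%% A Python sketch of a tiled Floyd--Warshall in the spirit of the
%%% cache-friendly implementation above: the k-loop is processed in
%%% b x b blocks so each phase works on tiles that can fit in cache.
%%% Python is used for exposition only; the paper's implementation
%%% and the USTR-derived tuning are in compiled code.
%%%
%%%     def blocked_floyd_warshall(d, b):
%%%         # d: n x n matrix (lists), n divisible by b, d[i][i] == 0
%%%         n = len(d)
%%%         def relax(ti, tj, tk):
%%%             # relax tile (ti, tj) through the vertices of block tk
%%%             for k in range(tk * b, tk * b + b):
%%%                 row_k = d[k]
%%%                 for i in range(ti * b, ti * b + b):
%%%                     row_i, dik = d[i], d[i][k]
%%%                     for j in range(tj * b, tj * b + b):
%%%                         if dik + row_k[j] < row_i[j]:
%%%                             row_i[j] = dik + row_k[j]
%%%         nb = n // b
%%%         for t in range(nb):
%%%             relax(t, t, t)              # 1: diagonal tile
%%%             for x in range(nb):
%%%                 if x != t:
%%%                     relax(t, x, t)      # 2: row and column of t
%%%                     relax(x, t, t)
%%%             for x in range(nb):
%%%                 if x != t:
%%%                     for y in range(nb):
%%%                         if y != t:
%%%                             relax(x, y, t)  # 3: remaining tiles
%%%         return d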

@Article{Goshi:2006:ADM,
  author =       "Justin Goshi and Richard E. Ladner",
  title =        "Algorithms for dynamic multicast key distribution",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "1.4:1--1.4:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1210587",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study the problem of multicast key distribution for
                 group security. Secure group communication systems
                 typically rely on a group key, which is a secret shared
                 among the members of the group. This key is used to
                 provide privacy by encrypting all group communications.
                 Because groups can be large and highly dynamic, it
                 becomes necessary to change the group key in a scalable
                 and secure fashion when members join and leave the
                 group. We present a series of algorithms for solving
                 this problem based on key trees. The algorithms attempt
                 to minimize the worst-case communication cost of
                 updates by maintaining balanced key tree structures. We
                 focus on the trade-off between the communication cost
                 because of the structure of the tree and that due to
                 the overhead of restructuring the tree to maintain its
                 balanced structure. The algorithms are analyzed for
                 worst-case tree structure bounds and evaluated
                 empirically via simulations.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dynamic key distribution; experimental algorithms;
                 multicast",
}
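
%%% A Python sketch of the key-tree bookkeeping underlying the
%%% algorithms above, for a perfect binary tree in heap layout
%%% (internal nodes 1..n-1, member leaves n..2n-1, n a power of two;
%%% the layout and key names are illustrative). When a member leaves,
%%% every key on its root path is replaced, and each replacement is
%%% multicast encrypted under the off-path child's key and under the
%%% already-replaced on-path key, giving about 2*log2(n) encryptions.
%%%
%%%     def rekey_messages(n, leaf):
%%%         msgs, node = [], leaf
%%%         while node > 1:
%%%             parent, sibling = node // 2, node ^ 1
%%%             # new parent key for the subtree of the sibling
%%%             msgs.append(("new_k%d" % parent, "k%d" % sibling))
%%%             if node != leaf:
%%%                 # and for the on-path subtree, minus the leaver
%%%                 msgs.append(("new_k%d" % parent, "new_k%d" % node))
%%%             node = parent
%%%         return msgs   # e.g. rekey_messages(8, 12)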

@Article{Aleksandrov:2006:PPG,
  author =       "Lyudmil Aleksandrov and Hristo Djidjev and Hua Guo and
                 Anil Maheshwari",
  title =        "Partitioning planar graphs with costs and weights",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "1.5:1--1.5:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1210588",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A graph separator is a set of vertices or edges whose
                 removal divides an input graph into components of
                 bounded size. This paper describes new algorithms for
                 computing separators in planar graphs as well as
                 techniques that can be used to speed up the
                 implementation of graph partitioning algorithms and
                 improve the partition quality. In particular, we
                 consider planar graphs with costs and weights on the
                 vertices, where weights are used to estimate the sizes
                 of the partitions and costs are used to estimate the
                 size of the separator. We show that in these graphs one
                 can always find a small cost separator (consisting of
                 vertices or edges) that partitions the graph into
                 components of bounded weight. We describe
                 implementations of the partitioning algorithms and
                 discuss results of our experiments.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "graph algorithms; graph partitioning algorithms; graph
                 separators; implementation",
}

@Article{Ilinkin:2006:HEC,
  author =       "Ivayio Ilinkin and Ravi Janardan and Michiel Smid and
                 Eric Johnson and Paul Castillo and J{\"o}rg Schwerdt",
  title =        "Heuristics for estimating contact area of supports in
                 layered manufacturing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "1.6:1--1.6:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1210589",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Layered manufacturing is a technology that allows
physical prototypes of three-dimensional (3D) models to
be built directly from their digital representation, as
a stack of two-dimensional (2D) layers. A key design
                 problem here is the choice of a suitable direction in
                 which the digital model should be oriented and built so
                 as to minimize the area of contact between the
                 prototype and temporary support structures that are
                 generated during the build. Devising an efficient
                 algorithm for computing such a direction has remained a
                 difficult problem for quite some time. In this paper, a
                 suite of efficient and practical heuristics is
                 presented for estimating the minimum contact area. Also
                 given is a technique for evaluating the quality of the
                 estimate provided by any heuristic, which does not
                 require knowledge of the (unknown and hard-to-compute)
                 optimal solution; instead, it provides an indirect
                 upper bound on the quality of the estimate via two
                 relatively easy-to-compute quantities. The algorithms
                 are based on various techniques from computational
geometry, such as ray-shooting, convex hulls, Boolean
                 operations on polygons, and spherical arrangements, and
                 have been implemented and tested. Experimental results
                 on a wide range of real-world models show that the
                 heuristics perform quite well in practice.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithm implementation and testing; computational
                 geometry",
}

@Article{Pearce:2006:DTS,
  author =       "David J. Pearce and Paul H. J. Kelly",
  title =        "A dynamic topological sort algorithm for directed
                 acyclic graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "1.7:1--1.7:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1210590",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the problem of maintaining the topological
                 order of a directed acyclic graph (DAG) in the presence
                 of edge insertions and deletions. We present a new
                 algorithm and, although this has inferior time
                 complexity compared with the best previously known
                 result, we find that its simplicity leads to better
                 performance in practice. In addition, we provide an
                 empirical comparison against the three main
                 alternatives over a large number of random DAGs. The
                 results show our algorithm is the best for sparse
                 digraphs and only a constant factor slower than the
                 best on dense digraphs.",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dynamic graph algorithms; topological sort",
}
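
%%% For intuition, the following is a minimal sketch (not the authors'
%%% exact algorithm) of maintaining a topological order under edge
%%% insertion in the spirit described above: when a new edge (x, y)
%%% violates the current order, only the nodes lying between ord(y)
%%% and ord(x) are discovered, and their positions are permuted.
%%% Class and field names are hypothetical:
%%%
%%%     class DynamicTopoOrder:
%%%         def __init__(self, nodes):
%%%             self.succ = {v: set() for v in nodes}
%%%             self.pred = {v: set() for v in nodes}
%%%             self.ord = {v: i for i, v in enumerate(nodes)}
%%%
%%%         def _reach(self, start, adj, keep):
%%%             out, stack, seen = [], [start], {start}
%%%             while stack:
%%%                 v = stack.pop()
%%%                 out.append(v)
%%%                 for w in adj[v]:
%%%                     if w not in seen and keep(w):
%%%                         seen.add(w)
%%%                         stack.append(w)
%%%             return out
%%%
%%%         def insert_edge(self, x, y):
%%%             lb, ub = self.ord[y], self.ord[x]
%%%             if ub < lb:                   # order already consistent
%%%                 self.succ[x].add(y); self.pred[y].add(x)
%%%                 return
%%%             # affected region: nodes between the two endpoints
%%%             fwd = self._reach(y, self.succ, lambda w: self.ord[w] <= ub)
%%%             if x in fwd:
%%%                 raise ValueError("edge would create a cycle")
%%%             bwd = self._reach(x, self.pred, lambda w: self.ord[w] >= lb)
%%%             affected = (sorted(bwd, key=self.ord.get) +
%%%                         sorted(fwd, key=self.ord.get))
%%%             slots = sorted(self.ord[v] for v in affected)
%%%             for v, i in zip(affected, slots):
%%%                 self.ord[v] = i           # reuse the vacated positions
%%%             self.succ[x].add(y); self.pred[y].add(x)
%%%
%%% The work per insertion is proportional to the affected region only,
%%% which suggests why such a simple scheme can win on sparse digraphs
%%% despite its weaker worst-case bound.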

@Article{Flammini:2006:RAF,
  author =       "Michele Flammini and Alfredo Navarra and Stephane
                 Perennes",
  title =        "The ``real'' approximation factor of the {MST}
                 heuristic for the minimum energy broadcasting",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.10:1--2.10:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1216587",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "This paper deals with one of the most studied problems
                 in the last few years in the field of wireless
                 communication in ad-hoc networks. The problem consists
                 of reducing the total energy consumption of wireless
                 radio stations distributed over a given area of
                 interest in order to perform broadcasting, the basic
                 pattern of communication. Recently, a tight
                 6-approximation of the minimum spanning tree heuristic
                 has been proven. While such a bound is theoretically
                 tight, matching the known lower bound of 6,
                 there is an obvious gap with practical experimental
                 results. By extensive experiments, using a new
                 technique to generate input instances, and supported
                 by theoretical results, we show how the approximation
                 ratio can actually be considered close to 4 for a
                 ``real-world'' set of instances. We consider, in fact,
                 instances more representative of common practice,
                 which are usually composed of a considerable number of
                 nodes uniformly and randomly distributed inside the
                 area of interest.",
  acknowledgement = ack-nhfb,
  articleno =    "2.10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "ad-hoc networks; broadcast; energy saving; spanning
                 tree",
}
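
%%% The MST heuristic itself is easy to state: build a Euclidean
%%% minimum spanning tree, root it at the source, and let every
%%% internal node transmit with just enough power to reach its
%%% farthest child (power ~ distance^alpha).  A minimal sketch, with
%%% an O(n^2) Prim and alpha = 2 assumed:
%%%
%%%     import math, random
%%%
%%%     def mst_broadcast_energy(points, source=0, alpha=2):
%%%         n = len(points)
%%%         dist = lambda i, j: math.dist(points[i], points[j])
%%%         parent, best = [None] * n, [math.inf] * n
%%%         best[source], in_tree = 0.0, [False] * n
%%%         for _ in range(n):                        # Prim's MST
%%%             u = min((i for i in range(n) if not in_tree[i]),
%%%                     key=best.__getitem__)
%%%             in_tree[u] = True
%%%             for v in range(n):
%%%                 if not in_tree[v] and dist(u, v) < best[v]:
%%%                     best[v], parent[v] = dist(u, v), u
%%%         power = [0.0] * n                         # per-node radius
%%%         for v in range(n):
%%%             if parent[v] is not None:
%%%                 power[parent[v]] = max(power[parent[v]],
%%%                                        dist(parent[v], v) ** alpha)
%%%         return sum(power)
%%%
%%%     random.seed(1)
%%%     pts = [(random.random(), random.random()) for _ in range(200)]
%%%     print(mst_broadcast_energy(pts))
%%%
%%% Instances of exactly this kind, many nodes uniform in a square, are
%%% what drive the observed ratio toward 4 rather than the worst-case 6.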

@Article{Nikoletseas:2006:JSS,
  author =       "Sotiris Nikoletseas",
  title =        "{JEA Special Section}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.1:1--2.1:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1216578",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Fahle:2006:FBB,
  author =       "Torsten Fahle and Karsten Tiemann",
  title =        "A faster branch-and-bound algorithm for the test-cover
                 problem based on set-covering techniques",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.2:1--2.2:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1216579",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The test-cover problem asks for the minimal number of
                 tests needed to uniquely identify a disease, infection,
                 etc. A collection of branch-and-bound algorithms was
                 proposed by De Bontridder et al. [2002]. Based on their
                 work, we introduce several improvements that are
                 compatible with all techniques described in De
                 Bontridder et al. [2002] and the more general setting
                 of {\em weighted\/} test-cover problems. We present a
                 faster data structure and cost-based variable fixing,
                 and we adapt well-known set-covering techniques, including
                 Lagrangian relaxation and upper-bound heuristics. The
                 resulting algorithm solves benchmark instances up to 10
                 times faster than the former approach and up to 100
                 times faster than a general MIP solver.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "branch-and-bound; Lagrangian relaxation; set-cover
                 problem; test-cover problem; variable fixing",
}

@Article{Leone:2006:FPN,
  author =       "Pierre Leone and Jose Rolim and Paul Albuquerque and
                 Christian Mazza",
  title =        "A framework for probabilistic numerical evaluation of
                 sensor networks: a case study of a localization
                 protocol",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.3:1--2.3:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1216580",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper we show how to use stochastic estimation
                 methods to investigate topological properties of sensor
                 networks as well as the behavior of dynamical processes
                 on these networks. The framework is particularly
                 important to study problems for which no theoretical
                 results are known or for which existing results cannot
                 be directly applied in practice, for instance, when
                 only asymptotic results
                 are available. We also interpret Russo's formula in the
                 context of sensor networks and thus obtain practical
                 information on their reliability. As a case study, we
                 analyze a localization protocol for wireless sensor
                 networks and validate our approach by numerical
                 experiments. Finally, we mention three applications of
                 our approach: estimating the number of pivotal sensors
                 in a real network, minimizing the number of such
                 sensors for robustness purposes during the network
design, and estimating the distance between successive
                 localized positions for mobile sensor networks.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "localization process and reliability; sensor networks;
                 stochastic recursive estimation",
}

@Article{Festa:2006:GPR,
  author =       "Paola Festa and Panos M. Pardalos and Leonidas S.
                 Pitsoulis and Mauricio G. C. Resende",
  title =        "{GRASP} with path relinking for the weighted {MAXSAT}
                 problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.4:1--2.4:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1216581",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A GRASP with path relinking for finding good-quality
                 solutions of the weighted maximum satisfiability
                 problem (MAX-SAT) is described in this paper. GRASP, or
                 Greedy Randomized Adaptive Search Procedure, is a
                 randomized multistart metaheuristic, where, at each
                 iteration, locally optimal solutions are constructed,
                 each independent of the others. Previous experimental
                 results indicate its effectiveness for solving weighted
                 MAX-SAT instances. Path relinking is a procedure used
                 to intensify the search around good-quality isolated
                 solutions that have been produced by the GRASP
                 heuristic. Experimental comparison of the pure GRASP
                 (without path relinking) and the GRASP with path
                 relinking illustrates the effectiveness of path
                 relinking in decreasing the average time needed to find
                 a good-quality solution for the weighted maximum
                 satisfiability problem.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; experimentation; GRASP; heuristics; path
                 relinking; performance; time-to-target plots",
}
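
%%% A compact skeleton of the two ingredients follows (hedged: the
%%% problem encoding and the randomized-greedy rule are simplifications
%%% for illustration, not the paper's exact procedure; a clause is a
%%% pair (weight, [signed literals])):
%%%
%%%     import random
%%%
%%%     def sat_weight(clauses, assign):
%%%         return sum(w for w, lits in clauses
%%%                    if any((l > 0) == assign.get(abs(l)) for l in lits))
%%%
%%%     def grasp_construct(nvars, clauses, noise=0.3):
%%%         assign = {}
%%%         for v in random.sample(range(1, nvars + 1), nvars):
%%%             gain = {val: sat_weight(clauses, {**assign, v: val})
%%%                     for val in (True, False)}
%%%             best = max(gain, key=gain.get)
%%%             # crude stand-in for the restricted candidate list
%%%             assign[v] = best if random.random() > noise else not best
%%%         return assign
%%%
%%%     def path_relink(clauses, start, guide):
%%%         best, cur = dict(start), dict(start)
%%%         for v in [v for v in cur if cur[v] != guide[v]]:
%%%             cur[v] = guide[v]        # step toward the elite solution
%%%             if sat_weight(clauses, cur) > sat_weight(clauses, best):
%%%                 best = dict(cur)
%%%         return best
%%%
%%% Each GRASP iteration would run grasp_construct, improve the result
%%% with a flip-based local search, and then relink it to a pool of
%%% elite solutions, keeping the best assignment seen.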

@Article{Mehlhorn:2006:IMC,
  author =       "Kurt Mehlhorn and Dimitrios Michail",
  title =        "Implementing minimum cycle basis algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.5:1--2.5:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1216582",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this paper, we consider the problem of computing a
                 minimum cycle basis of an undirected graph $G = (V, E)$
                 with $n$ vertices and $m$ edges. We describe an
                 efficient implementation of an $O(m^3 + mn^2 \log n)$
                 algorithm. For sparse graphs, this is the currently
                 best-known algorithm. This algorithm's running time can
                 be partitioned into two parts with time $O(m^3)$ and
                 $O(m^2 n + mn^2 \log n)$, respectively. Our
                 experimental findings imply that for random graphs the
                 true bottleneck of a sophisticated implementation is
                 the $O(m^2 n + mn^2 \log n)$ part. A straightforward
                 implementation would require $\Omega(n m)$
                 shortest-path computations. Thus, we develop several
                 heuristics in order to get a practical algorithm. Our
                 experiments show that in random graphs our techniques
                 result in a significant speed-up. Based on our
                 experimental observations, we combine the two
                 fundamentally different approaches to compute a minimum
                 cycle basis to obtain a new hybrid algorithm with
                 running time $O(m^2 n^2)$. The hybrid algorithm is very
                 efficient, in practice, for random dense unweighted
                 graphs. Finally, we compare these two algorithms with a
                 number of previous implementations for finding a
                 minimum cycle basis of an undirected graph.",
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "cycle basis; graph algorithms",
}

@Article{Heinrich-Litan:2006:RCR,
  author =       "Laura Heinrich-Litan and Marco E. L{\"u}bbecke",
  title =        "Rectangle covers revisited computationally",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.6:1--2.6:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1216583",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the problem of covering an orthogonal
                 polygon with a minimum number of axis-parallel
                 rectangles from a computational point of view. We
                 propose an integer program which is the first general
                 approach to obtain provably optimal solutions to this
                 well-studied NP-hard problem. It applies to common
                 variants like covering only the corners or the boundary
                 of the polygon and also to the weighted case. In
                 experiments, it turns out that the linear programming
                 relaxation is extremely tight and rounding a fractional
                 solution is an immediate high-quality heuristic. We
                 obtain excellent experimental results for polygons
                 originating from VLSI design, fax data sheets, black
                 and white images, and for random instances. Making use
                 of the dual linear program, we propose a stronger lower
                 bound on the optimum, namely, the cardinality of a
                 fractional stable set. Furthermore, we outline ideas on
                 how to make use of this bound in primal--dual-based
                 algorithms. We give partial results, which make us
                 believe that our proposals have a strong potential to
                 settle the main open problem in the area: to find a
                 constant-factor approximation algorithm for the
                 rectangle cover problem.",
  acknowledgement = ack-nhfb,
  articleno =    "2.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "integer programming; linear programming",
}
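
%%% In spirit, the integer program is a set-covering formulation.  The
%%% generic reconstruction below is an assumption on my part; the
%%% paper's exact model may differ in its choice of candidate
%%% rectangles:
%%%
%%%     \min \sum_{R \in \mathcal{R}} x_R
%%%     \quad\text{s.t.}\quad
%%%     \sum_{R \ni p} x_R \ge 1 \;\; \text{for every point } p,
%%%     \qquad x_R \in \{0, 1\},
%%%
%%% where $\mathcal{R}$ is the set of maximal axis-parallel rectangles
%%% inside the polygon.  The LP relaxation replaces $x_R \in \{0,1\}$
%%% by $x_R \ge 0$; its dual packs fractional weight on points so that
%%% no rectangle receives more than one unit in total, which is exactly
%%% the fractional stable-set bound mentioned above.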

@Article{Panagopoulou:2006:APN,
  author =       "Panagiota N. Panagopoulou and Paul G. Spirakis",
  title =        "Algorithms for pure {Nash} equilibria in weighted
                 congestion games",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.7:1--2.7:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1216584",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In large-scale or evolving networks, such as the
                 Internet, there is no authority capable of enforcing
                 centralized traffic management. In such situations,
                 game theory, and especially the concepts of Nash
                 equilibria and congestion games [Rosenthal 1973] are a
                 suitable framework for analyzing the equilibrium
effects of selfish route selection on network delays.
                 We focus here on {\em single-commodity\/} networks
                 where selfish users select paths to route their loads
                 (represented by arbitrary integer {\em weights\/}). We
                 assume that individual link delays are equal to the
                 total load of the link. We then focus on the algorithm
                 suggested in Fotakis et al. [2005], i.e., a
                 potential-based method for finding {\em pure\/} Nash
                 equilibria in such networks. A superficial analysis of
                 this algorithm gives an upper bound on its time, which
                 is polynomial in $n$ (the number of users) and the sum
                 of their weights $W$. This bound can be exponential in
                 $n$ when some weights are exponential. We provide
                 strong experimental evidence that this algorithm
                 actually converges to a pure Nash equilibrium in {\em
                 polynomial time}. More specifically, our experimental
                 findings suggest that the running time is a polynomial
                 function of $n$ and $\log W$. In addition, we propose
                 an initial allocation of users to paths that
                 dramatically accelerates this algorithm, compared to an
                 arbitrary initial allocation. A by-product of our
                 research is the discovery of a weighted potential
function when link delays are {\em exponential\/} in
                 their loads. This asserts the existence of pure Nash
                 equilibria for these delay functions and extends the
                 result of Fotakis et al. [2005].",
  acknowledgement = ack-nhfb,
  articleno =    "2.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "congestion games; game theory; pure Nash equilibria",
}
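
%%% The potential-based method can be demonstrated on the simplest
%%% special case, parallel links with delay equal to load (a sketch,
%%% not the Fotakis et al. algorithm itself): repeatedly let a user
%%% move to a link that strictly lowers its own delay; each move lowers
%%% the potential (the sum of squared loads), so the process terminates
%%% in a pure Nash equilibrium.
%%%
%%%     def best_response_dynamics(weights, m):
%%%         assign = [0] * len(weights)    # arbitrary initial allocation
%%%         load = [0.0] * m
%%%         load[0] = sum(weights)
%%%         moved = True
%%%         while moved:
%%%             moved = False
%%%             for u, w in enumerate(weights):
%%%                 here = assign[u]
%%%                 best = min(range(m), key=load.__getitem__)
%%%                 if load[best] + w < load[here]:  # strict improvement
%%%                     load[here] -= w
%%%                     load[best] += w
%%%                     assign[u] = best
%%%                     moved = True
%%%         return assign
%%%
%%% Moving weight w from a link with load A to one with load B, where
%%% B + w < A, changes the sum of squared loads by 2w(B - A + w) < 0,
%%% which is the termination argument; the experimental question above
%%% is how many such moves are needed as a function of n and log W.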

@Article{Mohring:2006:PGS,
  author =       "Rolf H. M{\"o}hring and Heiko Schilling and Birk
                 Sch{\"u}tz and Dorothea Wagner and Thomas Willhalm",
  title =        "Partitioning graphs to speedup {Dijkstra}'s
                 algorithm",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.8:1--2.8:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1216585",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study an acceleration method for point-to-point
                 shortest-path computations in large and sparse directed
                 graphs with given nonnegative arc weights. The
                 acceleration method is called the {\em arc-flag
                 approach\/} and is based on Dijkstra's algorithm. In
                 the arc-flag approach, we allow a preprocessing of the
                 network data to generate additional information, which
is then used to speed up shortest-path queries. In the
                 preprocessing phase, the graph is divided into regions
                 and information is gathered on whether an arc is on a
                 shortest path into a given region. The arc-flag method
                 combined with an appropriate partitioning and a
                 bidirected search achieves an average speedup factor of
                 more than 500 compared to the standard algorithm of
                 Dijkstra on large networks (1 million nodes, 2.5
                 million arcs). This combination narrows down the search
                 space of Dijkstra's algorithm to almost the size of the
                 corresponding shortest path for long-distance
                 shortest-path queries. We conduct an experimental study
                 that evaluates which partitionings are best suited for
                 the arc-flag method. In particular, we examine
                 partitioning algorithms from computational geometry and
                 a multiway arc separator partitioning. The evaluation
                 was done on German road networks. The impact of
                 different partitions on the speedup of the shortest
path algorithm is compared. Furthermore, we present an
                 extension of the speedup technique to multiple levels
                 of partitions. With this multilevel variant, the same
                 speedup factors can be achieved with smaller space
                 requirements. It can, therefore, be seen as a
                 compression of the precomputed data that preserves the
                 correctness of the computed shortest paths.",
  acknowledgement = ack-nhfb,
  articleno =    "2.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "acceleration method; Dijkstra's algorithm; road
                 network; shortest path",
}
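
%%% The query side of the arc-flag method is ordinary Dijkstra with one
%%% extra test per arc.  A minimal sketch (the flags are assumed to be
%%% precomputed; graph[u] is a list of (v, weight, flags) triples with
%%% flags a set of region ids):
%%%
%%%     import heapq
%%%
%%%     def arcflag_dijkstra(graph, region, s, t):
%%%         target_region = region[t]
%%%         dist, pq = {s: 0.0}, [(0.0, s)]
%%%         while pq:
%%%             d, u = heapq.heappop(pq)
%%%             if u == t:
%%%                 return d
%%%             if d > dist.get(u, float("inf")):
%%%                 continue                  # stale heap entry
%%%             for v, w, flags in graph[u]:
%%%                 if target_region not in flags:
%%%                     continue              # arc useless for this region
%%%                 nd = d + w
%%%                 if nd < dist.get(v, float("inf")):
%%%                     dist[v] = nd
%%%                     heapq.heappush(pq, (nd, v))
%%%         return float("inf")
%%%
%%% All of the engineering discussed above lives in the preprocessing:
%%% choosing the partition so that few arcs carry a flag for any given
%%% region, which is what shrinks the search space by the reported
%%% factors.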

@Article{Boukerche:2006:ICC,
  author =       "Azzedine Boukerche and Alba Cristina Magalhaes {Alves
                 De Melo}",
  title =        "Integrating coordinated checkpointing and recovery
                 mechanisms into {DSM} synchronization barriers",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "11",
  pages =        "2.9:1--2.9:??",
  month =        "????",
  year =         "2006",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1187436.1216586",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:06:20 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Distributed shared memory (DSM) creates an abstraction
                 of a physical shared memory that parallel programmers
                 can access. Most recent software DSM systems provide
                 relaxed-memory models that guarantee consistency only
                 at synchronization operations, such as locks and
                 barriers. As the main goal of DSM systems is to provide
                 support for long-term computation-intensive
                 applications, checkpointing and recovery mechanisms are
                 highly desirable. This article presents and evaluates
                 the integration of a coordinated checkpointing
mechanism into the barrier primitive that is usually
                 provided with many DSM systems. Our results on some
                 popular benchmarks and a real parallel application show
                 that the overhead introduced during the failure-free
                 execution is often small.",
  acknowledgement = ack-nhfb,
  articleno =    "2.9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "barrier synchronization; distributed shared memory",
}

@Article{Anonymous:2008:EGC,
  author =       "Anonymous",
  title =        "Engineering graph clustering: {Models} and
                 experimental evaluation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.1:1--1.1:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1227162",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A promising approach to graph clustering is based on
                 the intuitive notion of intracluster density versus
                 intercluster sparsity. As for the weighted case,
                 clusters should accumulate lots of weight, in contrast
                 to their connection to the remaining graph, which
                 should be light. While both formalizations and
                 algorithms focusing on particular aspects of this
                 rather vague concept have been proposed, no conclusive
                 argument on their appropriateness has been given. In
                 order to deepen the understanding of particular
                 concepts, including both quality assessment as well as
                 designing new algorithms, we conducted an experimental
                 evaluation of graph-clustering approaches. By combining
proven techniques from graph partitioning and geometric
                 clustering, we also introduce a new approach that
                 compares favorably.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "clustering algorithms; experimental evaluation; graph
                 clustering; quality measures",
}

@Article{Barsky:2008:GAT,
  author =       "Marina Barsky and Ulrike Stege and Alex Thomo and
                 Chris Upton",
  title =        "A graph approach to the threshold all-against-all
                 substring matching problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.10:1--1.10:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1370596.1370601",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present a novel graph model and an efficient
                 algorithm for solving the ``threshold all against all''
                 problem, which involves searching two strings (with
                 length $M$ and $N$, respectively) for all maximal
                 approximate substring matches of length at least $S$,
                 with up to $K$ differences. Our algorithm solves the
problem in time $O(M N K^3)$, which is a considerable
                 improvement over the previous known bound for this
                 problem. We also provide experimental evidence that, in
                 practice, our algorithm exhibits a better performance
                 than its worst-case running time.",
  acknowledgement = ack-nhfb,
  articleno =    "1.10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "bioinformatics; complexity; string matching",
}

@Article{Dietzfelbinger:2008:DIB,
  author =       "Martin Dietzfelbinger and Martin H{\"u}hne and
                 Christoph Weidling",
  title =        "A dictionary implementation based on dynamic perfect
                 hashing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.11:1--1.11:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1370596.1370602",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We describe experimental results on an implementation
                 of a dynamic dictionary. The basis of our
                 implementation is ``dynamic perfect hashing'' as
                 described by Dietzfelbinger et al. ({\em SIAM J.
                 Computing 23}, 1994, pp. 738--761), an extension of the
                 storage scheme proposed by Fredman et al. ({\em J.
                 ACM\/} 31, 1984, pp. 538--544). At the top level, a
                 hash function is used to partition the keys to be
                 stored into several sets. On the second level, there is
                 a perfect hash function for each of these sets. This
                 technique guarantees $O(1)$ worst-case time for lookup
                 and expected $O(1)$ amortized time for insertion and
                 deletion, while only linear space is required. We study
                 the practical performance of dynamic perfect hashing
                 and describe improvements of the basic scheme. The
                 focus is on the choice of the hash function (both for
                 integer and string keys), on the efficiency of
                 rehashing, on the handling of small buckets, and on the
                 space requirements of the implementation.",
  acknowledgement = ack-nhfb,
  articleno =    "1.11",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "data structures; dictionaries; dynamic hashing; hash
                 functions; implementation",
}
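
%%% The underlying two-level scheme is easy to sketch (an FKS-style
%%% static construction for distinct integer keys below P; the dynamic
%%% version studied above additionally rebuilds tables as keys come
%%% and go):
%%%
%%%     import random
%%%
%%%     P = (1 << 61) - 1                    # prime larger than any key
%%%
%%%     def draw_hash(m):                    # universal hash into 0..m-1
%%%         a, b = random.randrange(1, P), random.randrange(P)
%%%         return lambda x: ((a * x + b) % P) % m
%%%
%%%     def build(keys):
%%%         n = max(len(keys), 1)
%%%         while True:                      # retry top level until the
%%%             top = draw_hash(n)           # buckets are small enough
%%%             buckets = [[] for _ in range(n)]
%%%             for k in keys:
%%%                 buckets[top(k)].append(k)
%%%             if sum(len(b) ** 2 for b in buckets) <= 4 * n:
%%%                 break
%%%         tables = []
%%%         for b in buckets:                # a bucket of size s gets a
%%%             m = max(len(b) ** 2, 1)      # collision-free s*s table
%%%             while True:
%%%                 h, slots, ok = draw_hash(m), [None] * m, True
%%%                 for k in b:
%%%                     if slots[h(k)] is not None:
%%%                         ok = False
%%%                         break
%%%                     slots[h(k)] = k
%%%                 if ok:
%%%                     tables.append((h, slots))
%%%                     break
%%%         return top, tables
%%%
%%%     def member(struct, k):               # O(1) worst-case probes
%%%         top, tables = struct
%%%         h, slots = tables[top(k)]
%%%         return slots[h(k)] == k
%%%
%%% The engineering questions examined above (choice of hash family,
%%% rehash frequency, handling of small buckets) all live inside
%%% build().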

@Article{Maniscalco:2008:EVA,
  author =       "Michael A. Maniscalco and Simon J. Puglisi",
  title =        "An efficient, versatile approach to suffix sorting",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.2:1--1.2:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1278374",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Sorting the suffixes of a string into lexicographical
                 order is a fundamental task in a number of contexts,
                 most notably lossless compression (Burrows--Wheeler
                 transformation) and text indexing (suffix arrays). Most
                 approaches to suffix sorting produce a sorted array of
                 suffixes directly, continually moving suffixes into
                 their final place in the array until the ordering is
                 complete. In this article, we describe a novel and
                 resource-efficient (time and memory) approach to suffix
                 sorting, which works in a complementary way --- by
                 assigning each suffix its rank in the final ordering,
                 before converting to a sorted array, if necessary, once
                 all suffixes are ranked. We layer several powerful
                 extensions on this basic idea and show experimentally
                 that our approach is superior to other leading
                 algorithms in a variety of real-world contexts.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "Burrows--Wheeler transform; suffix array; suffix
                 sorting; suffix tree",
}
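
%%% The "rank first, array afterwards" view can be illustrated with the
%%% classic prefix-doubling construction (a textbook method, not the
%%% authors' algorithm): ranks by the first k characters are refined
%%% into ranks by the first 2k characters until all are distinct.
%%%
%%%     def suffix_ranks(s):
%%%         n = len(s)
%%%         if n <= 1:
%%%             return [0] * n
%%%         rank, k = [ord(c) for c in s], 1
%%%         while True:
%%%             key = lambda i: (rank[i], rank[i + k] if i + k < n else -1)
%%%             order = sorted(range(n), key=key)
%%%             new = [0] * n
%%%             for j in range(1, n):        # equal key pair => same rank
%%%                 new[order[j]] = new[order[j - 1]] + \
%%%                     (key(order[j]) != key(order[j - 1]))
%%%             rank, k = new, 2 * k
%%%             if rank[order[-1]] == n - 1: # all ranks distinct: done
%%%                 return rank
%%%
%%%     rank = suffix_ranks("banana")
%%%     sa = [0] * len(rank)
%%%     for i, r in enumerate(rank):
%%%         sa[r] = i                        # invert ranks into the array
%%%     print(sa)                            # [5, 3, 1, 0, 4, 2]
%%%
%%% Converting the ranks to the sorted array at the end is a single
%%% inverse permutation, which is the complementary order of work
%%% described in the abstract.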

@Article{Aloul:2008:SBP,
  author =       "Fadi A. Aloul and Arathi Ramani and Igor L. Markov and
                 Karem A. Sakallah",
  title =        "Symmetry breaking for pseudo-{Boolean} formulas",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.3:1--1.3:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1278375",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Many important tasks in design automation and
                 artificial intelligence can be performed in practice
                 via reductions to Boolean satisfiability (SAT).
                 However, such reductions often omit
                 application-specific structure, thus handicapping tools
                 in their competition with creative engineers.
                 Successful attempts to represent and utilize additional
                 structure on Boolean variables include recent work on
                 0-1 integer linear programming (ILP) and symmetries in
                 SAT. Those extensions gracefully accommodate well-known
advances in SAT solving; however, no previous work has
                 attempted to combine both extensions. Our work shows
                 (i) how one can detect and use symmetries in instances
                 of 0-1 ILP, and (ii) what benefits this may bring.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "graph automorphism",
}

@Article{Pellegrini:2008:EIT,
  author =       "Marco Pellegrini and Giordano Fusco",
  title =        "Efficient {IP} table lookup via adaptive stratified
                 trees with selective reconstructions",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.4:1--1.4:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1278376",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "IP address lookup is a critical operation for
                 high-bandwidth routers in packet-switching networks,
                 such as Internet. The lookup is a nontrivial operation,
                 since it requires searching for the longest prefix,
                 among those stored in a (large) given table, matching
                 the IP address. Ever increasing routing table size,
                 traffic volume, and links speed demand new and more
                 efficient algorithms. Moreover, the imminent move to
                 IPv6 128-bit addresses will soon require a rethinking
                 of previous technical choices. This article describes a
                 new data structure for solving the IP table lookup
                 problem, christened the adaptive stratified tree (AST).
                 The proposed solution is based on casting the problem
                 in geometric terms and on repeated application of
                 efficient local geometric optimization routines.
                 Experiments with this approach have shown that in terms
                 of storage, query time, and update time the AST is on a
                 par with state-of-the-art algorithms based on data
                 compression or string manipulations (and often it is
                 better on some of the measured quantities).",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "data structures; IP table lookup",
}
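
%%% For contrast, the classical baseline such structures compete with
%%% is a plain binary trie over address bits (a sketch; the AST itself
%%% is a geometric structure and is not shown here):
%%%
%%%     class PrefixTrie:
%%%         def __init__(self):
%%%             self.root = {}
%%%
%%%         def insert(self, prefix_bits, next_hop):
%%%             node = self.root             # e.g. "1011" for a /4
%%%             for b in prefix_bits:
%%%                 node = node.setdefault(int(b), {})
%%%             node["hop"] = next_hop
%%%
%%%         def lookup(self, addr_bits):
%%%             node, best = self.root, None
%%%             for b in addr_bits:
%%%                 if "hop" in node:
%%%                     best = node["hop"]   # longest match so far
%%%                 node = node.get(int(b))
%%%                 if node is None:
%%%                     return best
%%%             return node.get("hop", best)
%%%
%%%     t = PrefixTrie()
%%%     t.insert("10", "A")
%%%     t.insert("1011", "B")
%%%     print(t.lookup("10110000"))          # B: longest prefix wins
%%%
%%% One lookup here costs up to one node per address bit, precisely the
%%% kind of cost that the move to 128-bit IPv6 addresses makes painful
%%% and that geometric schemes such as the AST aim to beat.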

@Article{Navarro:2008:DSA,
  author =       "Gonzalo Navarro and Nora Reyes",
  title =        "Dynamic spatial approximation trees",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.5:1--1.5:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1322337",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Metric space searching is an emerging technique to
                 address the problem of efficient similarity searching
                 in many applications, including multimedia databases
                 and other repositories handling complex objects.
                 Although promising, the metric space approach is still
                 immature in several aspects that are well established
                 in traditional databases. In particular, most indexing
                 schemes are static, that is, few of them tolerate
                 insertion or deletion of elements at reasonable cost
                 over an existing index. The spatial approximation tree
                 ({\em sa--tree\/}) has been experimentally shown to
                 provide a good tradeoff between construction cost,
                 search cost, and space requirement. However, the {\em
                 sa--tree\/} is static, which renders it unsuitable for
                 many database applications. In this paper, we study
                 different methods to handle insertions and deletions on
                 the {\em sa--tree\/} at low cost. In many cases, the
                 dynamic construction (by successive insertions) is even
                 faster than the previous static construction, and both
                 are similar elsewhere. In addition, the dynamic version
                 significantly improves the search performance of {\em
                 sa--trees\/} in virtually all cases. The result is a
                 much more practical data structure that can be useful
                 in a wide range of database applications.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "multimedia databases; similarity or proximity search;
                 spatial and multidimensional search; spatial
                 approximation tree",
}

@Article{Li:2008:EAC,
  author =       "Keqin Li",
  title =        "Experimental average-case performance evaluation of
                 online algorithms for routing and wavelength assignment
                 and throughput maximization in {WDM} optical networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.7:1--1.7:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1370596.1370598",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We investigate the problem of online routing and
                 wavelength assignment and the related throughput
                 maximization problem in wavelength division
                 multiplexing optical networks. It is pointed out that
                 these problems are highly inapproximable, that is, the
                 competitive ratio of any algorithm is at least
                 polynomial. We evaluate the average-case performance of
                 several online algorithms, which have no knowledge of
                 future arriving connection requests when processing the
                 current connection request. Our experimental results on
                 a wide range of optical networks demonstrate that the
average-case performance of these algorithms is very
                 close to optimal.",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "average-case performance; competitive ratio; online
                 algorithm; optical network; routing; wavelength
                 assignment; wavelength division multiplexing",
}

@Article{Biggar:2008:ESS,
  author =       "Paul Biggar and Nicholas Nash and Kevin Williams and
                 David Gregg",
  title =        "An experimental study of sorting and branch
                 prediction",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.8:1--1.8:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1370599",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Sorting is one of the most important and well-studied
                 problems in computer science. Many good algorithms are
known that offer various trade-offs in efficiency,
                 simplicity, memory use, and other factors. However,
                 these algorithms do not take into account features of
                 modern computer architectures that significantly
                 influence performance. Caches and branch predictors are
                 two such features and, while there has been a
                 significant amount of research into the cache
                 performance of general purpose sorting algorithms,
                 there has been little research on their branch
                 prediction properties. In this paper, we empirically
                 examine the behavior of the branches in all the most
                 common sorting algorithms. We also consider the
                 interaction of cache optimization on the predictability
                 of the branches in these algorithms. We find insertion
                 sort to have the fewest branch mispredictions of any
                 comparison-based sorting algorithm, that bubble and
                 shaker sort operate in a fashion that makes their
                 branches highly unpredictable, that the
                 unpredictability of shellsort's branches improves its
                 caching behavior, and that several cache optimizations
                 have little effect on mergesort's branch
                 mispredictions. We find also that optimizations to
                 quicksort, for example the choice of pivot, have a
                 strong influence on the predictability of its branches.
                 We point out a simple way of removing branch
                 instructions from a classic heapsort implementation and
                 also show that unrolling a loop in a cache-optimized
                 heapsort implementation improves the predictability of
                 its branches. Finally, we note that when sorting random
                 data, two-level adaptive branch predictors are usually
                 no better than simpler bimodal predictors, despite the
                 fact that two-level adaptive predictors are almost
                 always superior to bimodal predictors in general.",
  acknowledgement = ack-nhfb,
  articleno =    "1.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "branch prediction; caching; pipeline architectures;
                 sorting",
}
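
%%% A standard version of the heapsort trick alluded to above (not
%%% necessarily the paper's exact transformation) is to select the
%%% larger child arithmetically instead of with a data-dependent
%%% branch; in C the comparison compiles to a conditional move, and the
%%% Python below merely illustrates the idea:
%%%
%%%     def sift_down(a, i, n):
%%%         while 2 * i + 1 < n:
%%%             child = 2 * i + 1
%%%             # branchless child selection: bool used as 0/1 offset
%%%             child += child + 1 < n and a[child + 1] > a[child]
%%%             if a[i] >= a[child]:
%%%                 break
%%%             a[i], a[child] = a[child], a[i]
%%%             i = child
%%%
%%%     def heapsort(a):
%%%         n = len(a)
%%%         for i in range(n // 2 - 1, -1, -1):
%%%             sift_down(a, i, n)           # build the max-heap
%%%         for end in range(n - 1, 0, -1):
%%%             a[0], a[end] = a[end], a[0]  # move the max to the back
%%%             sift_down(a, 0, end)
%%%
%%%     xs = [5, 2, 9, 1, 7]
%%%     heapsort(xs)
%%%     print(xs)                            # [1, 2, 5, 7, 9]
%%%
%%% The comparison that picks a child is close to a coin flip on random
%%% data, so turning it into arithmetic removes the mispredictions
%%% rather than trying to predict them.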

@Article{Hazel:2008:TCL,
  author =       "Thomas Hazel and Laura Toma and Jan Vahrenhold and
                 Rajiv Wickremesinghe",
  title =        "Terracost: {Computing} least-cost-path surfaces for
                 massive grid terrains",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "1.9:1--1.9:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1370596.1370600",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "This paper addresses the problem of computing
                 least-cost-path surfaces for massive grid terrains.
                 Consider a grid terrain $T$ and let $C$ be a cost grid
                 for $T$ such that every point in $C$ stores a value
                 that represents the cost of traversing the
                 corresponding point in $T$. Given $C$ and a set of
sources $S \subseteq T$, a least-cost-path grid $\Delta$ for
                 $T$ is a grid such that every point in $\Delta$
                 represents the distance to the source in $S$ that can
                 be reached with minimal cost. We present a scalable
                 approach to computing least-cost-path grids. Our
                 algorithm, terracost, is derived from our previous work
                 on I/O-efficient shortest paths on grids and uses
                 $O(\hbox{sort}(n))$ I/Os, where $\hbox{sort}(n)$ is the
                 complexity of sorting $n$ items of data in the
                 I/O-model of Aggarwal and Vitter. We present the
                 design, the analysis, and an experimental study of
                 terracost. An added benefit of the algorithm underlying
                 terracost is that it naturally lends itself to
                 parallelization. We have implemented terracost in a
                 distributed environment using our cluster management
                 tool and report on experiments that show that it
obtains near-linear speedup with the size of the
                 cluster. To the best of our knowledge, this is the
                 first experimental evaluation of a multiple-source
                 least-cost-path algorithm in the external memory
                 setting.",
  acknowledgement = ack-nhfb,
  articleno =    "1.9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "data structures and algorithms; Dijkstra's algorithm;
                 I/O-efficiency; shortest paths; terrain data",
}
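
%%% Ignoring the I/O aspect, the computation itself is multi-source
%%% Dijkstra on the grid graph.  An in-RAM baseline sketch (taking the
%%% edge weight as the average of the two cell costs, one common
%%% convention and an assumption here):
%%%
%%%     import heapq
%%%
%%%     def least_cost_grid(C, sources):
%%%         rows, cols = len(C), len(C[0])
%%%         dist = [[float("inf")] * cols for _ in range(rows)]
%%%         pq = []
%%%         for r, c in sources:             # all sources start at 0
%%%             dist[r][c] = 0.0
%%%             heapq.heappush(pq, (0.0, r, c))
%%%         while pq:
%%%             d, r, c = heapq.heappop(pq)
%%%             if d > dist[r][c]:
%%%                 continue                 # stale heap entry
%%%             for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
%%%                 nr, nc = r + dr, c + dc
%%%                 if 0 <= nr < rows and 0 <= nc < cols:
%%%                     nd = d + (C[r][c] + C[nr][nc]) / 2
%%%                     if nd < dist[nr][nc]:
%%%                         dist[nr][nc] = nd
%%%                         heapq.heappush(pq, (nd, nr, nc))
%%%         return dist
%%%
%%% terracost's contribution is doing this when C does not fit in RAM,
%%% replacing the random accesses above with O(sort(n)) sequential I/O.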

@Article{Arge:2008:P,
  author =       "Lars Arge and Giuseppe F. Italiano",
  title =        "Preface",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "2.1:1--2.1:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1227163",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Brodal:2008:ECO,
  author =       "Gerth St{\o}lting Brodal and Rolf Fagerberg and
                 Kristoffer Vinther",
  title =        "Engineering a cache-oblivious sorting algorithm",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "2.2:1--2.2:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1227164",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "This paper is an algorithmic engineering study of
                 cache-oblivious sorting. We investigate by empirical
                 methods a number of implementation issues and parameter
                 choices for the cache-oblivious sorting algorithm Lazy
                 Funnelsort and compare the final algorithm with
                 Quicksort, the established standard for
                 comparison-based sorting, as well as with recent
                 cache-aware proposals. The main result is a carefully
                 implemented cache-oblivious sorting algorithm, which,
                 our experiments show, can be faster than the best
                 Quicksort implementation we are able to find for input
                 sizes well within the limits of RAM. It is also at
                 least as fast as the recent cache-aware implementations
                 included in the test. On disk, the difference is even
                 more pronounced regarding Quicksort and the cache-aware
                 algorithms, whereas the algorithm is slower than a
                 careful implementation of multiway Mergesort, such as
                 the one in TPIE.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "cache-oblivious algorithms; funnelsort; quicksort",
}

@Article{Bender:2008:SSH,
  author =       "Michael A. Bender and Bryan Bradley and Geetha
                 Jagannathan and Krishnan Pillaipakkamnatt",
  title =        "Sum-of-squares heuristics for bin packing and memory
                 allocation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "2.3:1--2.3:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1227165",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The sum-of-squares algorithm (SS) was introduced by
                 Csirik, Johnson, Kenyon, Shor, and Weber for online bin
                 packing of integral-sized items into integral-sized
                 bins. First, we show the results of experiments from
                 two new variants of the SS algorithm. The first
                 variant, which runs in time $O(n \sqrt{B \log B})$,
appears to have expected waste almost identical to the
                 sum-of-squares algorithm on all the distributions
                 mentioned in the original papers on this topic. The
                 other variant, which runs in $O(n \log B)$ time,
                 performs well on most, but not on all of those
                 distributions. We also apply SS to the online
                 memory-allocation problem. Our experimental comparisons
                 between SS and Best Fit indicate that neither algorithm
                 is consistently better than the other. If the amount of
                 randomness in item sizes is low, SS appears to have
                 lower waste than Best Fit, whereas, if the amount of
randomness is high, Best Fit appears to have lower waste
                 than SS. Our experiments suggest that in both real and
                 synthetic traces, SS does not seem to have an
                 asymptotic advantage over Best Fit, in contrast with
                 the bin-packing problem.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "bin packing; memory allocation; sum of squares",
}
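
%%% The SS rule itself is one line of arithmetic: place each item so
%%% that ss = sum over g = 1..B-1 of N(g)^2 is smallest afterwards,
%%% where N(g) counts open bins with unused capacity ("gap") g.  A
%%% direct sketch of the original rule (item sizes assumed in 1..B):
%%%
%%%     from collections import Counter
%%%
%%%     def ss_pack(items, B):
%%%         gaps = Counter()                 # gap -> open-bin count
%%%
%%%         def delta(old, new):
%%%             # ss change when a bin moves from gap `old` to `new`
%%%             # (old == B opens a bin; new == 0 closes one)
%%%             d = 0
%%%             if 0 < old < B:
%%%                 d += (gaps[old] - 1) ** 2 - gaps[old] ** 2
%%%             if 0 < new < B:
%%%                 d += (gaps[new] + 1) ** 2 - gaps[new] ** 2
%%%             return d
%%%
%%%         for s in items:
%%%             # legal placements: any open gap >= s, or a new bin
%%%             cands = [g for g in gaps if g >= s and gaps[g] > 0] + [B]
%%%             g = min(cands, key=lambda g: delta(g, g - s))
%%%             if g < B:
%%%                 gaps[g] -= 1
%%%             if g > s:
%%%                 gaps[g - s] += 1
%%%         return +gaps                     # drop empty gap classes
%%%
%%%     print(ss_pack([3, 7, 5, 5, 2, 8], B=10))  # Counter(): all full
%%%
%%% The two variants studied above trade exactness of this argmin for
%%% speed, approximating it in roughly O(sqrt(B log B)) or O(log B)
%%% time per item instead of O(B).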

@Article{Pyrga:2008:EMT,
  author =       "Evangelia Pyrga and Frank Schulz and Dorothea Wagner
                 and Christos Zaroliagis",
  title =        "Efficient models for timetable information in public
                 transportation systems",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "2.4:1--2.4:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1227166",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider two approaches that model timetable
                 information in public transportation systems as
                 shortest-path problems in weighted graphs. In the {\em
                 time-expanded\/} approach, every event at a station,
                 e.g., the departure of a train, is modeled as a node in
                 the graph, while in the {\em time-dependent\/} approach
                 the graph contains only one node per station. Both
                 approaches have been recently considered for (a
                 simplified version of) the earliest arrival problem,
                 but little is known about their relative performance.
                 Thus far, there are only theoretical arguments in favor
                 of the time-dependent approach. In this paper, we
                 provide the first extensive experimental comparison of
                 the two approaches. Using several real-world data sets,
                 we evaluate the performance of the basic models and of
                 several new extensions towards realistic modeling.
                 Furthermore, new insights on solving bicriteria
                 optimization problems in both models are presented. The
                 time-expanded approach turns out to be more robust for
                 modeling more complex scenarios, whereas the
                 time-dependent approach shows a clearly better
                 performance.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "itinerary query; public transportation system;
                 shortest path; timetable information",
}
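
The contrast between the two models is easy to see on a toy example. The
Python sketch below (an illustration with an invented three-connection
timetable; it is not code from the paper) builds the event nodes and the
ride/wait arcs of the time-expanded graph, and the per-edge travel-time
function of the time-dependent graph.

# Toy timetable: elementary connections (from, to, departure, arrival).
timetable = [("A", "B", 8, 9), ("A", "B", 10, 11), ("B", "C", 9, 12)]

# Time-expanded model: one node per event (station, time); "ride" arcs
# for the connections themselves, "wait" arcs chaining events at a station.
events = sorted({(c[0], c[2]) for c in timetable} |
                {(c[1], c[3]) for c in timetable})
ride_arcs = [((c[0], c[2]), (c[1], c[3])) for c in timetable]
wait_arcs = [(u, v) for u, v in zip(events, events[1:]) if u[0] == v[0]]

# Time-dependent model: one node per station; each edge evaluates a
# travel-time function at the time t the station is reached.
def travel_time(frm, to, t):
    """Time until reaching `to` when at `frm` at time t (waiting included)."""
    usable = [c for c in timetable if c[0] == frm and c[1] == to and c[2] >= t]
    return min(c[3] - t for c in usable) if usable else None

print(wait_arcs)                  # [(('A', 8), ('A', 10)), (('B', 9), ('B', 11))]
print(travel_time("A", "B", 9))   # 2: wait until 10, arrive at 11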

@Article{Leaver-Fay:2008:FPH,
  author =       "Andrew Leaver-Fay and Yuanxin Liu and Jack Snoeyink
                 and Xueyi Wang",
  title =        "Faster placement of hydrogens in protein structures by
                 dynamic programming",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "2.5:1--2.5:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1227167",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "M. Word and coauthors from the Richardsons' 3D Protein
                 Structure laboratory at Duke University propose {\em
                 dot scores\/} to measure interatomic interactions in
                 molecular structures. Their program REDUCE uses these
                 scores in a brute-force search to solve instances of
                 the {\em NP\/}-hard problem of finding the optimal
                 placement of hydrogen atoms in molecular structures
                 determined by X-ray crystallography. We capture the
                 central combinatorial optimization in the hydrogen
                 placement problem with an abstraction that we call an
                 interaction (hyper)graph. REDUCE's dot-based scoring
                 function cannot be decomposed into the sum of pair
                 interactions, but because the function is short-ranged
                 we are able to decompose it into the sum of single,
                 pair, triple, and quadruple interactions that we
                 represent by graph hyperedges. Almost every interaction
                 graph we have observed has had a small treewidth. This
                 fact allows us to replace the brute-force search by
                 dynamic programming, giving speedups of nearly ten
                 orders of magnitude. This dynamic programming has been
                 incorporated into REDUCE and is available for
                 download.",
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dynamic programming; hard-sphere model; hydrogen
                 bonds; hydrogen placement; protein structure;
                 treewidth",
}

@Article{Demetrescu:2008:PA,
  author =       "Camil Demetrescu and Roberto Tamassia",
  title =        "Papers from {ALENEX 2005}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "3.1:1--3.1:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1402293",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "3.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Brodal:2008:AQ,
  author =       "Gerth St{\o}lting Brodal and Rolf Fagerberg and
                 Gabriel Moruz",
  title =        "On the adaptiveness of {Quicksort}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "3.2:1--3.2:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1402294",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Quicksort was first introduced in 1961 by Hoare. Many
                 variants have been developed, the best of which are
                 among the fastest generic-sorting algorithms available,
                 as testified by the choice of Quicksort as the default
                 sorting algorithm in most programming libraries. Some
                 sorting algorithms are adaptive, i.e., they have a
                 complexity analysis that is better for inputs that are
                 nearly sorted, according to some specified measure
                 of presortedness. Quicksort is not among these, as it
                 uses $\Omega (n \log n)$ comparisons even for sorted
                 inputs. However, in this paper, we demonstrate
                 empirically that the actual running time of Quicksort
                 {\em is\/} adaptive with respect to the presortedness
                 measure Inv. Differences close to a factor of two are
                 observed between instances with low and high Inv value.
                 We then show that for the randomized version of
                 Quicksort, the number of element {\em swaps\/}
                 performed is {\em provably\/} adaptive with respect to
                 the measure $\hbox{Inv}$. More precisely, we prove that
                 randomized Quicksort performs expected $O(n (1 + \log(1
                 + \hbox{Inv} / n)))$ element swaps, where $\hbox{Inv}$
                 denotes the number of inversions in the input sequence.
                 This result provides a theoretical explanation for the
                 observed behavior and gives new insights on the
                 behavior of Quicksort. We also give some empirical
                 results on the adaptive behavior of Heapsort and
                 Mergesort.",
  acknowledgement = ack-nhfb,
  articleno =    "3.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "adaptive sorting; branch mispredictions; Quicksort",
}
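
The swap/Inv relationship reported above can be probed with a few lines of
Python. The experiment below (an illustrative reimplementation, not the
authors' benchmark code) counts inversions via merge sort and element swaps
in a randomized Hoare-partition Quicksort on inputs of increasing disorder.

import random

def count_inversions(a):
    """Count inversions (the measure Inv) by merge sort in O(n log n)."""
    def rec(xs):
        if len(xs) <= 1:
            return xs, 0
        mid = len(xs) // 2
        left, inv_l = rec(xs[:mid])
        right, inv_r = rec(xs[mid:])
        merged, inv, i, j = [], inv_l + inv_r, 0, 0
        while i < len(left) and j < len(right):
            if left[i] <= right[j]:
                merged.append(left[i]); i += 1
            else:
                merged.append(right[j]); j += 1
                inv += len(left) - i        # all of left[i:] invert with it
        merged += left[i:] + right[j:]
        return merged, inv
    return rec(list(a))[1]

def quicksort_swaps(a):
    """Randomized Hoare-partition Quicksort; returns the swap count."""
    a, swaps = list(a), 0
    def sort(lo, hi):                       # sorts a[lo:hi]
        nonlocal swaps
        if hi - lo < 2:
            return
        p = a[random.randrange(lo, hi)]
        i, j = lo, hi - 1
        while i <= j:
            while a[i] < p: i += 1
            while a[j] > p: j -= 1
            if i <= j:
                a[i], a[j] = a[j], a[i]; swaps += 1
                i += 1; j -= 1
        sort(lo, j + 1); sort(i, hi)
    sort(0, len(a))
    return swaps

base = list(range(10000))
for noise in (0, 100, 10000):               # more transpositions -> more Inv
    a = base[:]
    for _ in range(noise):
        i, j = random.randrange(len(a)), random.randrange(len(a))
        a[i], a[j] = a[j], a[i]
    print(count_inversions(a), quicksort_swaps(a))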

@Article{Codenotti:2008:ESD,
  author =       "Bruno Codenotti and Benton Mccune and Sriram Pemmaraju
                 and Rajiv Raman and Kasturi Varadarajan",
  title =        "An experimental study of different approaches to solve
                 the market equilibrium problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "3.3:1--3.3:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1402295",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Over the last few years, the problem of computing
                 market equilibrium prices for exchange economies has
                 received much attention in the theoretical computer
                 science community. Such activity led to a flurry of
                 polynomial time algorithms for various restricted, yet
                 significant, settings. The most important restrictions
                 arise either when the traders' utility functions
                 satisfy a property known as {\em gross
                 substitutability\/} or when the initial endowments are
                 proportional (the Fisher model). In this paper, we
                 experimentally compare the performance of some of these
                 recent algorithms against that of the most widely used
                 software packages. In particular, we evaluate the
                 following approaches: (1) the solver PATH, available
                 under GAMS/MPSGE, a popular tool for computing market
                 equilibrium prices; (2) a discrete version of a simple
                 iterative price update scheme called t{\^a}tonnement;
                 (3) a discrete version of the welfare adjustment
                 process; (4) convex feasibility programs that
                 characterize the equilibrium in some special cases. We
                 analyze the performance of these approaches on models
                 of exchange economies where the consumers are equipped
                 with utility functions that are widely used in
                 real-world applications. The outcomes of our experiments
                 consistently show that many market settings allow for
                 an efficient computation of the equilibrium, well
                 beyond the restrictions under which the theory provides
                 polynomial time guarantees. For some of the approaches,
                 we also identify models where they are prone to
                 failure.",
  acknowledgement = ack-nhfb,
  articleno =    "3.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "market equilibrium",
}
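
Of the approaches listed above, the discrete tatonnement (2) is the
simplest to sketch. The Python/NumPy toy below assumes Cobb--Douglas
utilities, one of the well-behaved gross-substitutes cases, so that demands
have a closed form; it is an illustration added here, not the
implementation evaluated in the paper.

import numpy as np

def tatonnement(alpha, endow, steps=2000, lam=0.1):
    """alpha[i][j]: Cobb-Douglas exponents of trader i (rows sum to 1);
    endow[i][j]: initial endowments. Returns approximate equilibrium prices."""
    p = np.ones(alpha.shape[1])
    supply = endow.sum(axis=0)
    for _ in range(steps):
        budgets = endow.dot(p)               # value of each trader's endowment
        demand = (alpha * budgets[:, None] / p).sum(axis=0)
        excess = demand - supply
        p *= 1 + lam * excess / supply       # raise prices of scarce goods
        p /= p.sum()                         # prices matter only up to scale
    return p

alpha = np.array([[0.7, 0.3], [0.2, 0.8]])
endow = np.array([[1.0, 0.0], [0.0, 1.0]])
print(tatonnement(alpha, endow))             # converges near [0.4, 0.6]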

@Article{Dementiev:2008:BEM,
  author =       "Roman Dementiev and Juha K{\"a}rkk{\"a}inen and Jens
                 Mehnert and Peter Sanders",
  title =        "Better external memory suffix array construction",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "3.4:1--3.4:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1402296",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Suffix arrays are a simple and powerful data structure
                 for text processing that can be used for full text
                 indexes, data compression, and many other applications,
                 in particular, in bioinformatics. However, so far, it
                 has appeared prohibitive to build suffix arrays for
                 huge inputs that do not fit into main memory. This
                 paper presents design, analysis, implementation, and
                 experimental evaluation of several new and improved
                 algorithms for suffix array construction. The
                 algorithms are asymptotically optimal in the worst case
                 or on average. Our implementation can construct suffix
                 arrays for inputs of up to 4 GB in hours on a low-cost
                 machine. As a tool of possible independent interest, we
                 present a systematic way to design, analyze, and
                 implement {\em pipelined\/} algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "3.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithm engineering; algorithms for strings;
                 external memory; I/O-efficient; large data sets;
                 secondary memory; suffix array",
}
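
For orientation only: a suffix array is just the lexicographic order of a
text's suffixes, as in the quadratic-memory Python one-liner below. Its
in-memory nature is exactly the limitation the paper's external-memory,
pipelined algorithms remove; the sketch is unrelated to their code.

def suffix_array(text):
    """Lexicographic order of suffixes; fine for small in-memory strings."""
    return sorted(range(len(text)), key=lambda i: text[i:])

print(suffix_array("banana"))   # [5, 3, 1, 0, 4, 2]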

@Article{Swenson:2008:ATE,
  author =       "Krister M. Swenson and Mark Marron and Joel V.
                 Earnest-Deyoung and Bernard M. E. Moret",
  title =        "Approximating the true evolutionary distance between
                 two genomes",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "3.5:1--3.5:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1227161.1402297",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "As more and more genomes are sequenced, evolutionary
                 biologists are becoming increasingly interested in
                 evolution at the level of whole genomes, in scenarios
                 in which the genome evolves through insertions,
                 duplications, deletions, and movements of genes along
                 its chromosomes. In the mathematical model pioneered by
                 Sankoff and others, a unichromosomal genome is
                 represented by a signed permutation of a multiset of
                 genes; Hannenhalli and Pevzner showed that the edit
                 distance between two signed permutations of the same
                 set can be computed in polynomial time when all
                 operations are inversions. El-Mabrouk extended that
                 result to allow deletions and a limited form of
                 insertions (which forbids duplications); in turn we
                 extended it to compute a nearly optimal edit sequence
                 between an arbitrary genome and the identity
                 permutation. In this paper we generalize our approach
                 to compute distances between two arbitrary genomes, but
                 focus on approximating the true evolutionary distance
                 rather than the edit distance. We present experimental
                 results showing that our algorithm produces excellent
                 estimates of the true evolutionary distance up to a
                 (high) threshold of saturation; indeed, the distances
                 thus produced are good enough to enable the simple
                 neighbor-joining procedure to reconstruct our test
                 trees with high accuracy.",
  acknowledgement = ack-nhfb,
  articleno =    "3.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "duplications; evolution; inversions; pairwise
                 distances; whole-genome data",
}

@Article{Krommidas:2008:ESA,
  author =       "Ioannis Krommidas and Christos Zaroliagis",
  title =        "An experimental study of algorithms for fully dynamic
                 transitive closure",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "12",
  pages =        "16:1--16:??",
  month =        jun,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1370596.1370597",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:17:58 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We have conducted an extensive experimental study on
                 algorithms for fully dynamic transitive closure. We
                 have implemented the recent fully dynamic algorithms by
                 King [1999], Roditty [2003], Roditty and Zwick [2002,
                 2004], and Demetrescu and Italiano [2000, 2005] along
                 with several variants and compared them to pseudo fully
                 dynamic and simple-minded algorithms developed in a
                 previous study [Frigioni et al. 2001]. We tested and
                 compared these implementations on random inputs,
                 synthetic (worst-case) inputs, and on inputs motivated
                 by real-world graphs. Our experiments reveal that some
                 of the dynamic algorithms can really be of practical
                 value in many situations.",
  acknowledgement = ack-nhfb,
  articleno =    "16",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "dynamic algorithm; path; reachability; transitive
                 closure",
}

@Article{Gottlob:2008:BBA,
  author =       "Georg Gottlob and Marko Samer",
  title =        "A backtracking-based algorithm for hypertree
                 decomposition",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "1:1--1:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1412228.1412229",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Hypertree decompositions of hypergraphs are a
                 generalization of tree decompositions of graphs. The
                 corresponding hypertree-width is a measure for the
                 acyclicity and therefore an indicator for the
                 tractability of the associated computation problem.
                 Several NP-hard decision and computation problems are
                 known to be tractable on instances whose structure is
                 represented by hypergraphs of bounded hypertree-width.
                 Roughly speaking, the smaller the hypertree-width, the
                 faster the computation problem can be solved. In this
                 paper, we present the new backtracking-based algorithm
                 det-$k$-decomp for computing hypertree decompositions
                 of small width. Our benchmark evaluations have shown
                 that det-$k$-decomp significantly outperforms
                 opt-$k$-decomp, the only exact hypertree decomposition
                 algorithm so far. Even compared to the best heuristic
                 algorithm, we obtained competitive results as long as
                 the hypergraphs are sufficiently simple.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "constraint satisfaction; hypertree decomposition",
}

@Article{Raman:2008:P,
  author =       "Rajeev Raman and Matt Stallmann",
  title =        "Preface",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "1:1--1:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1412228.1412235",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Gramm:2008:DRE,
  author =       "Jens Gramm and Jiong Guo and Falk H{\"u}ffner and Rolf
                 Niedermeier",
  title =        "Data reduction and exact algorithms for clique cover",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "2:1--2:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1412228.1412236",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "To cover the edges of a graph with a minimum number of
                 cliques is an NP-hard problem with many applications.
                 For this problem we develop efficient and effective
                 polynomial-time data reduction rules that, combined
                 with a search tree algorithm, allow for exact problem
                 solutions in competitive time. This is confirmed by
                 experiments with real-world and synthetic data.
                 Moreover, we prove the fixed-parameter tractability of
                 covering edges by cliques.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "clique cover; data reduction; fixed-parameter
                 tractability",
}

@Article{Haran:2008:ESP,
  author =       "Idit Haran and Dan Halperin",
  title =        "An experimental study of point location in planar
                 arrangements in {CGAL}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "3:1--3:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1412228.1412237",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study the performance in practice of various
                 point-location algorithms implemented in CGAL (the
                 Computational Geometry Algorithms Library), including a
                 newly devised {\em landmarks\/} algorithm. Among the
                 other algorithms studied are: a na{\"\i}ve approach, a
                 ``walk along a line'' strategy, and a trapezoidal
                 decomposition-based search structure. The current
                 implementation addresses general arrangements of planar
                 curves, including arrangements of nonlinear segments
                 (e.g., conic arcs) and allows for degenerate input (for
                 example, more than two curves intersecting in a single
                 point or overlapping curves). The algorithms use exact
                 geometric computation and thus result in the correct
                 point location. In our landmarks algorithm (a.k.a. jump
                 \& walk), special points, ``landmarks,'' are chosen in
                 a preprocessing stage, their place in the arrangement
                 is found, and they are inserted into a data structure
                 that enables efficient nearest-neighbor search. Given a
                 query point, the nearest landmark is located and a
                 ``walk'' strategy is applied from the landmark to the
                 query point. We report on various experiments with
                 arrangements composed of line segments or conic arcs.
                 The results indicate that compared to the other
                 algorithms tested, the landmarks approach is the most
                 efficient, when the overall (amortized) cost of a query
                 is taken into account, combining both preprocessing and
                 query time. The simplicity of the algorithm enables an
                 almost straightforward implementation and rather easy
                 maintenance. The generic programming implementation
                 allows versatility both in the selected type of
                 landmarks and in the choice of the nearest-neighbor
                 search structure. The end result is an efficient
                 point-location algorithm that surpasses the alternative
                 CGAL implementations in most practical aspects.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "arrangements; CGAL; computational geometry; generic
                 programming; point location",
}

@Article{Lanthier:2008:CAC,
  author =       "Mark A. Lanthier and Doron Nussbaum and Tsuo-Jung
                 Wang",
  title =        "Computing an approximation of the $1$-center problem
                 on weighted terrain surfaces",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "3:1--3:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1412228.1412231",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this article, we discuss the problem of determining
                 a meeting point of a set of scattered robots $R =
                 \{r_1, r_2, \ldots{}, r_s\}$ in a weighted terrain
                 $P$, which has $n > s$ triangular faces. Our
                 algorithmic approach is to produce a discretization
                 of $P$ by producing a graph $G = (V^G, E^G)$, which
                 lies on the surface of $P$. For a chosen vertex $p'
                 \in V^G$, we define $|\Pi(r_i, p')|$ as the minimum
                 weight cost of traveling from $r_i$ to $p'$. We show
                 that $\min_{p' \in V^G} \max_{1 \leq i \leq s}
                 |\Pi(r_i, p')| \leq \min_{p^* \in P} \max_{1 \leq i
                 \leq s} |\Pi(r_i, p^*)| + 2 W |L|$, where $L$ is the
                 longest edge of $P$, $W$ is the maximum cost weight
                 of a face of $P$, and $p^*$ is the optimal
                 solution. Our algorithm
                 requires $O(s n m \log(s n m) + s n m^2)$ time to run,
                 where $m = n$ in the Euclidean metric and $m = n^2$ in
                 the weighted metric. However, we show, through
                 experimentation, that only a constant value of $m$ is
                 required (e.g., $m = 8$) in order to produce very
                 accurate solutions.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "1-Center; algorithms; approximation; meeting point;
                 robots; shortest path; terrain; weighted",
}

@Article{Hershberger:2008:SSD,
  author =       "John Hershberger and Nisheeth Shrivastava and Subhash
                 Suri",
  title =        "Summarizing spatial data streams using
                 {ClusterHulls}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "4:1--4:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1412228.1412238",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the following problem: given an on-line,
                 possibly unbounded stream of two-dimensional (2D)
                 points, how can we summarize its spatial distribution
                 or {\em shape\/} using a small, bounded amount of
                 memory? We propose a novel scheme, called {\em
                 ClusterHull}, which represents the shape of the stream
                 as a dynamic collection of convex hulls, with a total
                 of at most $m$ vertices, where $m$ is the size of the
                 memory. The algorithm dynamically adjusts both the
                 number of hulls and the number of vertices in each hull
                 to best represent the stream using its fixed-memory
                 budget. This algorithm addresses a problem whose
                 importance is increasingly recognized, namely, the
                 problem of summarizing real-time data streams to enable
                 on-line analytical processing. As a motivating example,
                 consider habitat monitoring using wireless sensor
                 networks. The sensors produce a steady stream of
                 geographic data, namely, the locations of objects being
                 tracked. In order to conserve their limited resources
                 (power, bandwidth, and storage), the sensors can
                 compute, store, and exchange ClusterHull summaries of
                 their data, without losing important geometric
                 information. We are not aware of other schemes
                 specifically designed for capturing shape information
                 in geometric data streams and so we compare ClusterHull
                 with some of the best general-purpose clustering
                 schemes, such as CURE, $k$-medians, and LSEARCH. We
                 show through experiments that ClusterHull is able to
                 represent the shape of two-dimensional data streams
                 more faithfully and flexibly than the stream versions
                 of these clustering algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "convex hull; data streams; geometric data",
}

@Article{Safro:2008:MAL,
  author =       "Ilya Safro and Dorit Ron and Achi Brandt",
  title =        "Multilevel algorithms for linear ordering problems",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "4:1--4:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1412228.1412232",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Linear ordering problems are combinatorial
                 optimization problems that deal with the minimization
                 of different functionals by finding a suitable
                 permutation of the graph vertices. These problems are
                 widely used and studied in many practical and
                 theoretical applications. In this paper, we present a
                 variety of linear-time algorithms for these problems
                 inspired by the Algebraic Multigrid approach, which is
                 based on weighted-edge contraction. The experimental
                 results for four such problems turned out to be better
                 than every known result in almost all cases, while the
                 short (linear) running time of the algorithms enables
                 testing very large graphs.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algebraic multigrid; linear ordering; multilevel
                 algorithm",
}

@Article{Holzer:2008:EMO,
  author =       "Martin Holzer and Frank Schulz and Dorothea Wagner",
  title =        "Engineering multilevel overlay graphs for
                 shortest-path queries",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "13",
  pages =        "5:1--5:??",
  month =        sep,
  year =         "2008",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1412228.1412239",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Oct 6 16:18:31 MDT 2008",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "An overlay graph of a given graph $G$ = ($V$, $E$) on
                 a subset $S \subseteq V$ is a graph with vertex set $S$
                 and edges corresponding to shortest paths in $G$. In
                 particular, we consider variations of the multilevel
                 overlay graph used in Schulz et al. [2002] to speed up
                 shortest-path computation. In this work, we follow up
                 and present several vertex selection criteria, along
                 with two general strategies of applying these criteria,
                 to determine a subset $S$ of a graph's vertices. The
                 main contribution is a systematic experimental study
                 where we investigate the impact of selection criteria
                 and strategies on multilevel overlay graphs and the
                 resulting speed-up achieved for shortest-path
                 computation: Depending on selection strategy and graph
                 type, a centrality index criterion, selection based on
                 planar separators, and vertex degree turned out to
                 perform best.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "Dijkstra's algorithm; hierarchical; multilevel;
                 overlay graph; preprocessing; shortest path; speed-up
                 technique; vertex selection",
}
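
A minimal, distance-preserving version of the overlay construction can be
sketched as follows (illustrative Python; the paper's multilevel
construction with its selection criteria is considerably more refined):
connect every selected vertex u in S to every other v in S by an edge
weighted with dist_G(u, v).

import heapq

def dijkstra(graph, src):
    """graph: dict u -> list of (v, w). Returns distances from src."""
    dist = {src: 0}
    heap = [(0, src)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist.get(u, float("inf")):
            continue                         # stale heap entry
        for v, w in graph.get(u, []):
            if d + w < dist.get(v, float("inf")):
                dist[v] = d + w
                heapq.heappush(heap, (d + w, v))
    return dist

def overlay(graph, S):
    """Overlay on vertex subset S: edge (u, v) carries dist_G(u, v), so
    shortest-path distances among the vertices of S are preserved."""
    return {u: [(v, d) for v, d in dijkstra(graph, u).items()
                if v != u and v in S]
            for u in S}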

@Article{Julstrom:2009:GHB,
  author =       "Bryant A. Julstrom",
  title =        "Greedy heuristics for the bounded diameter minimum
                 spanning tree problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "1:1--1:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Given a connected, weighted, undirected graph G and a
                 bound $D$, the bounded diameter minimum spanning tree
                 problem seeks a spanning tree on $G$ of minimum weight
                 among the trees in which no path between two vertices
                 contains more than $D$ edges. In Prim's algorithm, the
                 diameter of the growing spanning tree can always be
                 known, so it is a good starting point from which to
                 develop greedy heuristics for the bounded diameter
                 problem. Abdalla, Deo, and Gupta described such an
                 algorithm. It imitates Prim's algorithm but avoids
                 edges whose inclusion in the spanning tree would
                 violate the diameter bound. Running the algorithm from
                 one start vertex requires time that is $O(n^3)$. A
                 modification of this approach uses the start vertex as
                 the center of the spanning tree (if $D$ is even) or as
                 one of the two center vertices (if $D$ is odd). This
                 yields a simpler algorithm whose time is $O(n^2)$. A
                 further modification chooses each next vertex at random
                 rather than greedily, though it still connects each
                 vertex to the growing tree with the lowest-weight
                 feasible edge. On Euclidean problem instances with
                 small diameter bounds, the randomized heuristic is
                 superior to the two fully greedy algorithms, though its
                 advantage fades as the diameter bound grows. On
                 instances whose edge weights have been chosen at
                 random, the fully greedy algorithms outperform the
                 randomized heuristic.",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
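
The O(n^2) center-based variant described above admits a compact sketch
(illustrative Python, assuming an even diameter bound D >= 2 and a complete
weight matrix; not the author's code): grow the tree from the chosen
center, always attaching the vertex with the cheapest feasible edge, where
feasible means the attachment point has depth below D/2.

import math, random

def cbtc(weights, center, D):
    """Center-based greedy heuristic: returns tree edges (parent, child,
    cost). weights: complete symmetric matrix; D: even diameter bound >= 2."""
    n, half = len(weights), D // 2
    depth = {center: 0}
    # best[v] = (cost, parent): cheapest feasible edge joining v to the tree
    best = {v: (weights[center][v], center) for v in range(n) if v != center}
    edges = []
    while best:
        v = min(best, key=lambda u: best[u][0])
        cost, parent = best.pop(v)
        edges.append((parent, v, cost))
        depth[v] = depth[parent] + 1
        if depth[v] < half:                  # v may accept children
            for u in best:
                if weights[v][u] < best[u][0]:
                    best[u] = (weights[v][u], v)
    return edges

pts = [(random.random(), random.random()) for _ in range(30)]
W = [[math.dist(p, q) for q in pts] for p in pts]
print(sum(c for _, _, c in cbtc(W, center=0, D=4)))   # total tree weight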

@Article{Munro:2009:PSS,
  author =       "J. Ian Munro and Dorothea Wagner",
  title =        "Preface: Section 2 --- Selected Papers from {ALENEX
                 2008}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "1:1--1:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Dumitriu:2009:HMG,
  author =       "Daniel Dumitriu and Stefan Funke and Martin Kutz and
                 Nikola Milosavljevi{\'c}",
  title =        "How much geometry it takes to reconstruct a
                 $2$-manifold in {$R^3$}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "2:1--2:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Known algorithms for reconstructing a 2-manifold from
                 a point sample in $R^3$ are naturally based on
                 decisions/predicates that take the geometry of the
                 point sample into account. Facing the always present
                 problem of round-off errors that easily compromise the
                 exactness of those predicate decisions, an exact and
                 robust implementation of these algorithms is far from
                 being trivial and typically requires the use of
                 advanced datatypes for exact arithmetic, as provided by
                 libraries like CORE, LEDA, or GMP. In this article, we
                 present a new reconstruction algorithm, whose main
                 novelty is to throw away geometry information early
                 on in the reconstruction process and to operate mainly
                 combinatorially on a graph structure. More precisely,
                 our algorithm only requires distances between the
                 sample points and not the actual embedding in $R^3$. As
                 such, it is less susceptible to robustness problems due
                 to round-off errors and, by not requiring expensive
                 exact arithmetic, also achieves faster running
                 times. A more theoretical view of our algorithm
                 including correctness proofs under suitable sampling
                 conditions can be found in a companion article.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Irving:2009:FLS,
  author =       "Robert W. Irving and David F. Manlove",
  title =        "Finding large stable matchings",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "2:1--2:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "When ties and incomplete preference lists are
                 permitted in the stable marriage and
                 hospitals/residents problems, stable matchings can have
                 different sizes. The problem of finding a maximum
                 cardinality stable matching in this context is known to
                 be NP-hard, even under very severe restrictions on the
                 number, size, and position of ties. In this article, we
                 present two new heuristics for finding large stable
                 matchings in variants of these problems in which ties
                 are on one side only. We describe an empirical study
                 involving these heuristics and the best existing
                 approximation algorithm for this problem. Our results
                 indicate that all three of these algorithms perform
                 significantly better than naive tie-breaking algorithms
                 when applied to real-world and randomly-generated data
                 sets and that one of the new heuristics fares slightly
                 better than the other algorithms, in most cases. This
                 study, and these particular problem variants, are
                 motivated by important applications in large-scale
                 centralized matching schemes.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
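
As a baseline in the spirit of the "naive tie-breaking algorithms"
mentioned above, one can break all ties arbitrarily and run Gale--Shapley
on the resulting strict instance with incomplete lists (illustrative
Python, not one of the paper's heuristics); the outcome is stable but may
leave agents unmatched, which is exactly why maximizing cardinality is the
hard part.

def gale_shapley(men_prefs, women_prefs):
    """men_prefs / women_prefs: person -> strictly ordered list of acceptable
    partners (incomplete lists allowed). Returns a stable matching w -> m."""
    rank = {w: {m: i for i, m in enumerate(ps)} for w, ps in women_prefs.items()}
    free = list(men_prefs)
    nxt = {m: 0 for m in men_prefs}      # next index on m's list to try
    fiance = {}                          # woman -> current partner
    while free:
        m = free.pop()
        while nxt[m] < len(men_prefs[m]):
            w = men_prefs[m][nxt[m]]
            nxt[m] += 1
            if m not in rank.get(w, {}):
                continue                 # w does not find m acceptable
            if w not in fiance:
                fiance[w] = m
                break
            if rank[w][m] < rank[w][fiance[w]]:
                free.append(fiance[w])   # displaced man proposes again later
                fiance[w] = m
                break
        # a man who exhausts his list simply remains unmatched
    return fiance

men = {"m1": ["w1", "w2"], "m2": ["w1"]}
women = {"w1": ["m1", "m2"], "w2": ["m1"]}
print(gale_shapley(men, women))          # {'w1': 'm1'}; m2 and w2 unmatched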

@Article{Basu:2009:GAO,
  author =       "Amitabh Basu and Joseph S. B. Mitchell and Girish
                 Kumar Sabhnani",
  title =        "Geometric algorithms for optimal airspace design and
                 air traffic controller workload balancing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "3:1--3:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The National Airspace System (NAS) is designed to
                 accommodate a large number of flights over North
                 America. For purposes of workload limitations for air
                 traffic controllers, the airspace is partitioned into
                 approximately 600 sectors; each sector is observed by
                 one or more controllers. In order to satisfy workload
                 limitations for controllers, it is important that
                 sectors be designed carefully according to the traffic
                 patterns of flights, so that no sector becomes
                 overloaded. We formulate and study the airspace
                 sectorization problem from an algorithmic
                 point of view, modeling the problem of optimal
                 sectorization as a geometric partition problem with
                 constraints. The novelty of the problem is that it
                 partitions data consisting of trajectories of moving
                 points, rather than the static point sets that are
                 commonly studied. First, we formulate and solve the
                 1D version of the problem, showing how to partition a
                 line into ``sectors'' (intervals) according to
                 historical trajectory data. Then, we apply the 1D
                 solution framework to design a 2D sectorization
                 heuristic based on binary space partitions. We also
                 devise partitions based on balanced ``pie partitions''
                 of a convex polygon. We evaluate our 2D algorithms
                 experimentally, applying our algorithms to actual
                 historical flight track data for the NAS. We compare
                 the workload balance of our methods to that of the
                 existing set of sectors for the NAS and find that our
                 resectorization yields competitive and improved
                 workload balancing. In particular, our methods yield an
                 improvement by a factor between 2 and 3 over the
                 current sectorization in terms of the time-average and
                 the worst-case workloads of the maximum workload
                 sector. An even better improvement is seen in the
                 standard deviations (over all sectors) of both
                 time-average and worst-case workloads.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Bauer:2009:SFR,
  author =       "Reinhard Bauer and Daniel Delling",
  title =        "{SHARC}: Fast and robust unidirectional routing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "4:1--4:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "During recent years, impressive speed-up techniques
                 for Dijkstra's algorithm have been developed. Unfortunately, the
                 most advanced techniques use bidirectional search,
                 which makes it hard to use them in scenarios where a
                 backward search is prohibited. Even worse, such
                 scenarios are widespread (e.g.,
                 timetable-information systems or time-dependent
                 networks). In this work, we present a unidirectional
                 speed-up technique, which competes with bidirectional
                 approaches. Moreover, we show how to exploit the
                 advantage of unidirectional routing for fast exact
                 queries in timetable information systems and for fast
                 approximative queries in time-dependent scenarios. By
                 running experiments on several inputs other than road
                 networks, we show that our approach is very robust to
                 the input.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Coleman:2009:RTL,
  author =       "Tom Coleman and Anthony Wirth",
  title =        "Ranking tournaments: Local search and a new
                 algorithm",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "6:1--6:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Ranking is a fundamental activity for organizing and,
                 later, understanding data. Advice of the form ``$a$
                 should be ranked before $b$'' is given. If this advice
                 is consistent, and complete, then there is a total
                 ordering on the data and the ranking problem is
                 essentially a sorting problem. If the advice is
                 consistent, but incomplete, then the problem becomes
                 topological sorting. If the advice is inconsistent,
                 then we have the feedback arc set (FAS) problem: The
                 aim is then to rank a set of items to satisfy as much
                 of the advice as possible. An instance in which there
                 is advice about every pair of items is known as a
                 tournament. This ranking task is equivalent to ordering
                 the nodes of a given directed graph from left to right,
                 while minimizing the number of arcs pointing left. In
                 the past, much work focused on finding good, effective
                 heuristics for solving the problem. Recently, a proof
                 of the NP-completeness of the problem (even when
                 restricted to tournaments) has accompanied new
                 algorithms with approximation guarantees, culminating
                 in the development of a PTAS (polynomial time
                 approximation scheme) for solving FAS on tournaments.
                 In this article, we reexamine many existing algorithms
                 and develop some new techniques for solving FAS. The
                 algorithms are tested on both synthetic and
                 nonsynthetic datasets. We find that, in practice,
                 local-search algorithms are very powerful, even though
                 we prove that they do not have approximation
                 guarantees. Our new algorithm is based on reversing
                 arcs whose nodes have large in-degree differences,
                 eventually leading to a total ordering. Combining this
                 with a powerful local-search technique yields an
                 algorithm that is as strong, or stronger than, existing
                 techniques on a variety of data sets.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
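
A single-vertex-move local search, the basic move underlying the
local-search algorithms evaluated above, is easy to state in Python (an
illustration, not the authors' code; beats[u][v] encodes the advice "u
before v"): repeatedly reinsert a vertex at the position minimizing the
back arcs it participates in.

import random

def back_arcs(order, beats):
    """Number of advice arcs pointing left under the given order."""
    return sum(beats[order[j]][order[i]]
               for i in range(len(order)) for j in range(i + 1, len(order)))

def local_search(order, beats):
    """Move one vertex to its best position until no move improves."""
    order = list(order)
    improved = True
    while improved:
        improved = False
        for v in order[:]:
            rest = [u for u in order if u != v]
            viol = sum(beats[u][v] for u in rest)     # v placed first
            best_k, best_viol = 0, viol
            for k, u in enumerate(rest):              # slide v to the right
                viol += beats[v][u] - beats[u][v]
                if viol < best_viol:
                    best_k, best_viol = k + 1, viol
            cand = rest[:best_k] + [v] + rest[best_k:]
            if back_arcs(cand, beats) < back_arcs(order, beats):
                order, improved = cand, True
    return order

n = 8
beats = [[False] * n for _ in range(n)]
for i in range(n):
    for j in range(i + 1, n):                         # a random tournament
        winner = random.random() < 0.5
        beats[i][j], beats[j][i] = winner, not winner
print(back_arcs(local_search(list(range(n)), beats), beats))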

@Article{Cherkassky:2009:SPF,
  author =       "Boris V. Cherkassky and Loukas Georgiadis and Andrew
                 V. Goldberg and Robert E. Tarjan and Renato F.
                 Werneck",
  title =        "Shortest-path feasibility algorithms: An experimental
                 evaluation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  pages =        "7:1--7:??",
  month =        may,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jun 3 16:21:43 MDT 2009",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "This is an experimental study of algorithms for the
                 shortest-path feasibility problem: Given a directed
                 weighted graph, find a negative cycle or present a
                 short proof that none exists. We study previously known
                 and new algorithms. Our testbed is more extensive than
                 those previously used, including both static and
                 incremental problems, as well as worst-case instances.
                 We show that, while no single algorithm dominates, a
                 small subset (including new algorithms) has very robust
                 performance in practice. Our work advances the state of
                 the art in the area.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
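
The classical baseline for this problem is Bellman--Ford with cycle
extraction, sketched below in Python for orientation (the paper's tuned
codes are far more sophisticated): if a full relaxation pass still improves
some label after n rounds, walking the parent pointers recovers a negative
cycle; otherwise the final labels form a feasible potential, the "short
proof" that no negative cycle exists.

def feasibility(n, arcs):
    """arcs: (u, v, w) triples over vertices 0..n-1.
    Returns ('potential', dist) or ('cycle', vertex list)."""
    dist = [0] * n                 # implicit source with 0-weight arcs to all
    parent = [None] * n
    x = None
    for _ in range(n):
        x = None
        for u, v, w in arcs:
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                parent[v] = u
                x = v
        if x is None:              # no relaxation: dist is a feasible potential
            return ("potential", dist)
    for _ in range(n):             # still relaxing after n rounds: walk into
        x = parent[x]              # the negative cycle along parent pointers
    cycle, v = [x], parent[x]
    while v != x:
        cycle.append(v)
        v = parent[v]
    return ("cycle", cycle[::-1])

print(feasibility(3, [(0, 1, 1), (1, 2, -2), (2, 0, -1)]))   # negative cycle
print(feasibility(2, [(0, 1, -1), (1, 0, 2)]))               # feasible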

@Article{Demetrescu:2009:P,
  author =       "Camil Demetrescu",
  title =        "Preface",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "1:1--1:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Serna:2009:PSS,
  author =       "Maria Serna and Carme {\'A}lvarez",
  title =        "Preface to special section of selected papers from
                 {WEA 2006}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "1:1--1:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Maue:2009:GDS,
  author =       "Jens Maue and Peter Sanders and Domagoj Matijevic",
  title =        "Goal-directed shortest-path queries using precomputed
                 cluster distances",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "2:1--2:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We demonstrate how Dijkstra's algorithm for shortest
                 path queries can be accelerated by using precomputed
                 shortest path distances. Our approach allows a
                 completely flexible tradeoff between query time and
                 space consumption for precomputed distances. In
                 particular, sublinear space is sufficient to give the
                 search a strong ``sense of direction''. We evaluate our
                 approach experimentally using large, real-world road
                 networks.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
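
The precomputed-cluster-distances idea can be sketched compactly
(illustrative Python under simplifying assumptions: an in-memory adjacency
dict and a given vertex-to-cluster map; not the authors' implementation).
Since the cluster distance is a minimum over all vertex pairs of two
clusters, pcd[C(v)][C(t)] lower-bounds dist(v, t) and can steer the search
like an admissible A* heuristic.

import heapq
from collections import defaultdict

def multi_source_dijkstra(graph, sources):
    dist = {s: 0 for s in sources}
    heap = [(0, s) for s in sources]
    heapq.heapify(heap)
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist.get(u, float("inf")):
            continue
        for v, w in graph.get(u, []):
            if d + w < dist.get(v, float("inf")):
                dist[v] = d + w
                heapq.heappush(heap, (d + w, v))
    return dist

def cluster_distances(graph, cluster):
    """pcd[c1][c2] = min over u in c1, v in c2 of dist(u, v); one Dijkstra
    per cluster, run from all its members simultaneously."""
    members = defaultdict(list)
    for v, c in cluster.items():
        members[c].append(v)
    pcd = defaultdict(lambda: defaultdict(lambda: float("inf")))
    for c, vs in members.items():
        for v, d in multi_source_dijkstra(graph, vs).items():
            pcd[c][cluster[v]] = min(pcd[c][cluster[v]], d)
    return pcd

def goal_directed(graph, cluster, pcd, s, t):
    """Dijkstra steered by h(v) = pcd[C(v)][C(t)], an admissible lower
    bound on dist(v, t); reopening improved vertices keeps the search
    exact even though the bound need not be consistent."""
    h = lambda v: pcd[cluster[v]][cluster[t]]
    dist = {s: 0}
    heap = [(h(s), s)]
    while heap:
        f, u = heapq.heappop(heap)
        if u == t:
            return dist[t]
        if f - h(u) > dist[u]:
            continue                          # stale entry
        for v, w in graph.get(u, []):
            nd = dist[u] + w
            if nd < dist.get(v, float("inf")):
                dist[v] = nd
                heapq.heappush(heap, (nd + h(v), v))
    return None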

@Article{Valimaki:2009:ECS,
  author =       "N. V{\"a}lim{\"a}ki and V. M{\"a}kinen and W. Gerlach
                 and K. Dixit",
  title =        "Engineering a compressed suffix tree implementation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "2:1--2:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Suffix tree is one of the most important data
                 structures in string algorithms and biological sequence
                 analysis. Unfortunately, when it comes to implementing
                 those algorithms and applying them to real genomic
                 sequences, often the main memory size becomes the
                 bottleneck. This is easily explained by the fact that
                 while a DNA sequence of length $n$ from alphabet
                 $\Sigma = \{ A, C, G, T \}$ can be stored in $n \log
                 |\Sigma| = 2 n$ bits, its suffix tree occupies $O(n
                 \log n)$ bits. In practice, the size difference easily
                 reaches a factor of 50. We report on an implementation of
                 the compressed suffix tree very recently proposed by
                 Sadakane (2007). The compressed suffix tree occupies
                 space proportional to the text size, that is, $O(n \log
                 |\Sigma|)$ bits, and supports all typical suffix tree
                 operations with at most $\log n$ factor slowdown. Our
                 experiments show that, for example, on a 10 MB DNA
                 sequence, the compressed suffix tree takes 10\% of the
                 space of the normal suffix tree. At the same time, a
                 representative algorithm is slowed down by a factor of 30.
                 Our implementation follows the original proposal in
                 spirit, but some internal parts are tailored toward
                 practical implementation. Our construction algorithm
                 has time requirement $O(n \log n \log |\Sigma|)$ and
                 uses approximately the same space as the final structure
                 while constructing it: on the 10 MB DNA sequence, the
                 maximum space usage during construction is only 1.5
                 times the final product size. As by-products, we
                 develop a method to create the Succinct Suffix Array
                 directly from the Burrows--Wheeler transform and a
                 space-efficient version of the suffixes-insertion
                 algorithm to build the balanced-parentheses
                 representation of the suffix tree from LCP
                 information.",
  acknowledgement = ack-nhfb,
  articleno =    "2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
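
%%% A quick illustration (not from the paper) of the $n \log |\Sigma|
%%% = 2n$ bits figure quoted above: DNA over {A, C, G, T} packs into
%%% two bits per symbol, whereas a pointer-based suffix tree stores
%%% many machine words per node, which is where the factor-50 size
%%% gap observed in practice comes from.
%%%
%%%     # Pack a DNA string into 2 bits per base (2n bits total).
%%%     CODE = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
%%%
%%%     def pack_dna(seq):
%%%         bits = 0
%%%         for base in seq:
%%%             bits = (bits << 2) | CODE[base]
%%%         return bits, 2 * len(seq)     # packed value, size in bits
%%%
%%%     packed, nbits = pack_dna("GATTACA")
%%%     assert nbits == 14                # 7 bases * 2 bits each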

@Article{Eisenbrand:2009:ALO,
  author =       "Friedrich Eisenbrand and Andreas Karrenbauer and
                 Chihao Xu",
  title =        "Algorithms for longer {OLED} lifetime",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "3:1--3:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider an optimization problem arising in the
                 design of controllers for OLED displays. Our objective
                 is to minimize the amplitude of the electrical current
                 flowing through the diodes, which has a direct impact
                 on the lifetime of such a display. The optimization
                 problem consists of finding a decomposition of an image
                 into subframes with special structural properties that
                 allow the display driver to lower the stress on the
                 diodes. For monochrome images, we present an algorithm
                 that finds an optimal solution of this problem in
                 linear time. Moreover, we consider an online version of
                 the problem in which we have to make a decision for one
                 row based on a constant number of rows in the
                 lookahead. In this framework, this algorithm has a
                 tight competitive ratio. A generalization of this
                 algorithm computes near-optimal solutions of real-world
                 instances in real time.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Englert:2009:EOS,
  author =       "Matthias Englert and Heiko R{\"o}glin and Matthias
                 Westermann",
  title =        "Evaluation of online strategies for reordering
                 buffers",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "3:1--3:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A sequence of objects that are characterized by their
                 color has to be processed. Their processing order
                 influences how efficiently they can be processed: each
                 color change between two consecutive objects incurs a
                 cost. A reordering buffer, which is a random-access
                 buffer with storage capacity for $k$ objects, can be used
                 to rearrange this sequence online in such a way that
                 the total costs are reduced. This concept is useful for
                 many applications in computer science and economics.
                 The strategy with the best-known competitive ratio is
                 MAP. An upper bound of $O(\log k)$ on the competitive
                 ratio of MAP is known, but no nonconstant lower bound
                 is known. Based on
                 theoretical considerations and experimental
                 evaluations, we give strong evidence that the
                 previously used proof techniques are not suitable to
                 show an $o(\sqrt{\log k})$ upper bound on the
                 competitive ratio of MAP. However, we also give some
                 evidence that in fact MAP achieves a competitive ratio
                 of $O(1)$. Further, we evaluate the performance of
                 several strategies on random input sequences
                 experimentally. MAP and its variants RC and RR clearly
                 outperform the other strategies FIFO, LRU, and MCF. In
                 particular, MAP, RC, and RR are the only known
                 strategies whose competitive ratios do not depend on
                 the buffer size. Furthermore, MAP achieves the smallest
                 competitive ratio.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
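
%%% A minimal simulator of the reordering-buffer model described
%%% above (illustrative only; the FIFO rule below is one of the
%%% baseline strategies, not MAP): a buffer of capacity $k$ holds
%%% items, the strategy chooses which buffered item to output next,
%%% and the cost is the number of color blocks in the output.
%%%
%%%     def reorder_cost(colors, k, pick=None):
%%%         # pick(buf, last) returns the index of the item to output.
%%%         if pick is None:
%%%             pick = lambda buf, last: 0         # FIFO baseline
%%%         buf, last, cost = [], None, 0
%%%         def emit():
%%%             nonlocal last, cost
%%%             out = buf.pop(pick(buf, last))
%%%             if out != last:
%%%                 last, cost = out, cost + 1
%%%         for c in colors:
%%%             buf.append(c)
%%%             if len(buf) > k:
%%%                 emit()
%%%         while buf:
%%%             emit()
%%%         return cost
%%%
%%% A greedy rule that keeps the current color as long as possible is
%%% obtained with pick = lambda buf, last: buf.index(last) if last in
%%% buf else 0.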

@Article{Farshi:2009:ESG,
  author =       "Mohammad Farshi and Joachim Gudmundsson",
  title =        "Experimental study of geometric $t$-spanners",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "3:1--3:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The construction of t -spanners of a given point set
                 has received a lot of attention, especially from a
                 theoretical perspective. In this article, we
                 experimentally study the performance and quality of the
                 most common construction algorithms for points in the
                 Euclidean plane. We implemented the most well-known
                 $t$-spanner algorithms and tested them on a number of
                 different point sets. The experiments are discussed and
                 compared to the theoretical results, and in several
                 cases, we suggest modifications that are implemented
                 and evaluated. The measures of quality that we consider
                 are the number of edges, the weight, the maximum
                 degree, the spanner diameter, and the number of
                 crossings. This is the first time an extensive
                 comparison has been made between the running times of
                 construction algorithms of $t$-spanners and the quality
                 of the generated spanners.",
  acknowledgement = ack-nhfb,
  articleno =    "3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
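
%%% For concreteness, the classical greedy construction, one of the
%%% standard $t$-spanner algorithms covered by such studies (a plain
%%% sketch, not the authors' tuned code): process pairs by increasing
%%% distance and add an edge only when the current spanner has no
%%% path of length at most $t$ times the Euclidean distance.
%%%
%%%     import heapq, math
%%%
%%%     def greedy_spanner(points, t):
%%%         n = len(points)
%%%         d = lambda i, j: math.dist(points[i], points[j])
%%%         adj = {i: [] for i in range(n)}
%%%
%%%         def within(s, goal, limit):
%%%             # Dijkstra in the partial spanner, pruned at `limit`.
%%%             dist, heap = {s: 0.0}, [(0.0, s)]
%%%             while heap:
%%%                 g, v = heapq.heappop(heap)
%%%                 if v == goal:
%%%                     return True
%%%                 if g > dist.get(v, math.inf):
%%%                     continue
%%%                 for w, wt in adj[v]:
%%%                     ng = g + wt
%%%                     if ng <= limit and ng < dist.get(w, math.inf):
%%%                         dist[w] = ng
%%%                         heapq.heappush(heap, (ng, w))
%%%             return False
%%%
%%%         edges = []
%%%         pairs = [(i, j) for i in range(n) for j in range(i)]
%%%         for i, j in sorted(pairs, key=lambda p: d(*p)):
%%%             if not within(i, j, t * d(i, j)):
%%%                 adj[i].append((j, d(i, j)))
%%%                 adj[j].append((i, d(i, j)))
%%%                 edges.append((i, j))
%%%         return edges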

@Article{Cederman:2009:GQP,
  author =       "Daniel Cederman and Philippas Tsigas",
  title =        "{GPU-Quicksort}: a practical {Quicksort} algorithm for
                 graphics processors",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "4:1--4:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this article, we describe GPU-Quicksort, an
                 efficient Quicksort algorithm suitable for highly
                 parallel multicore graphics processors. Quicksort has
                 previously been considered an inefficient sorting
                 solution for graphics processors, but we show that in
                 CUDA, NVIDIA's programming platform for general-purpose
                 computations on graphical processors, GPU-Quicksort
                 performs better than the fastest-known sorting
                 implementations for graphics processors, such as radix
                 and bitonic sort. Quicksort can thus be seen as a
                 viable alternative for sorting large quantities of data
                 on graphics processors.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Chimani:2009:EEC,
  author =       "Markus Chimani and Carsten Gutwenger and Petra
                 Mutzel",
  title =        "Experiments on exact crossing minimization using
                 column generation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "4:1--4:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The crossing number of a graph G is the smallest
                 number of edge crossings in any drawing of G into the
                 plane. Recently, the first branch-and-cut approach for
                 solving the crossing number problem has been presented
                 in Buchheim et al. [2005]. Its major drawback was the
                 huge number of variables out of which only very few
                 were actually used in the optimal solution. This
                 restricted the algorithm to rather small graphs with
                 low crossing number. In this article, we discuss two
                 column generation schemes; the first is based on
                 traditional algebraic pricing, and the second uses
                 combinatorial arguments to decide whether and which
                 variables need to be added. The main focus of this
                 article is the experimental comparison between the
                 original approach and these two schemes. In addition,
                 we evaluate the quality achieved by the best-known
                 crossing number heuristic by comparing the new results
                 with the results of the heuristic.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Putze:2009:CHS,
  author =       "Felix Putze and Peter Sanders and Johannes Singler",
  title =        "Cache-, hash-, and space-efficient {Bloom} filters",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "4:1--4:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A Bloom filter is a very compact data structure that
                 supports approximate membership queries on a set,
                 allowing false positives. We propose several new
                 variants of Bloom filters and replacements with similar
                 functionality. All of them have better cache-efficiency
                 and need fewer hash bits than regular Bloom filters.
                 Some use SIMD functionality, while others provide even
                 better space efficiency. As a
                 consequence, we get a more flexible trade-off between
                 false-positive rate, space-efficiency,
                 cache-efficiency, hash-efficiency, and computational
                 effort. We analyze the efficiency of Bloom filters and
                 the proposed replacements in detail, in terms of the
                 false-positive rate, the number of expected
                 cache-misses, and the number of required hash bits. We
                 also describe and experimentally evaluate the
                 performance of highly tuned implementations. For many
                 settings, our alternatives perform better than the
                 methods proposed so far.",
  acknowledgement = ack-nhfb,
  articleno =    "4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
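
%%% A sketch of the cache-efficiency idea in the spirit of the
%%% article's blocked Bloom filter variants (parameters and hashing
%%% are assumptions for illustration): the first hash selects one
%%% cache-line-sized block and all $k$ bits land inside it, so a
%%% query touches a single cache line instead of $k$ random ones.
%%%
%%%     import hashlib
%%%
%%%     BLOCK_BITS = 512                  # one 64-byte cache line
%%%     K = 4                             # bits set per key
%%%
%%%     class BlockedBloom:
%%%         def __init__(self, n_blocks):
%%%             self.n_blocks = n_blocks
%%%             self.blocks = [0] * n_blocks  # 512-bit blocks as ints
%%%
%%%         def _positions(self, key):
%%%             digest = hashlib.blake2b(key.encode()).digest()
%%%             h = int.from_bytes(digest, 'big')
%%%             block, h = h % self.n_blocks, h // self.n_blocks
%%%             bits = []
%%%             for _ in range(K):            # all K bits in one block
%%%                 bits.append(h % BLOCK_BITS)
%%%                 h //= BLOCK_BITS
%%%             return block, bits
%%%
%%%         def add(self, key):
%%%             block, bits = self._positions(key)
%%%             for b in bits:
%%%                 self.blocks[block] |= 1 << b
%%%
%%%         def __contains__(self, key):
%%%             block, bits = self._positions(key)
%%%             return all(self.blocks[block] >> b & 1 for b in bits)
%%%
%%%     f = BlockedBloom(1024)
%%%     f.add("algorithm")
%%%     assert "algorithm" in f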

@Article{Chimani:2009:OOC,
  author =       "Markus Chimani and Maria Kandyba and Ivana Ljubi{\'c}
                 and Petra Mutzel",
  title =        "Obtaining optimal $k$-cardinality trees fast",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "2.5:1--2.5:23",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1498698.1537600",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Given an undirected graph G = (V, E) with edge weights
                 and a positive integer number k, the k -cardinality
                 tree problem consists of finding a subtree T of G with
                 exactly k edges and the minimum possible weight. Many
                 algorithms have been proposed to solve this NP-hard
                 problem, resulting in mainly heuristic and
                 metaheuristic approaches. In this article, we present
                 an exact ILP-based algorithm using directed cuts. We
                 mathematically compare the strength of our formulation
                 to the previously known ILP formulations of this
                 problem, and show the advantages of our approach.
                 Afterwards, we give an extensive study of the
                 algorithm's practical performance compared to the
                 state-of-the-art metaheuristics. In contrast to the
                 widespread assumption that such a problem cannot be
                 efficiently tackled by exact algorithms for medium and
                 large graphs (between 200 and 5,000 nodes), our results
                 show that our algorithm not only has the advantage of
                 proving the optimality of the computed solution, but
                 also often outperforms the metaheuristic approaches in
                 terms of running time.",
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Frias:2009:LRC,
  author =       "Leonor Frias and Jordi Petit and Salvador Roura",
  title =        "Lists revisited: Cache-conscious {STL} lists",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "5:1--5:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present three cache-conscious implementations of
                 STL standard compliant lists. Until now, one could
                 either find simple doubly linked list implementations
                 that easily cope with standard strict requirements, or
                 theoretical approaches that do not take into account
                 any of these requirements in their design. In contrast,
                 we have merged both approaches, paying special
                 attention to iterator constraints. In this article,
                 the competitiveness of our implementations is
                 demonstrated with an extensive experimental analysis.
                 This shows,
                 for instance, 5 to 10 times faster traversals and 3 to
                 5 times faster internal sort.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Holzer:2009:EPS,
  author =       "Martin Holzer and Frank Schulz and Dorothea Wagner and
                 Grigorios Prasinos and Christos Zaroliagis",
  title =        "Engineering planar separator algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "5:1--5:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider classical linear-time planar separator
                 algorithms, determining for a given planar graph a
                 small subset of its nodes whose removal divides the
                 graph into two components of similar size. These
                 algorithms are based on planar separator theorems,
                 which guarantee separators of size $O(\sqrt n)$ and
                 remaining components of size at most $2 n / 3$ (where
                 $n$ denotes the number of nodes in the graph). In this
                 article, we present a comprehensive experimental study
                 of the classical algorithms applied to a large variety
                 of graphs, where our main goal is to find separators
                 that not only satisfy upper bounds but also possess
                 other desirable characteristics with respect to
                 separator size and component balance. We achieve this
                 by investigating a number of specific alternatives for
                 the concrete implementation and fine-tuning of certain
                 parts of the classical algorithms. It is also shown
                 that the choice of several parameters influences the
                 separation quality considerably. Moreover, we propose
                 the use of fundamental cycles as planar separators,
                 whose size is at most twice the diameter of the graph:
                 For graphs of small diameter, the guaranteed bound is
                 better than the $O(\sqrt n)$ bounds, and it turns out
                 that this simple strategy almost always outperforms the
                 other algorithms, even for graphs with large
                 diameter.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
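
%%% The fundamental-cycle idea above in a few lines of Python (a
%%% didactic sketch; the article's separators involve planarity
%%% machinery omitted here): a non-tree edge $(u, v)$ of a BFS tree
%%% closes a cycle through the LCA of $u$ and $v$, so its length is
%%% at most twice the tree depth plus one; in a planar graph,
%%% deleting such a cycle separates its interior from its exterior.
%%%
%%%     from collections import deque
%%%
%%%     def fundamental_cycle(adj, root, u, v):
%%%         # BFS tree from root; (u, v) must be a non-tree edge.
%%%         parent, depth = {root: None}, {root: 0}
%%%         queue = deque([root])
%%%         while queue:
%%%             x = queue.popleft()
%%%             for y in adj[x]:
%%%                 if y not in parent:
%%%                     parent[y], depth[y] = x, depth[x] + 1
%%%                     queue.append(y)
%%%         cycle = {u, v}
%%%         while u != v:                  # climb to the LCA
%%%             if depth[u] < depth[v]:
%%%                 u, v = v, u
%%%             u = parent[u]
%%%             cycle.add(u)
%%%         return cycle                   # candidate separator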

@Article{Tarjan:2009:DTP,
  author =       "Robert E. Tarjan and Renato F. Werneck",
  title =        "Dynamic trees in practice",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "5:1--5:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Dynamic tree data structures maintain forests that
                 change over time through edge insertions and deletions.
                 Besides maintaining connectivity information in
                 logarithmic time, they can support aggregation of
                 information over paths, trees, or both. We perform an
                 experimental comparison of several versions of dynamic
                 trees: ST-trees, ET-trees, RC-trees, and two variants
                 of top trees (self-adjusting and worst-case). We
                 quantify their strengths and weaknesses through tests
                 with various workloads, most stemming from practical
                 applications. We observe that a simple, linear-time
                 implementation is remarkably fast for graphs of small
                 diameter, and that worst-case and randomized data
                 structures are best when queries are very frequent. The
                 best overall performance, however, is achieved by
                 self-adjusting ST-trees.",
  acknowledgement = ack-nhfb,
  articleno =    "5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Avdil:2009:LSS,
  author =       "Alaubek Avdil and Karsten Weihe",
  title =        "Local search starting from an {LP} solution: Fast and
                 quite good",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "6:1--6:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present and evaluate a specific way to generate
                 good start solutions for local search. The start
                 solution is computed from a certain LP, which is
                 related to the underlying problem. We consider three
                 optimization problems: the directed MAX-CUT problem
                 with a source and a sink and two variations of the
                 MAX-$k$-SAT problem with $k = 2$ and $k = 3$. To compare our
                 technique, we run local search repeatedly with random
                 start solutions. Our technique produces, consistently,
                 final solutions whose objective values are not too far
                 from the best solutions from repeated random starts.
                 The surprising degree of stability and uniformity of
                 this result throughout all of our experiments on
                 various classes of instances strongly suggests that we
                 have consistently achieved nearly optimal solutions. On
                 the other hand, the runtime of our technique is rather
                 small, so the technique is very efficient and probably
                 quite accurate.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Deineko:2009:FMW,
  author =       "Vladimir Deineko and Alexander Tiskin",
  title =        "Fast minimum-weight double-tree shortcutting for
                 metric {TSP}: Is the best one good enough?",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "6:1--6:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The Metric Traveling Salesman Problem (TSP) is a
                 classical NP-hard optimization problem. The double-tree
                 shortcutting method for Metric TSP yields an
                 exponentially sized space of TSP tours, each of which
                 approximates the optimal solution within, at most, a
                 factor of 2. We consider the problem of finding among
                 these tours the one that gives the closest
                 approximation, that is, the minimum-weight double-tree
                 shortcutting. Burkard et al. gave an algorithm for this
                 problem, running in time $O(n^3 + 2^d n^2)$ and memory
                 $O(2^d n^2)$, where $d$ is the maximum node degree in
                 the rooted minimum spanning tree. We give an improved
                 algorithm for the case of small $d$ (including planar
                 Euclidean TSP, where $d \leq 4$), running in time
                 $O(4^d n^2)$ and memory $O(4^d n)$. This improvement
                 allows one to solve the problem on much larger
                 instances than previously attempted. Our computational
                 experiments suggest that in terms of the time-quality
                 trade-off, the minimum-weight double-tree shortcutting
                 method provides one of the best existing
                 tour-constructing heuristics.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
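
%%% Background for the entry above: the textbook double-tree
%%% heuristic (a plain sketch, not the authors' minimum-weight
%%% shortcutting algorithm) doubles a minimum spanning tree and
%%% shortcuts its Euler tour; a DFS preorder of the tree is one such
%%% shortcutting, and the article searches the whole space of
%%% shortcuttings for the best one.
%%%
%%%     import math
%%%
%%%     def double_tree_tour(points):
%%%         n = len(points)
%%%         d = lambda i, j: math.dist(points[i], points[j])
%%%         # Prim's algorithm for the minimum spanning tree.
%%%         in_tree, parent = {0}, {}
%%%         best = {v: (d(0, v), 0) for v in range(1, n)}
%%%         while len(in_tree) < n:
%%%             v = min(best, key=lambda u: best[u][0])
%%%             parent[v] = best.pop(v)[1]
%%%             in_tree.add(v)
%%%             for u in best:
%%%                 if d(v, u) < best[u][0]:
%%%                     best[u] = (d(v, u), v)
%%%         children = {v: [] for v in range(n)}
%%%         for v, p in parent.items():
%%%             children[p].append(v)
%%%         # DFS preorder = one shortcutting of the doubled tree.
%%%         tour, stack = [], [0]
%%%         while stack:
%%%             v = stack.pop()
%%%             tour.append(v)
%%%             stack.extend(reversed(children[v]))
%%%         return tour                    # close the cycle at tour[0]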

@Article{Figueroa:2009:SSA,
  author =       "Karina Figueroa and Edgar Chavez and Gonzalo Navarro
                 and Rodrigo Paredes",
  title =        "Speeding up spatial approximation search in metric
                 spaces",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "6:1--6:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Proximity searching consists of retrieving from a
                 database those elements that are similar to a query
                 object. The usual model for proximity searching is a
                 metric space where the distance, which models the
                 proximity, is expensive to compute. An index uses
                 precomputed distances to speed up query processing.
                 Among all the known indices, the baseline for
                 performance for about 20 years has been AESA. This
                 index uses an iterative procedure, where at each
                 iteration it first chooses the next promising element
                 (``pivot'') to compare to the query, and then it
                 discards database elements that can be proved not
                 relevant to the query using the pivot. The next pivot
                 in AESA is chosen as the one minimizing the sum of
                 lower bounds to the distance to the query proved by
                 previous pivots. In this article, we introduce the new
                 index iAESA, which establishes a new performance
                 baseline for metric space searching. The difference
                 with AESA is the method to select the next pivot. In
                 iAESA, each candidate sorts previous pivots by
                 closeness to it, and chooses the next pivot as the
                 candidate whose order is most similar to that of the
                 query. We also propose a modification to AESA-like
                 algorithms to turn them into probabilistic algorithms.
                 Our empirical results confirm a consistent improvement
                 in query performance. For example, we perform as few as
                 60\% of the distance evaluations of AESA in a database
                 of documents, a very important and difficult real-life
                 instance of the problem. For the probabilistic
                 algorithm, on a database of faces we perform up to
                 40\% of the comparisons made by the best alternative
                 algorithm to retrieve the same percentage of the
                 correct answers. Based on the empirical results, we
                 conjecture that the new probabilistic AESA-like
                 algorithms will become, as AESA has been for exact
                 algorithms, a reference point establishing, in
                 practice, a lower bound on how good a probabilistic
                 proximity search algorithm can be.",
  acknowledgement = ack-nhfb,
  articleno =    "6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
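
%%% For context, a compact range-query version of the AESA baseline
%%% that the entry above improves on (a didactic sketch; iAESA
%%% changes only the next-pivot rule, ordering candidates by how
%%% closely their pivot ranking resembles the query's): every
%%% evaluated pivot p tightens the lower bound |d(q,p) - d(p,u)| on
%%% each remaining candidate u, and candidates whose bound exceeds
%%% the radius are discarded without computing their distance.
%%%
%%%     def aesa_range(db_dist, q_dist, n, radius):
%%%         # db_dist[p][u]: precomputed distances; q_dist(p):
%%%         # distance from the query to p (the expensive operation).
%%%         alive, lb = set(range(n)), [0.0] * n
%%%         results, evaluations = [], 0
%%%         while alive:
%%%             p = min(alive, key=lambda u: lb[u])  # promising pivot
%%%             alive.remove(p)
%%%             d, evaluations = q_dist(p), evaluations + 1
%%%             if d <= radius:
%%%                 results.append(p)
%%%             for u in list(alive):
%%%                 lb[u] = max(lb[u], abs(d - db_dist[p][u]))
%%%                 if lb[u] > radius:     # triangle inequality prunes
%%%                     alive.remove(u)
%%%         return results, evaluations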

@Article{Barbay:2009:EIS,
  author =       "J{\'e}r{\'e}my Barbay and Alejandro L{\'o}pez-Ortiz
                 and Tyler Lu and Alejandro Salinger",
  title =        "An experimental investigation of set intersection
                 algorithms for text searching",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "7:1--7:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The intersection of large ordered sets is a common
                 problem in the context of the evaluation of Boolean
                 queries to a search engine. In this article, we propose
                 several improved algorithms for computing the
                 intersection of sorted arrays, and in particular for
                 searching sorted arrays in the intersection context. We
                 perform an experimental comparison with the algorithms
                 from the previous studies from Demaine,
                 L{\'o}pez-Ortiz, and Munro [ALENEX 2001] and from
                 Baeza-Yates and Salinger [SPIRE 2005]; in addition, we
                 implement and test the intersection algorithm from
                 Barbay and Kenyon [SODA 2002] and its randomized
                 variant [SAGA 2003]. We consider the random data
                 set from Baeza-Yates and Salinger, the Google queries
                 used by Demaine et al., a corpus provided by Google,
                 and a larger corpus from the TREC Terabyte 2006
                 efficiency query stream, along with its own query log.
                 We measure the performance both in terms of the number
                 of comparisons and searches performed, and in terms of
                 the CPU time on two different architectures. Our
                 results confirm or improve the results from both
                 previous studies in their respective context
                 (comparison model on real data, and CPU measures on
                 random data) and extend them to new contexts. In
                 particular, we show that value-based search algorithms
                 perform well in posting lists in terms of the number of
                 comparisons performed.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
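
%%% For illustration, the value-based doubling (``galloping'') search
%%% that this line of work builds on (a generic sketch, not any one
%%% of the compared algorithms): intersect sorted arrays by searching
%%% for each element of the smallest set in the others, advancing by
%%% doubling steps before binary-searching the bracketed range.
%%%
%%%     from bisect import bisect_left
%%%
%%%     def gallop(arr, x, lo):
%%%         # Insertion point of x in arr[lo:]: double, then bisect.
%%%         step = 1
%%%         while lo + step < len(arr) and arr[lo + step] < x:
%%%             step *= 2
%%%         return bisect_left(arr, x, lo, min(lo + step, len(arr)))
%%%
%%%     def intersect(sets):
%%%         sets = sorted(sets, key=len)  # drive with the smallest set
%%%         cursors = [0] * len(sets)
%%%         result = []
%%%         for x in sets[0]:
%%%             ok = True
%%%             for i in range(1, len(sets)):
%%%                 cursors[i] = gallop(sets[i], x, cursors[i])
%%%                 if (cursors[i] == len(sets[i])
%%%                         or sets[i][cursors[i]] != x):
%%%                     ok = False
%%%                     break
%%%             if ok:
%%%                 result.append(x)
%%%         return result
%%%
%%%     assert intersect([[1, 3, 7, 9], [3, 4, 7, 12], [0, 3, 7]]) == [3, 7]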

@Article{Estivill-Castro:2009:RRD,
  author =       "Vladimir Estivill-Castro and Apichat Heednacram and
                 Francis Suraweera",
  title =        "Reduction rules deliver efficient {FPT}-algorithms for
                 covering points with lines",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "7:1--7:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present efficient algorithms to solve the Line
                 Cover Problem exactly. In this NP-complete problem, the
                 inputs are $n$ points in the plane and a positive
                 integer $k$, and we are asked to decide whether we can
                 cover these $n$ points with at most $k$ lines. Our
                 approach is based on
                 fixed-parameter tractability and, in particular,
                 kernelization. We propose several reduction rules to
                 transform instances of Line Cover into equivalent
                 smaller instances. Once instances are no longer
                 susceptible to these reduction rules, we obtain a
                 problem kernel whose size is bounded by a polynomial
                 function of the parameter $k$ and does not depend on the
                 size $n$ of the input. Our algorithms provide exact
                 solutions and are easy to implement. We also describe
                 the design of algorithms to solve the corresponding
                 optimization problem exactly. We experimentally
                 evaluated ten variants of the algorithms to determine
                 the impact and trade-offs of several reduction rules.
                 We show that our approach provides tractability for a
                 larger range of values of the parameter and larger
                 inputs, improving the execution time by several orders
                 of magnitude with respect to earlier algorithms that
                 use less rules.",
  acknowledgement = ack-nhfb,
  articleno =    "7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
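
%%% The key reduction rule above is easy to state and to code (a
%%% naive sketch under stated assumptions, not the authors'
%%% implementation): two distinct lines share at most one point, so a
%%% line through at least $k + 1$ of the points must belong to every
%%% solution of size at most $k$; once the rule no longer applies,
%%% each line covers at most $k$ points, so more than $k^2$ remaining
%%% points imply a no-instance.
%%%
%%%     from itertools import combinations
%%%
%%%     def collinear(p, q, r):
%%%         return ((q[0] - p[0]) * (r[1] - p[1])
%%%                 == (q[1] - p[1]) * (r[0] - p[0]))
%%%
%%%     def kernelize(points, k):
%%%         # Returns (forced_lines, kernel) or None for a no-instance.
%%%         points, forced = set(points), 0
%%%         changed = True
%%%         while changed and points:
%%%             changed = False
%%%             for p, q in combinations(points, 2):
%%%                 on_line = {r for r in points if collinear(p, q, r)}
%%%                 if len(on_line) >= k - forced + 1:  # forced line
%%%                     points -= on_line
%%%                     forced += 1
%%%                     changed = True
%%%                     break
%%%         if forced > k or len(points) > (k - forced) ** 2:
%%%             return None
%%%         return forced, points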

@Article{DeLoera:2009:CMM,
  author =       "Jes{\'u}s A. {De Loera} and David C. Haws and Jon Lee
                 and Allison O'Hair",
  title =        "Computation in multicriteria matroid optimization",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "14",
  number =       "1",
  pages =        "8:1--8:??",
  month =        dec,
  year =         "2009",
  CODEN =        "????",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:04:28 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Motivated by recent work on algorithmic theory for
                 nonlinear and multicriteria matroid optimization, we
                 have developed algorithms and heuristics aimed at
                 practical solution of large instances of some of these
                 difficult problems. Our methods primarily use the local
                 adjacency structure inherent in matroid polytopes to
                 pivot to feasible solutions, which may or may not be
                 optimal. We also present a modified
                 breadth-first-search heuristic that uses adjacency to
                 enumerate a subset of feasible solutions. We present
                 other heuristics and provide computational evidence
                 supporting our techniques. We implemented all of our
                 algorithms in the software package MOCHA.",
  acknowledgement = ack-nhfb,
  articleno =    "8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Jacobs:2010:ESR,
  author =       "Tobias Jacobs",
  title =        "An experimental study of recent hotlink assignment
                 algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "1.1:1--1.1:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1671970.1671971",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:05:50 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The concept of {\em hotlink assignment\/} aims at
                 enhancing the structure of Web sites such that the
                 user's expected navigation effort is minimized. We
                 concentrate on sites that are representable by trees
                 and assume that each leaf carries a weight representing
                 its popularity.\par

                 The problem of optimally adding at most one additional
                 outgoing edge (``hotlink'') to each inner node has been
                 widely studied. A considerable number of approximation
                 algorithms have been proposed and worst-case bounds for
                 the quality of the computed solutions have been given.
                 However, only little is known about the practical
                 behavior of most of these algorithms.\par

                 This article contributes to closing this gap by
                 evaluating all recently proposed strategies
                 experimentally. Our experiments are based on trees
                 extracted from real Web sites, as well as on synthetic
                 instances. The latter are generated by a new method
                 that simulates the growth of a Web site over time.
                 Finally, we present a new heuristic that is easy to
                 implement and exhibits excellent behavior in
                 practice.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "approximation; hotlink; Search tree",
}

@Article{Spence:2010:SGS,
  author =       "Ivor Spence",
  title =        "{{\tt sgen1}}: a generator of small but difficult
                 satisfiability benchmarks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "1.2:1--1.2:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1671970.1671972",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:05:50 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The satisfiability problem is known to be NP-Complete;
                 therefore, there should be relatively small problem
                 instances that take a very long time to solve. However,
                 most of the smaller benchmarks that were once thought
                 challenging, especially the satisfiable ones, can be
                 processed quickly by modern SAT-solvers. We describe
                 and make available a generator that produces both
                 unsatisfiable and, more significantly, satisfiable
                 formulae that take longer to solve than any others
                 known. At the two most recent international SAT
                 Competitions, the smallest unsolved benchmarks were
                 created by this generator. We analyze the results of
                 all solvers in the most recent competition when applied
                 to these benchmarks and also present our own more
                 focused experiments.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "SAT-solvers; Satisfiability benchmarks",
}

@Article{Langguth:2010:HIB,
  author =       "Johannes Langguth and Fredrik Manne and Peter
                 Sanders",
  title =        "Heuristic initialization for bipartite matching
                 problems",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "1.3:1--1.3:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1712655.1712656",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Mar 15 12:05:50 MDT 2010",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "It is a well-established result that improved pivoting
                 in linear solvers can be achieved by computing a
                 bipartite matching between matrix entries and positions
                 on the main diagonal. With the availability of
                 increasingly faster linear solvers, the speed of
                 bipartite matching computations must keep up to avoid
                 slowing down the main computation. Fast algorithms for
                 bipartite matching, which are usually initialized with
                 simple heuristics, have been known for a long time.
                 However, the performance of these algorithms is largely
                 dependent on the quality of the heuristic. We compare
                 combinations of several known heuristics and exact
                 algorithms to find fast combined methods, using
                 real-world matrices as well as randomly generated
                 instances. In addition, we present a new heuristic
                 aimed at obtaining high-quality matchings and compare
                 its impact on bipartite matching algorithms with that
                 of other heuristics. The experiments suggest that its
                 performance compares favorably to the best-known
                 heuristics, and that it is especially suited for
                 application in linear solvers.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "Heuristics; matching",
}
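
%%% A concrete example of the kind of initialization heuristic being
%%% compared (a generic greedy sketch, not the article's new
%%% heuristic): match each left-hand vertex to a free neighbour,
%%% preferring neighbours of low degree; the exact algorithm then
%%% only needs augmenting paths for the vertices left unmatched.
%%%
%%%     def greedy_init(adj, n_right):
%%%         # adj[u]: right-hand neighbours of left vertex u.
%%%         deg = [0] * n_right
%%%         for nbrs in adj:
%%%             for v in nbrs:
%%%                 deg[v] += 1
%%%         match_l = [None] * len(adj)
%%%         match_r = [None] * n_right
%%%         for u, nbrs in enumerate(adj):
%%%             free = [v for v in nbrs if match_r[v] is None]
%%%             if free:
%%%                 v = min(free, key=lambda x: deg[x])  # rarest first
%%%                 match_l[u], match_r[v] = v, u
%%%         return match_l, match_r
%%%
%%%     ml, mr = greedy_init([[0, 1], [1], [1, 2]], 3)
%%%     assert sum(v is not None for v in ml) == 3   # perfect here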

@Article{Delbot:2010:AEC,
  author =       "Fran{\c{c}}ois Delbot and Christian Laforest",
  title =        "Analytical and experimental comparison of six
                 algorithms for the vertex cover problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "14:1--14:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1865970.1865971",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The vertex cover is a well-known NP-complete
                 minimization problem in graphs that has received a lot
                 of attention in recent decades. Many algorithms have
                 been proposed to construct vertex covers in different
                 contexts (offline, online, list algorithms, etc.),
                 leading to solutions of different levels of quality.
                 This quality is traditionally measured in terms of
                 approximation ratio, that is, the worst possible ratio
                 between the quality of the solution constructed and the
                 optimal one. For the vertex cover problem, the known
                 ratios range between 2 (conjectured to be the smallest
                 constant ratio) and $\Delta$, the maximum
                 degree of the graph. Based on this measure of quality,
                 the hierarchy is almost clear (the smaller the ratio
                 is, the better the algorithm is). In this article, we
                 show that this measure, although of great importance,
                 is too macroscopic and does not reflect the practical
                 behavior of the methods. We prove this by analyzing
                 (known and recent) algorithms running on a particular
                 class of graphs: the paths. We obtain closed and exact
                 formulas for the mean sizes of the vertex covers
                 constructed by these different algorithms. Then, we
                 assess their quality experimentally in several
                 well-chosen classes of graphs (random, regular, trees,
                 BHOSLIB benchmarks, trap graphs, etc.). The synthesis
                 of all these results leads us to formulate a ``practical
                 hierarchy'' of the algorithms. We remark that it is,
                 more or less, the opposite of the one based only on
                 approximation ratios, showing that worst-case analysis
                 only gives partial information on the quality of an
                 algorithm.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
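
%%% For reference, the classic maximal-matching algorithm, which
%%% attains the ratio 2 quoted above (a textbook sketch; the
%%% article's list and online variants differ): take both endpoints
%%% of any maximal matching, since every cover must contain at least
%%% one endpoint of each matched edge.
%%%
%%%     def matching_vertex_cover(edges):
%%%         cover = set()
%%%         for u, v in edges:
%%%             if u not in cover and v not in cover:
%%%                 cover.update((u, v))
%%%         return cover
%%%
%%%     # On the path a-b-c-d the greedy matching picks (a, b) and
%%%     # (c, d): cover size 4 versus the optimum {b, c} of size 2,
%%%     # illustrating ratio-2 behaviour on paths, the graph class
%%%     # the article analyzes.
%%%     path = [("a", "b"), ("b", "c"), ("c", "d")]
%%%     assert len(matching_vertex_cover(path)) == 4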

@Article{Arroyuelo:2010:PAR,
  author =       "Diego Arroyuelo and Gonzalo Navarro",
  title =        "Practical approaches to reduce the space requirement
                 of {Lempel--Ziv}-based compressed text indices",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "15:1--15:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1671970.1883684",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Given a text $T [1.\,.n]$ over an alphabet of size
                 $\sigma$, the full-text search problem consists of
                 locating the {\em occ} occurrences of a given pattern
                 $P[1.\,.m]$ in $T$. Compressed full-text self-indices
                 are space-efficient representations of the text that
                 provide direct access to and indexed search on
                 it.\par

                 The LZ-index of Navarro is a compressed full-text
                 self-index based on the LZ78 compression algorithm.
                 This index requires about 5 times the size of the
                 compressed text (in theory, $4 n H_k(T) + o(n \log
                 \sigma)$ bits of space, where $H_k(T)$ is the $k$-th
                 order empirical entropy of $T$). In practice, the
                 average locating complexity of the LZ-index is
                 $O(\sigma m \log_\sigma n + {\rm occ} \sigma^{m / 2})$,
                 where {\em occ} is the number of occurrences of $P$. It
                 can extract text substrings of length $l$ in $O(l)$
                 time. This index outperforms competing schemes both to
                 locate short patterns and to extract text snippets.
                 However, the LZ-index can be up to 4 times larger than
                 the smallest existing indices (which use $n H_k(T) +
                 o(n \log \sigma)$ bits in theory), and it does not
                 offer space/time tuning options. This limits its
                 applicability.\par

                 In this article, we study practical ways to reduce the
                 space of the LZ-index. We obtain new LZ-index variants
                 that require $2(1 + \epsilon) n H_k(T) + o(n \log
                 \sigma)$ bits of space, for any $0 < \epsilon < 1$.
                 They have an average locating time of $O(1 / \epsilon
                 (m \log n + {\rm occ} \sigma^{m / 2}))$, while
                 extracting takes $O(l)$ time.\par

                 We perform extensive experimentation and conclude that
                 our schemes are able to reduce the space of the
                 original LZ-index to $2/3$ of its original size, that
                 is, to around $3$ times the compressed text size. Our
                 schemes are
                 able to extract about 1 to 2 MB of the text per second,
                 being twice as fast as the most competitive
                 alternatives. Pattern occurrences are located at a rate
                 of up to 1 to 4 million per second. This constitutes
                 the best space\slash time trade-off when indices are
                 allowed to use 4 times the size of the compressed text
                 or more.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
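
%%% For readers unfamiliar with the underlying compressor, the LZ78
%%% parsing on which the LZ-index is built (a plain sketch, unrelated
%%% to the index structures themselves): the text is split into
%%% phrases, each a previously seen phrase extended by one character,
%%% and the phrases form a trie with one node per phrase.
%%%
%%%     def lz78_parse(text):
%%%         # Phrases as (parent_phrase_id, extension_char) pairs;
%%%         # phrase 0 is the empty phrase.
%%%         dictionary = {"": 0}
%%%         phrases, current = [], ""
%%%         for ch in text:
%%%             if current + ch in dictionary:
%%%                 current += ch
%%%             else:
%%%                 phrases.append((dictionary[current], ch))
%%%                 dictionary[current + ch] = len(dictionary)
%%%                 current = ""
%%%         if current:   # trailing repeat of an earlier phrase
%%%             phrases.append((dictionary[current[:-1]], current[-1]))
%%%         return phrases
%%%
%%%     # "abababa" parses as a | b | ab | aba.
%%%     assert lz78_parse("abababa") == [(0, "a"), (0, "b"),
%%%                                      (1, "b"), (3, "a")]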

@Article{Ullmann:2010:BVA,
  author =       "Julian R. Ullmann",
  title =        "Bit-vector algorithms for binary constraint
                 satisfaction and subgraph isomorphism",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "16:1--16:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1671970.1921702",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A solution to a binary constraint satisfaction problem
                 is a set of discrete values, one in each of a given set
                 of domains, subject to constraints that allow only
                 prescribed pairs of values in specified pairs of
                 domains. Solutions are sought by backtrack search
                 interleaved with a process that removes from domains
                 those values that are currently inconsistent with
                 provisional choices already made in the course of
                 search. For each value in a given domain, a bit-vector
                 shows which values in another domain are or are not
                 permitted in a solution. Bit-vector representation of
                 constraints allows bit-parallel, therefore fast,
                 operations for editing domains during search. This
                 article revises and updates bit-vector algorithms
                 published in the 1970s, and introduces focus search,
                 which is a new bit-vector algorithm relying more on
                 search and less on domain-editing than previous
                 algorithms. Focus search is competitive within a
                 limited family of constraint satisfaction problems.
                 Determination of subgraph isomorphism is a specialized
                 binary constraint satisfaction problem for which
                 bit-vector algorithms have been widely used since the
                 1980s, particularly for matching molecular structures.
                 This article very substantially updates the author's
                 1976 subgraph isomorphism algorithm, and reports
                 experimental results with random and real-life data.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
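
%%% A small illustration of the bit-parallel domain editing described
%%% above (a generic arc-consistency step with Python integers as
%%% bit-vectors; the representation is an assumption, not the
%%% article's code): row[a] is the bit-mask of values of y compatible
%%% with x = a, and a value of x survives only if some supporting
%%% value remains in the domain of y.
%%%
%%%     def revise(dom, row, x, y):
%%%         # Drop every value a from dom[x] lacking support in dom[y];
%%%         # row[a] is the bit-vector of supports in y for x = a.
%%%         new, bits = 0, dom[x]
%%%         while bits:
%%%             a = (bits & -bits).bit_length() - 1  # lowest set bit
%%%             if row[a] & dom[y]:                  # support remains
%%%                 new |= 1 << a
%%%             bits &= bits - 1
%%%         changed = new != dom[x]
%%%         dom[x] = new
%%%         return changed
%%%
%%%     # x, y range over {0, 1, 2}; constraint: x < y.
%%%     lt = [0b110, 0b100, 0b000]   # supports in y for x = 0, 1, 2
%%%     dom = {"x": 0b111, "y": 0b111}
%%%     revise(dom, lt, "x", "y")
%%%     assert dom["x"] == 0b011     # x = 2 pruned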

@Article{Askitis:2010:RSH,
  author =       "Nikolas Askitis and Justin Zobel",
  title =        "Redesigning the string hash table, burst trie, and
                 {BST} to exploit cache",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "17:1--17:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1671970.1921704",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A key decision when developing in-memory computing
                 applications is the choice of a mechanism to store and
                 retrieve strings. The most efficient current data
                 structures for this task are the hash table with
                 move-to-front chains and the burst trie, both of which
                 use linked lists as a substructure, and variants of
                 binary search tree. These data structures are
                 computationally efficient, but typical implementations
                 use large numbers of nodes and pointers to manage
                 strings, which is not efficient in use of cache. In
                 this article, we explore two alternatives to the
                 standard representation: the simple expedient of
                 including the string in its node, and, for linked
                 lists, the more drastic step of replacing each list of
                 nodes by a contiguous array of characters. Our
                 experiments show that, for large sets of strings, the
                 improvement is dramatic. For hashing, in the best case
                 the total space overhead is reduced to less than 1 bit
                 per string. For the burst trie, over 300 MB of strings
                 can be stored in a total of under 200 MB of memory with
                 significantly improved search time. These results, on a
                 variety of data sets, show that cache-friendly variants
                 of fundamental data structures can yield remarkable
                 gains in performance.",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
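
%%% A sketch of the ``replace each chain by a contiguous array'' idea
%%% (a Python rendition for illustration; the article's C structures
%%% store length-prefixed strings in raw buffers): each slot is one
%%% bytearray holding all of its strings back to back, so a lookup
%%% scans a single contiguous region instead of chasing node
%%% pointers.
%%%
%%%     class ArrayHash:
%%%         def __init__(self, n_slots=1024):
%%%             self.slots = [bytearray() for _ in range(n_slots)]
%%%
%%%         def _slot(self, key):
%%%             return self.slots[hash(key) % len(self.slots)]
%%%
%%%         def _scan(self, slot, key):
%%%             i = 0
%%%             while i < len(slot):
%%%                 n = slot[i]                # 1-byte length prefix
%%%                 if slot[i + 1:i + 1 + n] == key:
%%%                     return i
%%%                 i += 1 + n
%%%             return -1
%%%
%%%         def add(self, key):
%%%             key = key.encode()
%%%             slot = self._slot(key)
%%%             if self._scan(slot, key) < 0:
%%%                 slot.append(len(key))      # keys under 256 bytes
%%%                 slot.extend(key)
%%%
%%%         def __contains__(self, key):
%%%             key = key.encode()
%%%             return self._scan(self._slot(key), key) >= 0
%%%
%%%     h = ArrayHash()
%%%     h.add("cache")
%%%     assert "cache" in h and "miss" not in h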

@Article{McGeoch:2010:P,
  author =       "Catherine C. McGeoch",
  title =        "Preface",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "2.1:1--2.1:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1671970.1671974",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Chimani:2010:LFU,
  author =       "Markus Chimani and Carsten Gutwenger and Petra Mutzel
                 and Hoi-Ming Wong",
  title =        "Layer-free upward crossing minimization",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "2.2:1--2.2:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1671970.1671975",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "An upward drawing of a DAG $G$ is a drawing of $G$ in
                 which all arcs are drawn as curves increasing
                 monotonically in the vertical direction. In this
                 article, we present a new approach for upward crossing
                 minimization, that is, finding an upward drawing of a
                 DAG $G$ with as few crossings as possible. Our
                 algorithm is based on a two-stage upward planarization
                 approach, which computes a feasible upward planar
                 subgraph in the first step and reinserts the remaining
                 arcs by computing constraint-feasible upward insertion
                 paths. An experimental study shows that the new
                 algorithm leads to much better results than existing
                 algorithms for upward crossing minimization, including
                 the classical Sugiyama approach.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "Crossing number; planarization approach; upward
                 drawing; upward planarization",
}

@Article{Bauer:2010:CHG,
  author =       "Reinhard Bauer and Daniel Delling and Peter Sanders
                 and Dennis Schieferdecker and Dominik Schultes and
                 Dorothea Wagner",
  title =        "Combining hierarchical and goal-directed speed-up
                 techniques for {Dijkstra}'s algorithm",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "2.3:1--2.3:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1671970.1671976",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In recent years, highly effective hierarchical and
                 goal-directed speed-up techniques for routing in large
                 road networks have been developed. This article makes a
                 systematic study of combinations of such techniques.
                 These combinations turn out to give the best results in
                  many scenarios, including unit disk graphs,
                 grid networks, and time-expanded timetables. Besides
                 these quantitative results, we obtain general insights
                 for successful combinations.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "Dijkstra's algorithm; speed-up technique",
}

@Article{Nash:2010:CID,
  author =       "Nicholas Nash and David Gregg",
  title =        "Comparing integer data structures for 32- and 64-bit
                 keys",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "2.4:1--2.4:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1671970.1671977",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this article, we experimentally compare a number of
                 data structures operating over keys that are 32- and
                 64-bit integers. We examine traditional
                 comparison-based search trees as well as data
                 structures that take advantage of the fact that the
                 keys are integers such as van Emde Boas trees and
                 various trie-based data structures. We propose a
                 variant of a burst trie that performs better in time
                 than all the alternative data structures. In addition,
                 even for small sets of keys, this burst trie variant
                 occupies less space than comparison-based data
                 structures such as red-black trees and $B$-trees. Burst
                 tries have previously been shown to provide a very
                 efficient base for implementing cache efficient string
                 sorting algorithms. We find that with suitable
                 engineering, they also perform excellently as a dynamic
                 ordered data structure operating over integer keys. We
                 provide experimental results when the data structures
                 operate over uniform random data. We also present
                 experimental results for other types of data, including
                 datasets arising from {\em Valgrind}, a widely used
                 suite of tools for the dynamic binary instrumentation
                 of programs.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "Integer keys; level compression; searching; trees;
                 tries",
}
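
A minimal sketch of the burst-trie idea over 32-bit integer keys, not the engineered
variant of the article: leaves hold small sorted buckets that "burst" into internal
nodes indexed by the next key byte (most significant first) once they overflow. The
bucket limit and the byte-per-level routing are illustrative assumptions.

    from bisect import insort, bisect_left

    LIMIT = 16                                     # assumed bucket capacity

    class Node:
        def __init__(self, depth):
            self.depth = depth                     # key byte this node routes on
            self.children = {}                     # byte value -> Node (after burst)
            self.bucket = []                       # sorted keys (leaf), None after burst

        def _byte(self, key):
            return (key >> (8 * (3 - self.depth))) & 0xFF

        def insert(self, key):
            if self.bucket is not None:
                insort(self.bucket, key)
                if len(self.bucket) > LIMIT and self.depth < 4:
                    for k in self.bucket:          # burst: redistribute by next byte
                        b = self._byte(k)
                        self.children.setdefault(b, Node(self.depth + 1)).insert(k)
                    self.bucket = None
            else:
                b = self._byte(key)
                self.children.setdefault(b, Node(self.depth + 1)).insert(key)

        def contains(self, key):
            if self.bucket is not None:
                i = bisect_left(self.bucket, key)
                return i < len(self.bucket) and self.bucket[i] == key
            child = self.children.get(self._byte(key))
            return child is not None and child.contains(key)

    root = Node(0)
    for k in range(100):                           # enough inserts to force bursts
        root.insert(k)
    assert root.contains(42) and not root.contains(1000)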

@Article{Sinha:2010:EBT,
  author =       "Ranjan Sinha and Anthony Wirth",
  title =        "Engineering burstsort: Toward fast in-place string
                 sorting",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "15",
  number =       "1",
  pages =        "2.5:1--2.5:??",
  month =        mar,
  year =         "2010",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1671970.1671978",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon Dec 10 09:03:03 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Burstsort is a trie-based string sorting algorithm
                 that distributes strings into small buckets whose
                 contents are then sorted in cache. This approach has
                 earlier been demonstrated to be efficient on modern
                 cache-based processors [Sinha \& Zobel, JEA 2004]. In
                 this article, we introduce improvements that reduce by
                 a significant margin the memory requirement of
                  Burstsort: It is now less than 1\% greater than that of
                  an in-place algorithm. These techniques can be applied
                  to existing variants of Burstsort, as well as to other
                  string algorithms, such as those used for string
                  management.\par

                 We redesigned the buckets, introducing sub-buckets and
                 an index structure for them, which resulted in an
                 order-of-magnitude space reduction. We also show the
                 practicality of moving some fields from the trie nodes
                 to the insertion point (for the next string pointer) in
                 the bucket; this technique reduces memory usage of the
                 trie nodes by one-third. Importantly, the trade-off for
                 the reduction in memory use is only a very slight
                 increase in the running time of Burstsort on real-world
                 string collections. In addition, during the
                 bucket-sorting phase, the string suffixes are copied to
                 a small buffer to improve their spatial locality,
                 lowering the running time of Burstsort by up to 30\%.
                 These memory usage enhancements have enabled the
                 copy-based approach [Sinha et al., JEA 2006] to also
                 reduce the memory usage with negligible impact on
                 speed.",
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
  keywords =     "algorithms; cache; experimental algorithms; Sorting;
                 string management; tries",
}
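
A toy burstsort showing only the control flow of the algorithm described above:
distribute strings into buckets by leading character, burst a bucket one character
deeper when it overflows, then small-sort each bucket in order. The article's
contribution is the memory layout of the buckets, which a sketch this size cannot
capture; the threshold is kept tiny here so the demo actually bursts.

    THRESHOLD = 2    # unrealistically small, purely so the example recurses

    def burstsort(strings, depth=0):
        if len(strings) <= THRESHOLD:
            return sorted(strings)
        # strings fully consumed at this depth are all identical prefixes
        exhausted = sorted(s for s in strings if len(s) <= depth)
        buckets = {}
        for s in strings:
            if len(s) > depth:
                buckets.setdefault(s[depth], []).append(s)
        out = exhausted
        for ch in sorted(buckets):                     # visit buckets in order
            out.extend(burstsort(buckets[ch], depth + 1))
        return out

    data = ["burst", "trie", "sort", "sorting", "cache", "cacao", ""]
    assert burstsort(data) == sorted(data)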

@Article{Boytsov:2011:IMA,
  author =       "Leonid Boytsov",
  title =        "Indexing methods for approximate dictionary searching:
                 Comparative analysis",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "1.1:1--1.1:??",
  month =        may,
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.1963191",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon May 30 08:26:05 MDT 2011",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The primary goal of this article is to survey
                 state-of-the-art indexing methods for approximate
                 dictionary searching. To improve understanding of the
                 field, we introduce a taxonomy that classifies all
                 methods into direct methods and sequence-based
                 filtering methods. We focus on infrequently updated
                 dictionaries, which are used primarily for retrieval.
                 Therefore, we consider indices that are optimized for
                 retrieval rather than for update. The indices are
                 assumed to be associative, that is, capable of storing
                 and retrieving auxiliary information, such as string
                 identifiers. All solutions are lossless and guarantee
                 retrieval of strings within a specified edit distance
                 $k$. Benchmark results are presented for the
                 practically important cases of $k = 1, 2$, and $3$.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
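
The brute-force baseline any of the surveyed indices must beat is verifying each
dictionary word with a banded dynamic program that decides whether the edit distance is
within $k$, touching only a $2k+1$-wide diagonal band. A sketch for the benchmarked
cases $k = 1, 2, 3$; it is not taken from the article.

    def within_edit_distance(a: str, b: str, k: int) -> bool:
        if abs(len(a) - len(b)) > k:
            return False
        if not b:
            return len(a) <= k
        INF = k + 1                                   # any value > k acts as infinity
        prev = [j if j <= k else INF for j in range(len(b) + 1)]
        for i in range(1, len(a) + 1):
            cur = [INF] * (len(b) + 1)
            cur[0] = i if i <= k else INF
            lo, hi = max(1, i - k), min(len(b), i + k)
            for j in range(lo, hi + 1):               # only the diagonal band
                cost = 0 if a[i - 1] == b[j - 1] else 1
                cur[j] = min(prev[j - 1] + cost,      # substitute / match
                             prev[j] + 1,             # delete from a
                             cur[j - 1] + 1)          # insert into a
            prev = cur
        return prev[len(b)] <= k

    assert within_edit_distance("algorithmics", "algoritmics", 1)
    assert not within_edit_distance("hashing", "caching", 1)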

@Article{Biro:2011:SMC,
  author =       "P{\'e}ter Bir{\'o} and Robert W. Irving and Ildik{\'o}
                 Schlotter",
  title =        "Stable matching with couples: an empirical study",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "12:1--12:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.1970372",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In practical applications, algorithms for the classic
                  version of the hospitals/residents problem (the
                  many-to-one version of the stable marriage problem) may
                 have to be extended to accommodate the needs of couples
                 who wish to be allocated to (geographically) compatible
                 places. Such an extension has been in operation in the
                  National Resident Matching Program (NRMP) matching
                 scheme in the United States for a number of years. In
                 this setting, a stable matching need not exist, and it
                 is an NP-complete problem to decide if one does.
                 However, the only previous empirical study in this
                 context (focused on the NRMP algorithm), together with
                 information from NRMP, suggest that, in practice,
                 stable matchings do exist and that an appropriate
                 heuristic can be used to find such a matching. The
                 study presented here was motivated by the recent
                 decision to accommodate couples in the Scottish
                 Foundation Allocation Scheme (SFAS), the Scottish
                 equivalent of the NRMP. Here, the problem is a special
                 case, since hospital preferences are derived from a
                 ``master list'' of resident scores, but we show that
                 the existence problem remains NP-complete in this case.
                 We describe the algorithm used in SFAS and contrast it
                 with a version of the algorithm that forms the basis of
                 the NRMP approach. We also propose a third simpler
                 algorithm based on satisfying blocking pairs, and an
                 FPT algorithm when the number of couples is viewed as a
                 parameter. We present an empirical study of the
                 performance of a number of variants of these algorithms
                 using a range of datasets. The results indicate that,
                 not surprisingly, increasing the ratio of couples to
                 single applicants typically makes it harder to find a
                 stable matching (and, by inference, less likely that a
                 stable matching exists). However, the likelihood of
                 finding a stable matching is very high for realistic
                 values of this ratio, and especially so for particular
                 variants of the algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
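
The couples-free baseline underlying the algorithms in the study is the
resident-oriented Gale--Shapley algorithm for the hospitals/residents problem, which
always finds a stable matching when there are no couples. A minimal sketch under the
usual strict-preference assumptions; it is not the SFAS or NRMP code.

    from collections import deque

    def hospitals_residents(res_pref, hosp_pref, capacity):
        # res_pref: resident -> list of hospitals, best first
        # hosp_pref: hospital -> list of residents, best first
        rank = {h: {r: i for i, r in enumerate(prefs)}
                for h, prefs in hosp_pref.items()}
        assigned = {h: [] for h in hosp_pref}        # current admits per hospital
        next_choice = {r: 0 for r in res_pref}
        free = deque(res_pref)
        match = {}
        while free:
            r = free.popleft()
            if next_choice[r] >= len(res_pref[r]):
                continue                             # r exhausted their list
            h = res_pref[r][next_choice[r]]
            next_choice[r] += 1
            if r not in rank[h]:
                free.append(r)                       # h finds r unacceptable
                continue
            assigned[h].append(r)
            match[r] = h
            if len(assigned[h]) > capacity[h]:       # over capacity: bump the worst
                worst = max(assigned[h], key=lambda x: rank[h][x])
                assigned[h].remove(worst)
                del match[worst]
                free.append(worst)                   # bumped resident retries
        return match

    prefs_r = {"r1": ["h1", "h2"], "r2": ["h1"], "r3": ["h1", "h2"]}
    prefs_h = {"h1": ["r2", "r1", "r3"], "h2": ["r1", "r3"]}
    m = hospitals_residents(prefs_r, prefs_h, {"h1": 1, "h2": 2})
    assert m == {"r2": "h1", "r1": "h2", "r3": "h2"}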

@Article{Huber:2011:MGS,
  author =       "Stefan Huber and Martin Held",
  title =        "Motorcycle graphs: Stochastic properties motivate an
                 efficient yet simple implementation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "13:1--13:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.2019578",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this article, we study stochastic properties of a
                 geometric setting that underpins random motorcycle
                 graphs and use it to motivate a simple but very
                 efficient algorithm for computing motorcycle graphs. An
                 analysis of the mean trace length of $n$ random
                 motorcycles suggests that, on average, a motorcycle
                 crosses only a constant number of cells within a $\sqrt
                 n \times \sqrt n$ rectangular grid, provided that the
                 motorcycles are distributed sufficiently uniformly over
                 the area covered by the grid. This analysis motivates a
                 simple algorithm for computing motorcycle graphs: We
                 use the standard priority-queue--based algorithm and
                 enhance it with geometric hashing by means of a
                 rectangular grid. If the motorcycles are distributed
                 sufficiently uniformly, then our stochastic analysis
                 predicts an $O(n \log n)$ runtime. Indeed, extensive
                 experiments run on 22,000 synthetic and real-world
                 datasets confirm a runtime of less than $10^{-5} n \log
                 n$ seconds for the vast majority of our datasets on a
                 standard PC. Further experiments with our software,
                 Moca, also confirm the mean trace length and average
                 number of cells crossed by a motorcycle, as predicted
                 by our analysis. This makes Moca the first
                 implementation that is efficient enough to be applied
                 in practice for computing motorcycle graphs of large
                  datasets. Moreover, it is easy to extend Moca to make
                 it compute a generalized version of the original
                 motorcycle graph, thus enabling a significantly larger
                 field of applications.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Michail:2011:ECS,
  author =       "Dimitrios Michail",
  title =        "An experimental comparison of single-sided preference
                 matching algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "14:1--14:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.2019579",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We experimentally study the problem of assigning
                 applicants to posts. Each applicant provides a
                 preference list, which may contain ties, ranking a
                 subset of the posts. Different optimization criteria
                 may be defined, which depend on the desired solution
                 properties. The main focus of this work is to assess
                 the quality of matchings computed by rank-maximal and
                 popular matching algorithms and compare this with the
                 minimum weight matching algorithm, which is a standard
                 matching algorithm that is used in practice. Both
                 rank-maximal and popular matching algorithms use common
                 algorithmic techniques, which makes them excellent
                 candidates for a running time comparison. Since popular
                 matchings do not always exist, we also study the
                 unpopularity of matchings computed by the
                 aforementioned algorithms. Finally, extra criteria like
                 total weight and cardinality are included, due to their
                 importance in practice. All experiments are performed
                 using structured random instances as well as instances
                 created using real-world datasets.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Kot:2011:ECP,
  author =       "Andriy Kot and Andrey N. Chernikov and Nikos P.
                 Chrisochoides",
  title =        "Effective out-of-core parallel {Delaunay} mesh
                 refinement using off-the-shelf software",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "15:1--15:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.2019580",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present three related out-of-core parallel mesh
                 generation algorithms and their implementations for
                  small-size computational clusters. Computing
                  out-of-core permits solving larger problems than are
                  otherwise possible on the same hardware setup. Also,
                  when using shared computing resources with high demand,
                  a problem can take longer in terms of wall-clock time
                  with an in-core algorithm on many nodes than with an
                  out-of-core algorithm on few nodes. The difference is
                  due to wait-in-queue delays, which can grow
                  exponentially with the number of requested nodes. In
                  one specific case, using our best method on only 16
                  nodes, it can take several times less wall-clock time
                  to generate a 2-billion-element mesh than to generate a
                  mesh of the same size in-core with 121 nodes. Although
                  our best out-of-core method exhibits unavoidable
                  overheads (as low as 19\% in some cases) over the
                  corresponding in-core method (for mesh sizes that fit
                  completely in-core), this is a modest and expected
                  performance penalty. We evaluated our methods on
                  traditional clusters of workstations and present a
                  preliminary performance evaluation on the emerging
                  BlueWaters supercomputer.
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Prosser:2011:LDS,
  author =       "Patrick Prosser and Chris Unsworth",
  title =        "Limited discrepancy search revisited",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "16:1--16:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.2019581",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Harvey and Ginsberg's limited discrepancy search (LDS)
                 is based on the assumption that costly heuristic
                 mistakes are made early in the search process.
                 Consequently, LDS repeatedly probes the state space,
                 going against the heuristic (i.e., taking
                 discrepancies) a specified number of times in all
                 possible ways and attempts to take those discrepancies
                 as early as possible. LDS was improved by Richard Korf,
                 to become improved LDS (ILDS), but in doing so,
                 discrepancies were taken as late as possible, going
                 against the original assumption. Many subsequent
                 algorithms have faithfully inherited Korf's
                 interpretation of LDS, and take discrepancies late.
                 This then raises the question: Should we take our
                 discrepancies late or early? We repeat the original
                 experiments performed by Harvey and Ginsberg and those
                 by Korf in an attempt to answer this question. We also
                 investigate the early stopping condition of the YIELDS
                 algorithm, demonstrating that it is simple, elegant and
                 efficient.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
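
The two discrepancy orders at issue, made concrete on a fixed-depth binary tree where
the heuristic branch is 0: Harvey and Ginsberg's LDS takes its allowed discrepancies as
early as possible, whereas Korf's ILDS takes them as late as possible. A sketch over
bare 0/1 paths; real solvers prune these probes against a constraint problem.

    def lds_paths(depth, discrepancies, early=True):
        # Yield every 0/1 path with exactly `discrepancies` ones, ordered so
        # the ones (anti-heuristic moves) come as early (LDS) or as late
        # (ILDS) as the remaining positions allow.
        def rec(pos, left):
            if pos == depth:
                if left == 0:
                    yield []
                return
            first, second = (1, 0) if early else (0, 1)
            for branch in (first, second):
                if branch == 1 and left == 0:
                    continue                              # no discrepancy budget left
                if depth - pos - 1 < left - branch:
                    continue                              # cannot place the rest
                for tail in rec(pos + 1, left - branch):
                    yield [branch] + tail
        yield from rec(0, discrepancies)

    assert next(lds_paths(4, 1, early=True)) == [1, 0, 0, 0]   # discrepancy early
    assert next(lds_paths(4, 1, early=False)) == [0, 0, 0, 1]  # discrepancy late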

@Article{Tabourier:2011:GCR,
  author =       "Lionel Tabourier and Camille Roth and Jean-Philippe
                 Cointet",
  title =        "Generating constrained random graphs using multiple
                 edge switches",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "17:1--17:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.2063515",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The generation of random graphs using edge swaps
                 provides a reliable method to draw uniformly random
                 samples of sets of graphs respecting some simple
                 constraints (e.g., degree distributions). However, in
                 general, it is not necessarily possible to access all
                 graphs obeying some given constraints through a
                 classical switching procedure calling on pairs of
                 edges. Therefore, we propose to get around this issue
                 by generalizing this classical approach through the use
                 of higher-order edge switches. This method, which we
                 denote by ``$k$-edge switching,'' makes it possible to
                 progressively improve the covered portion of a set of
                 constrained graphs, thereby providing an increasing,
                  asymptotically certain confidence in the statistical
                 representativeness of the obtained sample.",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
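
For reference, the classical 2-edge switch that the article generalizes: pick two edges
(a, b) and (c, d) uniformly and rewire them to (a, d) and (c, b), which preserves every
vertex degree; a k-edge switch rewires k edges at once. A sketch of the k = 2 case on a
simple undirected graph, rejecting swaps that would create self-loops or parallel edges.

    import random

    def double_edge_swap(edges, adj, rng):
        (a, b), (c, d) = rng.sample(edges, 2)
        if len({a, b, c, d}) < 4:                 # would create a self-loop
            return False
        if d in adj[a] or b in adj[c]:            # would create a parallel edge
            return False
        edges.remove((a, b)); edges.remove((c, d))
        adj[a].discard(b); adj[b].discard(a)
        adj[c].discard(d); adj[d].discard(c)
        for u, v in ((a, d), (c, b)):             # rewire, degrees unchanged
            edges.append((u, v))
            adj[u].add(v); adj[v].add(u)
        return True

    edges = [(0, 1), (2, 3), (4, 5), (0, 2)]
    adj = {u: set() for u in range(6)}
    for u, v in edges:
        adj[u].add(v); adj[v].add(u)
    rng = random.Random(1)
    degrees_before = {u: len(adj[u]) for u in adj}
    for _ in range(100):
        double_edge_swap(edges, adj, rng)
    assert {u: len(adj[u]) for u in adj} == degrees_before   # degrees preserved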

@Article{Tsourakakis:2011:AAS,
  author =       "Charalampos E. Tsourakakis and Richard Peng and Maria
                 A. Tsiarli and Gary L. Miller and Russell Schwartz",
  title =        "Approximation algorithms for speeding up dynamic
                 programming and denoising {aCGH} data",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "18:1--18:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.2063517",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The development of cancer is largely driven by the
                 gain or loss of subsets of the genome, promoting
                 uncontrolled growth or disabling defenses against it.
                 Denoising array-based Comparative Genome Hybridization
                 (aCGH) data is an important computational problem
                 central to understanding cancer evolution. In this
                 article, we propose a new formulation of the denoising
                 problem that we solve with a ``vanilla'' dynamic
                 programming algorithm, which runs in $O(n^2)$ units of
                 time. Then, we propose two approximation techniques.
                 Our first algorithm reduces the problem into a
                 well-studied geometric problem, namely halfspace
                 emptiness queries, and provides an $\epsilon$ additive
                 approximation to the optimal objective value in
                 $\tilde{O}(n ^{4 / 3 + \delta} \log (U / \epsilon))$
                 time, where $\delta$ is an arbitrarily small positive
                 constant and $U = \max\{\sqrt C, (|P_i|)_{i =
                 1,\ldots{}, n}\} (P = (P_1, P_2, \ldots{}, P_n), P_i
                 \in \mathbb{R})$, is the vector of the noisy aCGH
                 measurements, $C$ a normalization constant. The second
                 algorithm provides a $(1 \pm \epsilon)$ approximation
                 (multiplicative error) and runs in $O(n \log n /
                 \epsilon)$ time. The algorithm decomposes the initial
                 problem into a small (logarithmic) number of Monge
                 optimization subproblems that we can solve in linear
                 time using existing techniques. Finally, we validate
                 our model on synthetic and real cancer datasets. Our
                 method consistently achieves superior precision and
                 recall to leading competitors on the data with ground
                 truth. In addition, it finds several novel markers not
                 recorded in the benchmarks but supported in the
                 oncology literature.",
  acknowledgement = ack-nhfb,
  articleno =    "1.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
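
To show the shape of the "vanilla" quadratic dynamic program being accelerated, here is
a generic $O(n^2)$ segmentation recurrence: fit a piecewise-constant signal to the noisy
measurements, paying the squared error of each segment plus a fixed penalty per segment,
with $O(1)$ segment costs via prefix sums. The article's exact objective differs in its
details; this sketch only conveys the recurrence structure.

    def segment(P, C):
        n = len(P)
        s = [0.0] * (n + 1)              # prefix sums of P
        q = [0.0] * (n + 1)              # prefix sums of P squared
        for i, x in enumerate(P):
            s[i + 1] = s[i] + x
            q[i + 1] = q[i] + x * x

        def cost(i, j):                  # squared error of fitting P[i:j] by its mean
            m, tot = j - i, s[j] - s[i]
            return (q[j] - q[i]) - tot * tot / m

        best = [0.0] + [float("inf")] * n
        for j in range(1, n + 1):                    # the O(n^2) double loop
            for i in range(j):
                c = best[i] + cost(i, j) + C         # last segment is P[i:j]
                if c < best[j]:
                    best[j] = c
        return best[n]

    noisy = [0.1, -0.2, 0.0, 3.9, 4.1, 4.0, 0.05]
    assert segment(noisy, C=1.0) < segment(noisy, C=100.0)   # penalty trades segments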

@Article{Vahrenhold:2011:P,
  author =       "Jan Vahrenhold",
  title =        "Preface",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "21:1--21:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.1970374",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Wang:2011:CEM,
  author =       "Bei Wang and Herbert Edelsbrunner and Dmitriy
                 Morozov",
  title =        "Computing elevation maxima by searching the {Gauss}
                 sphere",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "22:1--22:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.1970375",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The elevation function on a smoothly embedded
                 2-manifold in R$^3$ reflects the multiscale topography
                 of cavities and protrusions as local maxima. The
                 function has been useful in identifying coarse docking
                 configurations for protein pairs. Transporting the
                 concept from the smooth to the piecewise linear
                 category, this article describes an algorithm for
                 finding all local maxima. While its worst-case running
                  time is the same as that of the algorithm used in prior
                  work, its performance in practice is orders of
                  magnitude superior. We cast light on this improvement
                 by relating the running time to the total absolute
                 Gaussian curvature of the 2-manifold.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Rotta:2011:MLS,
  author =       "Randolf Rotta and Andreas Noack",
  title =        "Multilevel local search algorithms for modularity
                 clustering",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "23:1--23:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.1970376",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Modularity is a widely used quality measure for graph
                 clusterings. Its exact maximization is NP-hard and
                 prohibitively expensive for large graphs. Popular
                 heuristics first perform a coarsening phase, where
                 local search starting from singleton clusters is used
                 to compute a preliminary clustering, and then
                 optionally a refinement phase, where this clustering is
                 improved by moving vertices between clusters. As a
                 generalization, multilevel heuristics coarsen in
                 several stages, and refine by moving entire clusters
                 from each of these stages, not only individual
                 vertices. This article organizes existing and new
                 single-level and multilevel heuristics into a coherent
                 design space, and compares them experimentally with
                 respect to their effectiveness (achieved modularity)
                 and runtime. For coarsening by iterated cluster
                 joining, it turns out that the most widely used
                 criterion for joining clusters (modularity increase) is
                 outperformed by other simple criteria, that a recent
                 multistep algorithm [Schuetz and Caflisch 2008] is no
                 improvement over simple single-step coarsening for
                 these criteria, and that the recent multilevel
                 coarsening by iterated vertex moving [Blondel et al.
                 2008] is somewhat faster but slightly less effective
                 (with refinement). The new multilevel refinement is
                 significantly more effective than the conventional
                 single-level refinement or no refinement, in reasonable
                 runtime. A comparison with published benchmark results
                 and algorithm implementations shows that multilevel
                 local search heuristics, despite their relative
                 simplicity, are competitive with the best algorithms in
                 the literature.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
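
The quantity being optimized, and the single-vertex move that both the coarsening and
refinement phases evaluate: modularity is Q = sum over clusters c of l_c/m -
(d_c/(2m))^2, with l_c the intra-cluster edge count, d_c the total degree in c, and m
the number of edges. A sketch, not the article's implementation; the gain is evaluated
naively here, where the heuristics studied use incremental formulas.

    def modularity(adj, cluster):
        m = sum(len(nb) for nb in adj.values()) / 2
        intra, deg = {}, {}
        for u, nb in adj.items():
            c = cluster[u]
            deg[c] = deg.get(c, 0) + len(nb)
            for v in nb:
                if cluster[v] == c and u < v:     # count each intra edge once
                    intra[c] = intra.get(c, 0) + 1
        return sum(intra.get(c, 0) / m - (d / (2 * m)) ** 2
                   for c, d in deg.items())

    def move_gain(adj, cluster, u, target):
        # modularity change if u moves to `target`, by recomputing Q naively
        old, before = cluster[u], modularity(adj, cluster)
        cluster[u] = target
        after = modularity(adj, cluster)
        cluster[u] = old
        return after - before

    # two triangles joined by one edge: the two-cluster split has higher Q
    adj = {0: {1, 2}, 1: {0, 2}, 2: {0, 1, 3}, 3: {2, 4, 5}, 4: {3, 5}, 5: {3, 4}}
    split = {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1}
    assert modularity(adj, split) > modularity(adj, {u: 0 for u in adj})
    assert move_gain(adj, dict(split), 3, 0) < 0     # moving vertex 3 across hurts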

@Article{Bertasi:2011:PYA,
  author =       "Paolo Bertasi and Marco Bressan and Enoch Peserico",
  title =        "{{\tt psort}}, yet another fast stable sorting
                 software",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "24:1--24:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.1970377",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "{\tt psort} is the fastest sorting software according
                 to the PennySort benchmark, sorting 181GB of data in
                 2008 and 224GB in 2009 for \$0.01 of computer time.
                 This article details its internals, and the careful
                 fitting of its architecture to the structure of modern
                 PC-class platforms, allowing it to outperform
                 state-of-the-art sorting software such as STXXL sort.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Finocchi:2011:GEF,
  author =       "Irene Finocchi and John Hershberger",
  title =        "Guest editors' foreword",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "31:1--31:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.2025377",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "3.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Belazzougui:2011:TPM,
  author =       "Djamal Belazzougui and Paolo Boldi and Rasmus Pagh and
                 Sebastiano Vigna",
  title =        "Theory and practice of monotone minimal perfect
                 hashing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "32:1--32:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.2025378",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Minimal perfect hash functions have been shown to be
                 useful to compress data in several data management
                 tasks. In particular, order-preserving minimal perfect
                 hash functions (Fox et al. 1991) have been used to
                 retrieve the position of a key in a given list of keys;
                 however, the ability to preserve any given order leads
                 to an unavoidable $\Omega(n \log n)$ lower bound on the
                 number of bits required to store the function.
                 Recently, it was observed (Belazzougui et al. 2009)
                 that very frequently the keys to be hashed are sorted
                 in their intrinsic (i.e., lexicographical) order. This
                 is typically the case of dictionaries of search
                 engines, list of URLs of Web graphs, and so on. We
                 refer to this restricted version of the problem as
                 monotone minimal perfect hashing. We analyze
                 experimentally the data structures proposed in
                  Belazzougui et al. [2009], and along the way we propose
                 some new methods that, albeit asymptotically equivalent
                 or worse, perform very well in practice and provide a
                 balance between access speed, ease of construction, and
                 space usage.",
  acknowledgement = ack-nhfb,
  articleno =    "3.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Doerr:2011:QRS,
  author =       "Benjamin Doerr and Tobias Friedrich and Marvin
                 K{\"u}nnemann and Thomas Sauerwald",
  title =        "Quasirandom rumor spreading: an experimental
                 analysis",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "33:1--33:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.2025379",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We empirically analyze two versions of the well-known
                 ``randomized rumor spreading'' protocol to disseminate
                 a piece of information in networks. In the classical
                 model, in each round, each informed node informs a
                 random neighbor. In the recently proposed quasirandom
                 variant, each node has a (cyclic) list of its
                 neighbors. Once informed, it starts at a random
                 position of the list, but from then on informs its
                  neighbors in the order of the list. While better
                  performance of the quasirandom model could be proven
                  for sparse random graphs, all other results show that,
                 independent of the structure of the lists, the same
                 asymptotic performance guarantees hold as for the
                 classical model. In this work, we compare the two
                 models experimentally. Not only does this show that the
                 quasirandom model generally is faster, but it also
                 shows that the runtime is more concentrated around the
                 mean. This is surprising given that much fewer random
                 bits are used in the quasirandom process. These
                 advantages are also observed in a lossy communication
                 model, where each transmission does not reach its
                 target with a certain probability, and in an
                 asynchronous model, where nodes send at random times
                 drawn from an exponential distribution. We also show
                 that typically the particular structure of the lists
                 has little influence on the efficiency.",
  acknowledgement = ack-nhfb,
  articleno =    "3.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
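
A side-by-side simulation of the two push protocols compared above: classical (each
informed node calls a uniformly random neighbor every round) versus quasirandom (each
node walks its cyclic neighbor list from a random start). A small sketch for counting
rounds until everyone is informed, not the article's experimental framework.

    import random

    def spread_rounds(adj, quasirandom, rng):
        n = len(adj)
        informed = {0}                                # node 0 starts the rumor
        pointer = {0: rng.randrange(len(adj[0]))}     # random list start
        rounds = 0
        while len(informed) < n:
            rounds += 1
            newly = set()
            for u in informed:
                if quasirandom:
                    v = adj[u][pointer[u] % len(adj[u])]
                    pointer[u] += 1                   # advance along the list
                else:
                    v = rng.choice(adj[u])
                if v not in informed:
                    newly.add(v)
            for v in newly:
                informed.add(v)
                pointer[v] = rng.randrange(len(adj[v]))
        return rounds

    rng = random.Random(7)
    ring = {u: [(u - 1) % 50, (u + 1) % 50] for u in range(50)}
    classical = sum(spread_rounds(ring, False, rng) for _ in range(20)) / 20
    quasi = sum(spread_rounds(ring, True, rng) for _ in range(20)) / 20
    print(f"ring of 50: classical {classical:.1f} rounds, quasirandom {quasi:.1f}")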

@Article{Haverkort:2011:FDH,
  author =       "Herman Haverkort and Freek V. Walderveen",
  title =        "Four-dimensional {Hilbert} curves for {$R$}-trees",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "34:1--34:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.2025380",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Two-dimensional R-trees are a class of spatial index
                 structures in which objects are arranged to enable fast
                 window queries: report all objects that intersect a
                 given query window. One of the most successful methods
                 of arranging the objects in the index structure is
                 based on sorting the objects according to the positions
                 of their centers along a two-dimensional Hilbert
                 space-filling curve. Alternatively, one may use the
                 coordinates of the objects' bounding boxes to represent
                 each object by a four-dimensional point, and sort these
                 points along a four-dimensional Hilbert-type curve. In
                 experiments by Kamel and Faloutsos and by Arge et al.,
                 the first solution consistently outperformed the latter
                 when applied to point data, while the latter solution
                 clearly outperformed the first on certain artificial
                 rectangle data. These authors did not specify which
                 four-dimensional Hilbert-type curve was used; many
                 exist. In this article, we show that the results of the
                 previous articles can be explained by the choice of the
                 four-dimensional Hilbert-type curve that was used and
                 by the way it was rotated in four-dimensional space. By
                 selecting a curve that has certain properties and
                 choosing the right rotation, one can combine the
                 strengths of the two-dimensional and the
                 four-dimensional approach into one, while avoiding
                 their apparent weaknesses. The effectiveness of our
                 approach is demonstrated with experiments on various
                 datasets. For real data taken from VLSI design, our new
                 curve yields R-trees with query times that are better
                 than those of R-trees that were obtained with
                 previously used curves.",
  acknowledgement = ack-nhfb,
  articleno =    "3.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
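
The two-dimensional ingredient of the center-point approach discussed above: map a point
on a 2^order x 2^order grid to its position along the Hilbert curve, then sort
rectangles by the index of their centers to pack R-tree leaves. This is the standard 2D
routine only; the 4D analogue studied in the article admits many inequivalent curves,
which is exactly the article's point.

    def hilbert_index(order, x, y):
        # order is the grid side length, a power of two; (x, y) in [0, order)
        d = 0
        s = order // 2
        while s > 0:
            rx = 1 if x & s else 0
            ry = 1 if y & s else 0
            d += s * s * ((3 * rx) ^ ry)
            if ry == 0:                      # rotate the quadrant
                if rx == 1:
                    x, y = s - 1 - x, s - 1 - y
                x, y = y, x
            s //= 2
        return d

    def pack_order(rects, order=1 << 16):
        # rects: (xmin, ymin, xmax, ymax); sort by the center's Hilbert index
        def key(r):
            cx = int((r[0] + r[2]) / 2) % order
            cy = int((r[1] + r[3]) / 2) % order
            return hilbert_index(order, cx, cy)
        return sorted(rects, key=key)

    corners = [(0, 0), (0, 1), (1, 1), (1, 0)]   # curve order on the 2x2 grid
    assert sorted(corners, key=lambda p: hilbert_index(2, *p)) == corners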

@Article{Negrucseri:2011:SMF,
  author =       "Cosmin Silvestru Negrucseri and Mircea Bogdan Pacsosi
                 and Barbara Stanley and Clifford Stein and Cristian
                 George Strat",
  title =        "Solving maximum flow problems on real-world bipartite
                 graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "35:1--35:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.2025381",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this article, we present an experimental study of
                 several maximum-flow algorithms in the context of
                 unbalanced bipartite networks. Our experiments are
                 motivated by a real-world problem of managing
                 reservation-based inventory in Google content ad
                 systems. We are interested in observing the performance
                 of several push-relabel algorithms on our real-world
                 datasets and also on some generated ones. Previous work
                 suggested an important improvement for push-relabel
                 algorithms on unbalanced bipartite networks: the
                 two-edge push rule. We show how the two-edge push rule
                 improves the running time. While no single algorithm
                 dominates the results, we show there is one that has
                 very robust performance in practice.",
  acknowledgement = ack-nhfb,
  articleno =    "3.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Tazari:2011:DLH,
  author =       "Siamak Tazari and Matthias M{\"u}ller-Hannemann",
  title =        "Dealing with large hidden constants: engineering a
                  planar {Steiner} tree {PTAS}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "16",
  number =       "1",
  pages =        "36:1--36:??",
  year =         "2011",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1963190.2025382",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Feb 25 18:02:18 MST 2012",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present the first attempt on implementing a highly
                 theoretical polynomial-time approximation scheme (PTAS)
                 with huge hidden constants, namely, the PTAS for
                 Steiner tree in planar graphs by Borradaile, Klein, and
                 Mathieu (2009). Whereas this result, and several other
                 PTAS results of the recent years, are of high
                 theoretical importance, no practical applications or
                 even implementation attempts have been known to date,
                 due to the extremely large constants that are involved
                 in them. We describe techniques on how to circumvent
                 the challenges in implementing such a scheme. With
                 today's limitations on processing power and space, we
                 still have to sacrifice approximation guarantees for
                 improved running times by choosing some parameters
                 empirically. But our experiments show that with our
                 choice of parameters, we do get the desired
                 approximation ratios, suggesting that a much tighter
                 analysis might be possible. Our computational
                 experiments with benchmark instances from SteinLib and
                 large artificial instances well exceeded our own
                 expectations. We demonstrate that we are able to handle
                 instances with up to a million nodes and several
                  hundred terminals in 1.5 hours on a standard PC. On
                 the rectilinear preprocessed instances from SteinLib,
                  we observe a monotone improvement for smaller values
                 of $\epsilon$, with an average gap below 1\% for
                 $\epsilon = 0.1$. We compare our implementation against
                 the well-known batched $1$-Steiner heuristic and
                 observe that on very large instances, we are able to
                 produce comparable solutions much faster. We also
                 present a thorough experimental evaluation of the
                 influence of the various parameters of the PTAS and
                 thus obtain a better understanding of their empirical
                 effects.",
  acknowledgement = ack-nhfb,
  articleno =    "3.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Reams:2012:AFD,
  author =       "Charles Reams",
  title =        "{Anatree}: a Fast Data Structure for Anagrams",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "17",
  number =       "1",
  pages =        "1.1:1--1.1:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2133803.2133804",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jan 21 07:42:23 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Natural language is a rich source of constraint
                 satisfaction problems (CSPs), with a uniquely
                 structured solution domain. We describe a number of
                 approaches to satisfying the particular case of
                 unordered letter-level constraints, including anagrams,
                 but also relevant to typographical error correction,
                 password security and word puzzles among other fields.
                 We define the anatree, a data structure that can solve
                 many such problems in constant time with respect to the
                 size of the lexicon. The structure represents the
                 lexicon of a language in a format somewhat analogous to
                 a binary decision diagram (BDD) and, as with BDDs,
                 construction heuristics allow the real average-case
                 performance to vastly exceed the theoretical worst
                 case. We compare anatrees and their alternatives
                 empirically, explore the behavior of the construction
                 heuristics, and characterize the tasks for which each
                 is best suited.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
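
The baseline that anatrees are measured against is easy to state in code: hash every
lexicon word under its sorted-letter signature, so all mutual anagrams collide into one
list and a query is a single lookup. Constant time per query like the anatree, but
without the anatree's support for partial or wildcard constraints. A sketch; the
lexicon below is a stand-in.

    from collections import defaultdict

    def build_anagram_index(lexicon):
        index = defaultdict(list)
        for word in lexicon:
            index["".join(sorted(word))].append(word)   # signature -> anagrams
        return index

    lexicon = ["listen", "silent", "enlist", "google", "banana"]
    index = build_anagram_index(lexicon)
    assert sorted(index["".join(sorted("tinsel"))]) == ["enlist", "listen", "silent"]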

@Article{Geisberger:2012:RPF,
  author =       "Robert Geisberger and Michael N. Rice and Peter
                 Sanders and Vassilis J. Tsotras",
  title =        "Route planning with flexible edge restrictions",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "17",
  number =       "1",
  pages =        "1.2:1--1.2:??",
  month =        mar,
  year =         "2012",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2133803.2133805",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed Jan 21 07:42:23 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this work, we explore a new type of flexible
                 shortest-path query, in which the query can be
                 dynamically parameterized to constrain the type of
                 edges that may be included in the resulting shortest
                 path (e.g., find the shortest path in a road network
                  that avoids toll roads and overpasses too low for the
                  specified vehicle height). We extend the
                 hierarchical preprocessing technique known as
                 Contraction Hierarchies to efficiently support such
                 flexible queries. We also present several effective
                 algorithmic optimizations for further improving the
                 overall scalability and query times of this approach,
                  including the addition of goal-directed search
                  techniques, search-space pruning techniques, and a
                  generalization of the constraints of the local search.
                 Experiments are presented for both the North American
                 and the European road networks, showcasing the general
                 effectiveness and scalability of our proposed
                 methodology to large-scale, real-world graphs.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
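
The query semantics, stripped of the Contraction Hierarchies machinery the article adds:
give each edge a bitmask of properties (toll road, low overpass, ...) and let every
query carry a mask of forbidden properties, so Dijkstra simply skips edges whose flags
intersect that mask. The bit names are assumptions; this plain-Dijkstra sketch only
fixes the semantics that the article's preprocessing then accelerates.

    import heapq

    TOLL, LOW_OVERPASS = 1, 2        # example edge-property bits (assumed names)

    def restricted_shortest_path(adj, source, target, forbidden):
        # adj: node -> list of (neighbor, length, flags)
        dist = {source: 0.0}
        heap = [(0.0, source)]
        while heap:
            d, u = heapq.heappop(heap)
            if u == target:
                return d
            if d > dist.get(u, float("inf")):
                continue                              # stale queue entry
            for v, length, flags in adj[u]:
                if flags & forbidden:                 # edge violates the restrictions
                    continue
                nd = d + length
                if nd < dist.get(v, float("inf")):
                    dist[v] = nd
                    heapq.heappush(heap, (nd, v))
        return float("inf")

    adj = {
        "a": [("b", 1.0, TOLL), ("c", 5.0, 0)],
        "b": [("d", 1.0, 0)],
        "c": [("d", 1.0, 0)],
        "d": [],
    }
    assert restricted_shortest_path(adj, "a", "d", forbidden=0) == 2.0
    assert restricted_shortest_path(adj, "a", "d", forbidden=TOLL) == 6.0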

@Article{Abraham:2013:ARR,
  author =       "Ittai Abraham and Daniel Delling and Andrew V.
                 Goldberg and Renato F. Werneck",
  title =        "Alternative routes in road networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "1.3:1--1.3:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2444016.2444019",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon May 6 18:55:51 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study the problem of finding good alternative
                 routes in road networks. We look for routes that are
                 substantially different from the shortest path, have
                 small stretch, and are locally optimal. We formally
                 define the problem of finding alternative routes with a
                 single via vertex, develop efficient algorithms for it,
                 and evaluate them experimentally. Our algorithms are
                 efficient enough for practical use and compare
                 favorably with previous methods in both speed and
                 solution quality.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
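
The single-via-vertex scheme in its naive form: run Dijkstra from s and, on the reversed
graph, from t, then score each via vertex v by the length of the s-v-t route, keeping
routes that stay within a stretch bound of the optimum. The local-optimality and
difference checks of the article are omitted; this sketch only shows where via
candidates come from, and the slack value is an assumption.

    import heapq

    def dijkstra(adj, source):
        dist = {source: 0.0}
        heap = [(0.0, source)]
        while heap:
            d, u = heapq.heappop(heap)
            if d > dist.get(u, float("inf")):
                continue
            for v, w in adj.get(u, ()):
                if d + w < dist.get(v, float("inf")):
                    dist[v] = d + w
                    heapq.heappush(heap, (d + w, v))
        return dist

    def via_candidates(adj, s, t, slack=1.25):
        rev = {}
        for u, edges in adj.items():                  # build the reversed graph
            for v, w in edges:
                rev.setdefault(v, []).append((u, w))
        ds, dt = dijkstra(adj, s), dijkstra(rev, t)
        opt = ds[t]
        return sorted((ds[v] + dt[v], v) for v in ds
                      if v in dt and ds[v] + dt[v] <= slack * opt)

    adj = {"s": [("a", 1), ("b", 2)], "a": [("t", 1)], "b": [("t", 1)], "t": []}
    assert {v for _, v in via_candidates(adj, "s", "t")} == {"s", "a", "t"}
    assert "b" in {v for _, v in via_candidates(adj, "s", "t", slack=1.5)}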

@Article{Batz:2013:MTD,
  author =       "G. Veit Batz and Robert Geisberger and Peter Sanders
                 and Christian Vetter",
  title =        "Minimum time-dependent travel times with contraction
                 hierarchies",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "1.4:1--1.4:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2444016.2444020",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon May 6 18:55:51 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Time-dependent road networks are represented as
                 weighted graphs, where the weight of an edge depends on
                 the time one passes through that edge. This way, we can
                 model periodic congestions during rush hour and similar
                 effects. In this work we deal with the special case
                 where edge weights are time-dependent travel times.
                 Namely, we consider two problems in this setting:
                 Earliest arrival queries ask for a minimum travel time
                 route for a start and a destination depending on a
                 given departure time. Travel time profile queries ask
                 for the travel time profile for a start, a destination,
                 and an interval of possible departure times. For an
                 instance representing the German road network, for
                 example, we can answer earliest arrival queries in less
                 than 1.5ms. For travel time profile queries, which are
                 much harder to answer, we need less than 40ms if the
                 interval of possible departure times has a width of 24
                 hours. For inexact travel time profiles with an allowed
                 error of about 1\% this even reduces to 3.2ms. The
                 underlying hierarchical representations of the road
                 network, which are variants of a time-dependent
                 contraction hierarchy (TCH), need less than 1GiB of
                 space and can be generated in about 30 minutes. As far
                 as we know, TCHs are currently the only method being
                 able to answer travel time profile queries efficiently.
                 Altogether, with TCHs, web servers with massive request
                 traffic are able to provide fast time-dependent
                 earliest arrival route planning and computation of
                 travel time profiles.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Bonami:2013:BRC,
  author =       "Pierre Bonami and Jon Lee and Sven Leyffer and Andreas
                 W{\"a}chter",
  title =        "On branching rules for convex mixed-integer nonlinear
                 optimization",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "2.6:1--2.6:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2532568",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Branch-and-Bound (B\&B) is perhaps the most
                 fundamental algorithm for the global solution of convex
                 Mixed-Integer Nonlinear Programming (MINLP) problems.
                 It is well-known that carrying out branching in a
                 nonsimplistic manner can greatly enhance the
                 practicality of B\&B in the context of Mixed-Integer
                 Linear Programming (MILP). No detailed study of
                 branching has heretofore been carried out for MINLP. In
                 this article, we study and identify useful
                 sophisticated branching methods for MINLP, including
                 novel approaches based on approximations of the
                 nonlinear relaxations by linear and quadratic
                 programs.",
  acknowledgement = ack-nhfb,
  articleno =    "2.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
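
%%% For orientation, a minimal Python sketch (ours, not the authors'
%%% code) of the textbook most-fractional branching rule, the simplest
%%% point in the design space this article studies; the function name
%%% and signature are illustrative assumptions.
%%%
%%%     def most_fractional(x, integer_vars, tol=1e-6):
%%%         """Return the index of the integer-constrained variable
%%%         whose relaxation value is farthest from an integer, or
%%%         None if the solution is already (tol-)integral."""
%%%         best, best_score = None, tol
%%%         for i in integer_vars:
%%%             frac = abs(x[i] - round(x[i]))  # distance to nearest integer
%%%             if frac > best_score:
%%%                 best, best_score = i, frac
%%%         return best
%%%
%%%     # Example: variable 2 (value 0.5) is the most fractional.
%%%     print(most_fractional([1.0, 2.9, 0.5], [0, 1, 2]))   # -> 2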

@Article{Canzar:2013:PDA,
  author =       "Stefan Canzar and Khaled Elbassioni and Juli{\'a}n
                 Mestre",
  title =        "A polynomial-delay algorithm for enumerating
                 approximate solutions to the interval constrained
                 coloring problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "2.2:1--2.2:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2444016.2493372",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study the interval constrained coloring problem, a
                 combinatorial problem arising in the interpretation of
                 data on protein structure emanating from experiments
                 based on hydrogen/deuterium exchange and mass
                 spectrometry. The problem captures the challenging task
                 of increasing the spatial resolution of experimental
                 data in order to get a better picture of the protein
                 structure. Since solutions proposed by any algorithmic
                 framework have to ultimately be verified by
                 biochemists, it is important to provide not just a
                 single solution, but a valuable set of candidate
                 solutions. Our contribution is a polynomial-delay,
                 polynomial-space algorithm for enumerating all exact
                 solutions plus further approximate solutions, which are
                 guaranteed to be within an absolute error of two of the
                 optimum within fragments of the protein, that is,
                 within sets of consecutive residues. Our experiments
                 indicate that the quality of the approximate solutions
                 is comparable to the optimal ones in terms of deviation
                 from the underlying true solution. In addition, the
                 experiments also confirm the effectiveness of the
                 method in reducing the delay between two consecutive
                 solutions considerably, compared to the time an
                 integer programming solver takes to produce the next
                 exact solution.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Delort:2013:HDP,
  author =       "Charles Delort and Olivier Spanjaard",
  title =        "A hybrid dynamic programming approach to the
                 biobjective binary knapsack problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "1.2:1--1.2:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2444016.2444018",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "This article is devoted to a study of the impact of
                 using bound sets in biobjective dynamic programming.
                 This notion, introduced by Villareal and Karwan [1981],
                 has been independently revisited by Ehrgott and
                 Gandibleux [2007], as well as by Sourd and Spanjaard
                 [2008]. The idea behind it is very general and can,
                 therefore, be adapted to a wide range of biobjective
                 combinatorial problems. We focus here on the
                 biobjective binary knapsack problem. We show that using
                 bound sets to perform a hybrid dynamic programming
                 procedure embedded in a two-phase method [Ulungu and
                 Teghem 1995] yields numerical results that outperform
                 previous dynamic programming approaches to the problem,
                 both in execution times and memory requirements.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
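
%%% A minimal dynamic-programming sketch in Python (our illustration)
%%% of the biobjective knapsack recursion this article builds on: each
%%% DP cell keeps a Pareto front of profit vectors and prunes dominated
%%% ones. The bound sets that are the paper's actual contribution are
%%% deliberately omitted here.
%%%
%%%     def pareto_filter(points):
%%%         """Keep the non-dominated (profit1, profit2) vectors."""
%%%         pts = sorted(set(points), key=lambda p: (-p[0], -p[1]))
%%%         front, best2 = [], float("-inf")
%%%         for p1, p2 in pts:
%%%             if p2 > best2:       # not dominated by anything kept so far
%%%                 front.append((p1, p2))
%%%                 best2 = p2
%%%         return front
%%%
%%%     def biobjective_knapsack(items, capacity):
%%%         """items: (weight, profit1, profit2) triples; returns the
%%%         Pareto front of profit vectors achievable within capacity."""
%%%         fronts = [[(0, 0)] for _ in range(capacity + 1)]
%%%         for w, p1, p2 in items:
%%%             for c in range(capacity, w - 1, -1):   # 0/1 semantics
%%%                 ext = [(a + p1, b + p2) for a, b in fronts[c - w]]
%%%                 fronts[c] = pareto_filter(fronts[c] + ext)
%%%         return fronts[capacity]
%%%
%%%     print(biobjective_knapsack([(2, 3, 1), (3, 2, 5), (2, 3, 0)], 5))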

@Article{Eppstein:2013:LAM,
  author =       "David Eppstein and Maarten L{\"o}ffler and Darren
                 Strash",
  title =        "Listing All Maximal Cliques in Large Sparse Real-World
                 Graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "??",
  pages =        "3.1:1--3.1:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2543629",
  ISSN =         "1084-6654",
  bibdate =      "Wed Jan 21 07:35:03 MST 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "3.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Ferraro-Petrillo:2013:DSR,
  author =       "Umberto Ferraro-Petrillo and Fabrizio Grandoni and
                 Giuseppe F. Italiano",
  title =        "Data structures resilient to memory faults: an
                 experimental study of dictionaries",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "1.6:1--1.6:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2444016.2444022",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon May 6 18:55:51 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We address the problem of implementing data structures
                 resilient to memory faults, which may arbitrarily
                 corrupt memory locations. In this framework, we focus
                 on the implementation of dictionaries and perform a
                 thorough experimental study using a testbed that we
                 designed for this purpose. Our main discovery is that
                 the best-known (asymptotically optimal) resilient data
                 structures have very large space overheads. More
                 precisely, most of the space used by these data
                 structures is not due to key storage. This might not be
                 acceptable in practice, since resilient data structures
                 are meant for applications where a huge amount of data
                 (often of the order of terabytes) has to be stored.
                 Exploiting techniques developed in the context of
                 resilient (static) sorting and searching, in
                 combination with some new ideas, we designed and
                 engineered an alternative implementation, which, while
                 still guaranteeing optimal asymptotic time and space
                 bounds, performs much better in terms of memory without
                 compromising the time efficiency.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Festa:2013:FSI,
  author =       "Paola Festa",
  title =        "Foreword to the special issue {SEA 2010}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "1.1:1--1.1:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2444016.2444017",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Gog:2013:CST,
  author =       "Simon Gog and Enno Ohlebusch",
  title =        "Compressed suffix trees: Efficient computation and
                 storage of {LCP}-values",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "2.1:1--2.1:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2444016.2461327",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The suffix tree is a very important data structure in
                 string processing, but typical implementations suffer
                 from huge space consumption. In large-scale
                 applications, compressed suffix trees (CSTs) are
                 therefore used instead. A CST consists of three
                 (compressed) components: the suffix array, the longest
                 common prefix (LCP)-array and data structures for
                 simulating navigational operations on the suffix tree.
                 The LCP-array stores the lengths of the LCPs of
                 lexicographically adjacent suffixes, and it can be
                 computed in linear time. In this article, we present a
                 new LCP-array construction algorithm that is fast and
                 very space efficient. In practice, our algorithm
                 outperforms alternative algorithms. Moreover, we
                 introduce a new compressed representation of
                 LCP-arrays.",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
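
%%% For context, the classical linear-time LCP construction of Kasai et
%%% al., which the article improves upon in speed and space; this is a
%%% plain Python transcription of the well-known algorithm, not the
%%% authors' code.
%%%
%%%     def lcp_array(text, sa):
%%%         """Kasai et al.'s O(n) LCP construction from a text and
%%%         its suffix array sa."""
%%%         n = len(sa)
%%%         rank = [0] * n
%%%         for i, j in enumerate(sa):
%%%             rank[j] = i
%%%         lcp, h = [0] * n, 0
%%%         for j in range(n):                 # suffix starting at text[j:]
%%%             if rank[j] > 0:
%%%                 i = sa[rank[j] - 1]        # lexicographic predecessor
%%%                 while j + h < n and i + h < n and text[j + h] == text[i + h]:
%%%                     h += 1
%%%                 lcp[rank[j]] = h
%%%                 if h:
%%%                     h -= 1
%%%             else:
%%%                 h = 0
%%%         return lcp
%%%
%%%     text = "banana"
%%%     sa = sorted(range(len(text)), key=lambda i: text[i:])
%%%     print(sa, lcp_array(text, sa))  # [5, 3, 1, 0, 4, 2] [0, 1, 3, 0, 0, 2]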

@Article{Gorke:2013:DGC,
  author =       "Robert G{\"o}rke and Pascal Maillard and Andrea Schumm
                 and Christian Staudt and Dorothea Wagner",
  title =        "Dynamic graph clustering combining modularity and
                 smoothness",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "1.5:1--1.5:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2444016.2444021",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Mon May 6 18:55:51 MDT 2013",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Maximizing the quality index modularity has become one
                 of the primary methods for identifying the clustering
                 structure within a graph. Since many contemporary
                 networks are not static but evolve over time,
                 traditional static approaches can be inappropriate for
                 specific tasks. In this work, we pioneer the NP-hard
                 problem of online dynamic modularity maximization. We
                 develop scalable dynamizations of the currently fastest
                 and the most widespread static heuristics and engineer
                 a heuristic dynamization of an optimal static
                 algorithm. Our algorithms efficiently maintain a
                 modularity-based clustering of a graph for which
                 dynamic changes arrive as a stream. For our quickest
                 heuristic we prove a tight bound on its number of
                 operations. In an experimental evaluation on both a
                 real-world dynamic network and on dynamic clustered
                 random graphs, we show that the dynamic maintenance of
                 a clustering of a changing graph yields higher
                 modularity than recomputation, guarantees much smoother
                 clustering dynamics, and requires much lower runtimes.
                 We conclude by giving sound recommendations for the
                 choice of an algorithm.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Hofri:2013:OSS,
  author =       "Micha Hofri",
  title =        "Optimal selection and sorting via dynamic
                 programming",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "2.3:1--2.3:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2444016.2493373",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We show how to find optimal algorithms for the
                 selection of one or more order statistics over a small
                 set of numbers, and as an extreme case, complete
                 sorting. The criterion is using the smallest number of
                 comparisons; separate derivations are performed for
                 minimization on the average (over all permutations) or
                 in the worst case. When the computational process
                 establishes the optimal values, it also generates
                 C-language functions that implement policies which
                 achieve those optimal values. The search for the
                 algorithms is driven by a Markov decision process, and
                 the program provides the optimality proof as well.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Kouri:2013:FRM,
  author =       "Tina M. Kouri and Dinesh P. Mehta",
  title =        "Faster reaction mapping through improved naming
                 techniques",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "2.5:1--2.5:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2532569",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Automated reaction mapping is an important tool in
                 cheminformatics where it may be used to classify
                 reactions or validate reaction mechanisms. The reaction
                 mapping problem is known to be NP-Hard and may be
                 formulated as an optimization problem. In this article,
                 we present four algorithms that continue to obtain
                 optimal solutions to this problem, but with
                 significantly improved runtimes over the previous
                 Constructive Count Vector (CCV) algorithm. Our
                 algorithmic improvements include (i) the use of a fast
                 (but not 100\% accurate) canonical labeling algorithm,
                 (ii) name reuse (i.e., storing intermediate results
                 rather than recomputing), and (iii) an incremental
                 approach to canonical name computation. The time to map
                 the reactions from the KEGG/LIGAND database previously
                 took over 2 days using CCV, but now it takes less than
                 4 hours to complete. Experimental results on chemical
                 reaction databases demonstrate our 2-CCV FDN MS
                 algorithm usually performs over fifteen times faster
                 than previous automated reaction mapping algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Kouzinopoulos:2013:EOT,
  author =       "Charalampos S. Kouzinopoulos and Konstantinos G.
                 Margaritis",
  title =        "Exact online two-dimensional pattern matching using
                 multiple pattern matching algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "1",
  pages =        "2.4:1--2.4:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2513148",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Wed May 21 14:36:03 MDT 2014",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Baker and Bird and Baeza-Yates and Regnier are two of
                 the most efficient and widely used algorithms for exact
                 online two-dimensional pattern matching. Both use the
                 automaton of the Aho--Corasick multiple pattern
                 matching algorithm to locate all the occurrences of a
                 two-dimensional pattern in a two-dimensional input
                 string, a data structure that many consider
                 inefficient, especially when used to process long
                 patterns or data using large alphabet sizes. This
                 article presents variants of the Baker and Bird and the
                 Baeza-Yates and R{\'e}gnier algorithms that use the data
                 structures of the Set Horspool, Wu-Manber, Set Backward
                 Oracle Matching, and SOG multiple pattern matching
                 algorithms in place of the automaton of Aho--Corasick
                 and evaluates their performance experimentally in terms
                 of preprocessing and searching time.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Nagarajan:2013:EEI,
  author =       "Chandrashekhar Nagarajan and David P. Williamson",
  title =        "An Experimental Evaluation of Incremental and
                 Hierarchical $k$-Median Algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "18",
  number =       "??",
  pages =        "3.2:1--3.2:??",
  month =        dec,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2543628",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:23:43 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In this article, we consider different incremental and
                 hierarchical $k$-median algorithms with provable
                 performance guarantees and compare their running times
                 and quality of output solutions on different benchmark
                 $k$-median datasets. We determine that the quality of
                 solutions output by these algorithms for all the
                 datasets is much better than their performance
                 guarantees suggest. Since some of the incremental
                 $k$-median algorithms require approximate solutions for
                 the $k$-median problem, we also compare the running
                 times and solution quality of some existing $k$-median
                 algorithms on these datasets.",
  acknowledgement = ack-nhfb,
  articleno =    "3.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Gonzalez:2015:LCS,
  author =       "Rodrigo Gonz{\'a}lez and Gonzalo Navarro and
                 H{\'e}ctor Ferrada",
  title =        "Locally Compressed Suffix Arrays",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "1.1:1--1.1:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2594408",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/datacompression.bib;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We introduce a compression technique for suffix
                 arrays. It is sensitive to the compressibility of the
                 text and local, meaning that random portions of the
                 suffix array can be decompressed by accessing mostly
                 contiguous memory areas. This makes decompression very
                 fast, especially when various contiguous cells must be
                 accessed. Our main technical contributions are the
                 following. First, we show that runs of consecutive
                 values that are known to appear in the function
                 $\Psi(i) = A^{-1}[A[i] + 1]$ of suffix arrays $A$ of
                 compressible texts also show up as repetitions in the
                 differential suffix array $A'[i] = A[i] - A[i - 1]$.
                 Second, we use Re-Pair, a grammar-based compressor, to
                 compress the differential suffix array, and upper bound
                 its compression ratio in terms of the number of runs.
                 Third, we show how to compact the space used by the
                 grammar rules by up to 50\%, while still permitting
                 direct access to the rules. Fourth, we develop specific
                 variants of Re-Pair that work using knowledge of $ \Psi
                 $, and use much less space than the general Re-Pair
                 compressor, while achieving almost the same compression
                 ratios. Fifth, we implement the scheme and compare it
                 exhaustively with previous work, including the first
                 implementations of previous theoretical proposals.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
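
%%% A minimal Python sketch (ours, not the authors' implementation) of
%%% the two objects the abstract relates: Psi(i) = A^{-1}[A[i] + 1] and
%%% the differential array A'[i] = A[i] - A[i-1]. The naive suffix-array
%%% construction and the wrap-around for the last suffix are simplifying
%%% assumptions for the demo.
%%%
%%%     def suffix_array(text):
%%%         # Naive construction; fine for a small example.
%%%         return sorted(range(len(text)), key=lambda i: text[i:])
%%%
%%%     def psi(sa):
%%%         n = len(sa)
%%%         rank = [0] * n              # rank[j] = position of suffix j in sa
%%%         for i, j in enumerate(sa):
%%%             rank[j] = i
%%%         # Psi(i) = rank[sa[i] + 1]; wrap the last suffix around.
%%%         return [rank[(sa[i] + 1) % n] for i in range(n)]
%%%
%%%     def differential(sa):
%%%         return [sa[i] - sa[i - 1] for i in range(1, len(sa))]
%%%
%%%     text = "abracadabra$"
%%%     sa = suffix_array(text)
%%%     print("A  =", sa)
%%%     print("Psi=", psi(sa))
%%%     print("A' =", differential(sa))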

@Article{Doerr:2015:RRP,
  author =       "Benjamin Doerr and Magnus Wahlstr{\"o}m",
  title =        "Randomized Rounding in the Presence of a Cardinality
                 Constraint",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "1.2:1--1.2:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2594409",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the problem of generating randomized
                 roundings that satisfy a single cardinality constraint
                 and admit Chernoff-type large deviation bounds for
                 weighted sums of the variables. That this can be done
                 efficiently was proven by Srinivasan [2001]; a
                 different approach was later given by the first author
                 [Doerr 2006]. In this work, we (a) present an improved
                 version of the bitwise derandomization given by Doerr,
                 (b) give the first derandomization of Srinivasan's
                 tree-based randomized approach and prove its
                 correctness, and (c) experimentally compare the
                 resulting algorithms. Our experiments show that adding
                 a single cardinality constraint typically reduces the
                 rounding errors and only moderately increases the
                 running times. In general, our derandomization of the
                 tree-based approach is superior to the derandomized
                 bitwise one, while the two randomized versions produce
                 very similar rounding errors. When implementing the
                 derandomized tree-based approach, however, the choice
                 of the tree is important.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
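
%%% A Python sketch (ours) of the sum-preserving pairwise rounding step
%%% underlying the randomized schemes compared in this article; the
%%% derandomizations studied there replace the coin flips. Assumes x in
%%% [0,1]^n with integral total sum.
%%%
%%%     import random
%%%
%%%     def pair_round(x, eps=1e-9):
%%%         """Round x to {0,1}^n, preserving sum(x) exactly and each
%%%         marginal E[x_i]."""
%%%         x = list(x)
%%%         while True:
%%%             frac = [k for k, v in enumerate(x) if eps < v < 1 - eps]
%%%             if len(frac) < 2:
%%%                 break
%%%             i, j = frac[0], frac[1]
%%%             d1 = min(1 - x[i], x[j])   # shift d1 from x_j to x_i ...
%%%             d2 = min(x[i], 1 - x[j])   # ... or d2 from x_i to x_j
%%%             if random.random() < d2 / (d1 + d2):
%%%                 x[i], x[j] = x[i] + d1, x[j] - d1
%%%             else:
%%%                 x[i], x[j] = x[i] - d2, x[j] + d2
%%%         return [round(v) for v in x]
%%%
%%%     print(pair_round([0.3, 0.7, 0.5, 0.5]))   # always exactly two 1s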

@Article{Auer:2015:EMC,
  author =       "B. O. Fagginger Auer and R. H. Bisseling",
  title =        "Efficient Matching for Column Intersection Graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "1.3:1--1.3:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2616587",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "To improve the quality and efficiency of
                 hypergraph-based matrix partitioners, we investigate
                 high-quality matchings in column intersection graphs of
                 large sparse binary matrices. We show that such
                 algorithms have a natural decomposition in an
                 integer-weighted graph-matching function and a
                 neighbor-finding function and study the performance of
                 16 combinations of these functions. We improve upon the
                 original matching algorithm of the Mondriaan matrix
                 partitioner: by using PGA', we improve the average
                 matching quality from 95.3\% to 97.4\% of the optimum
                 value; by using our new neighbor-finding heuristic, we
                 obtain comparable quality and speedups of up to a
                 factor of 19.6.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
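
%%% A minimal Python example (ours) of one simple integer-weighted
%%% matching component in the design space the article explores: the
%%% classic greedy scan in order of decreasing weight, a well-known
%%% 1/2-approximation for maximum weight matching.
%%%
%%%     def greedy_matching(edges):
%%%         """edges: (weight, u, v) triples."""
%%%         matched, M, total = set(), [], 0
%%%         for w, u, v in sorted(edges, reverse=True):
%%%             if u not in matched and v not in matched:
%%%                 matched.update((u, v))
%%%                 M.append((u, v))
%%%                 total += w
%%%         return M, total
%%%
%%%     print(greedy_matching([(5, "a", "b"), (4, "b", "c"), (3, "c", "d")]))
%%%     # -> ([('a', 'b'), ('c', 'd')], 8)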

@Article{Angione:2015:SMB,
  author =       "Claudio Angione and Annalisa Occhipinti and Giuseppe
                 Nicosia",
  title =        "Satisfiability by {Maxwell--Boltzmann} and
                 {Bose--Einstein} Statistical Distributions",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "1.4:1--1.4:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2629498",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Recent studies in theoretical computer science have
                 exploited new algorithms and methodologies based on
                 statistical physics for investigating the structure and
                 the properties of the Satisfiability (SAT) problem. We
                 propose a characterization of the SAT problem as a
                 physical system, using both quantum and classical
                 statistical-physical models. We associate a graph to an
                 SAT instance and we prove that a Bose--Einstein
                 condensation occurs in the instance with higher
                 probability if the quantum distribution is adopted in
                 the generation of the graph. Conversely, the
                 fit-get-rich behavior is more likely if we adopt the
                 Maxwell--Boltzmann distribution. Our method allows a
                 comprehensive analysis of the SAT problem based on a
                 new definition of entropy of an instance, without
                 requiring the computation of its truth assignments. The
                 entropy of an SAT instance increases in the
                 satisfiability region as the number of free variables
                 in the instance increases. Finally, we develop six new
                 solvers for the MaxSAT problem based on quantum and
                 classical statistical distributions, and we test them
                 on random SAT instances, with competitive results. We
                 show experimentally that the performance of the
                 solvers based on the two distributions depends on the
                 criterion used to flag clauses as satisfied in the SAT
                 solving process.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Frieder:2015:ESA,
  author =       "Asaf Frieder and Liam Roditty",
  title =        "An Experimental Study on Approximating $k$ Shortest
                 Simple Paths",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "1.5:1--1.5:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2630068",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We have conducted an extensive experimental study on
                 approximation algorithms for computing $k$ shortest
                 simple paths in weighted directed graphs. Very
                 recently, Bernstein [2010] presented an algorithm that
                 computes a $(1 + \epsilon)$-approximation of the $k$
                 shortest simple paths in $O(\epsilon^{-1} k(m + n \log
                 n) \log^2 n)$ time. We have implemented Bernstein's algorithm and
                 tested it on synthetic inputs and real-world graphs
                 (road maps). Our results reveal that Bernstein's
                 algorithm has practical value in many scenarios.
                 Moreover, in most cases it produces exact paths rather
                 than approximate ones. We also present a
                 new variant for Bernstein's algorithm. We prove that
                 our new variant has the same upper bounds for the
                 running time and approximation as Bernstein's original
                 algorithm. We have implemented and tested this variant
                 as well. Our testing shows that this variant, which is
                 based on a simple theoretical observation, is better
                 than Bernstein's algorithm in practice.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
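
%%% For an exact baseline, k shortest simple paths can be enumerated
%%% with NetworkX's generator (a Yen-style method); Bernstein's
%%% approximation, the article's subject, is considerably more
%%% involved. Using NetworkX here is our assumption, not the authors'
%%% testbed.
%%%
%%%     from itertools import islice
%%%     import networkx as nx
%%%
%%%     def k_shortest_simple_paths(G, s, t, k, weight="weight"):
%%%         # shortest_simple_paths yields simple s-t paths in order
%%%         # of increasing total weight.
%%%         return list(islice(nx.shortest_simple_paths(G, s, t,
%%%                                                     weight=weight), k))
%%%
%%%     G = nx.Graph()
%%%     G.add_weighted_edges_from(
%%%         [(0, 1, 1), (1, 2, 1), (0, 2, 3), (0, 3, 1), (3, 2, 1)])
%%%     print(k_shortest_simple_paths(G, 0, 2, 3))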

@Article{Gorke:2015:EDC,
  author =       "Robert G{\"o}rke and Andrea Kappes and Dorothea
                 Wagner",
  title =        "Experiments on Density-Constrained Graph Clustering",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "1.6:1--1.6:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2638551",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Clustering a graph means identifying internally dense
                 subgraphs that are only sparsely interconnected.
                 Formalizations of this notion lead to measures that
                 quantify the quality of a clustering and to algorithms
                 that actually find clusterings. Since, most generally,
                 corresponding optimization problems are hard, heuristic
                 clustering algorithms are used in practice, or other
                 approaches that are not based on an objective function.
                 In this work, we conduct a comprehensive experimental
                 evaluation of the qualitative behavior of greedy
                 bottom-up heuristics driven by cut-based objectives and
                 constrained by intracluster density, using both
                 real-world data and artificial instances. Our study
                 documents that a greedy strategy based on local
                 movement is superior to one based on merging. We
                 further reveal that the former approach generally
                 outperforms alternative setups and reference algorithms
                 from the literature in terms of its own objective,
                 while a modularity-based algorithm competes
                 surprisingly well. Finally, we exhibit which
                 combinations of cut-based inter- and intracluster
                 measures are suitable for identifying a hidden
                 reference clustering in synthetic random graphs and
                 discuss the skewness of the resulting cluster size
                 distributions. Our results serve as a guideline to the
                 usage of bicriterial, cut-based measures for graph
                 clusterings.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Galvao:2015:ATG,
  author =       "Gustavo Rodrigues Galv{\~a}o and Zanoni Dias",
  title =        "An Audit Tool for Genome Rearrangement Algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "1.7:1--1.7:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2661633",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the combinatorial problem of sorting a
                 permutation using a minimum number of rearrangement
                 events, which finds application in the estimation of
                 evolutionary distance between species. Many variants of
                 this problem, which we generically refer to as the
                 rearrangement sorting problem, have been tackled in the
                 literature, and for most of them, the best known
                 algorithms are approximations or heuristics. In this
                 article, we present a tool, called GRAAu, to aid in the
                 evaluation of the results produced by these algorithms.
                 To illustrate its application, we use GRAAu to evaluate
                 the results of four approximation algorithms regarding
                 two variants of the rearrangement sorting problem: the
                 problem of sorting by prefix reversals and the problem
                 of sorting by prefix transpositions. As a result, we
                 show that the approximation ratios of three algorithms
                 are tight and conjecture that the approximation ratio
                 of the remaining one is also tight.",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Grossi:2015:FCT,
  author =       "Roberto Grossi and Giuseppe Ottaviano",
  title =        "Fast Compressed Tries through Path Decompositions",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "1.8:1--1.8:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2656332",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/datacompression.bib;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Tries are popular data structures for storing a set of
                 strings, where common prefixes are represented by
                 common root-to-node paths. More than 50 years of usage
                 have produced many variants and implementations to
                 overcome some of their limitations. We explore new
                 succinct representations of path-decomposed tries and
                 experimentally evaluate the corresponding reduction in
                 space usage and memory latency, comparing with the
                 state of the art. We study the following applications:
                 compressed string dictionary and monotone minimal
                 perfect hash for strings. In compressed string
                 dictionary, we obtain data structures that outperform
                 other state-of-the-art compressed dictionaries in space
                 efficiency while obtaining predictable query times that
                 are competitive with data structures preferred by the
                 practitioners. On real-world datasets, our compressed
                 tries obtain the smallest space (except for one case)
                 and have the fastest lookup times, whereas access times
                 are at most 20\% slower than the best-known solutions.
                 In monotone minimal perfect hash for strings, our
                 compressed tries perform several times faster than
                 other trie-based monotone perfect hash functions while
                 occupying nearly the same space. On real-world
                 datasets, our tries are approximately 2 to 5 times
                 faster than previous solutions, with a space occupancy
                 less than 10\% larger.",
  acknowledgement = ack-nhfb,
  articleno =    "1.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Jurkiewicz:2015:MVA,
  author =       "Tomasz Jurkiewicz and Kurt Mehlhorn",
  title =        "On a Model of Virtual Address Translation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "1.9:1--1.9:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2656337",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Modern computers are not Random Access Machines
                 (RAMs). They have a memory hierarchy, multiple cores,
                 and virtual memory. We address the computational cost
                 of address translation in virtual memory. The
                 starting point for our work on virtual memory is the
                 observation that the analysis of some simple algorithms
                 (random scan of an array, binary search, heapsort) in
                 either the RAM model or the External Memory (EM) model
                 does not correctly predict growth rates of actual
                 running times. We propose the Virtual Address
                 Translation (VAT) model to account for the cost of
                 address translations and analyze the algorithms
                 mentioned and others in the model. The predictions
                 agree with the measurements. We also analyze the
                 VAT-cost of cache-oblivious algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "1.9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Klasing:2015:E,
  author =       "Ralf Klasing",
  title =        "Editorial",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "2.1:1--2.1:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2677196",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Safro:2015:ACS,
  author =       "Ilya Safro and Peter Sanders and Christian Schulz",
  title =        "Advanced Coarsening Schemes for Graph Partitioning",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "2.2:1--2.2:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2670338",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The graph partitioning problem is widely used and
                 studied in many practical and theoretical applications.
                 Today, multilevel strategies represent one of the most
                 effective and efficient generic frameworks for solving
                 this problem on large-scale graphs. Most of the
                 attention in designing multilevel partitioning
                 frameworks has been on the refinement phase. In this
                 work, we focus on the coarsening phase, which is
                 responsible for creating structures similar to the
                 original but smaller graphs. We compare different
                 matching- and AMG-based coarsening schemes, experiment
                 with the algebraic distance between nodes, and
                 demonstrate computational results on several classes of
                 graphs that emphasize the running time and quality
                 advantages of different coarsening schemes.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Navarro:2015:GDR,
  author =       "Gonzalo Navarro and Simon J. Puglisi and Daniel
                 Valenzuela",
  title =        "General Document Retrieval in Compact Space",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "2.3:1--2.3:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2670128",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/datacompression.bib;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Given a collection of documents and a query pattern,
                 document retrieval is the problem of obtaining
                 documents that are relevant to the query. The
                 collection is available beforehand so that a data
                 structure, called an index, can be built on it to speed
                 up queries. While initially restricted to natural
                 language text collections, document retrieval problems
                 arise nowadays in applications like bioinformatics,
                 multimedia databases, and web mining. This requires a
                 more general setup where text and pattern can be
                 general sequences of symbols, and the classical
                 inverted indexes developed for words cannot be applied.
                 While linear-space time-optimal solutions have been
                 developed for most interesting queries in this general
                 case, space usage is a serious problem in practice. In
                 this article, we develop compact data structures that
                 solve various important document retrieval problems on
                 general text collections. More specifically, we provide
                 practical solutions for listing the documents where a
                 query pattern appears, together with its frequency in
                 each document, and for listing $k$ documents where a
                 query pattern appears most frequently. Some of our
                 techniques build on existing theoretical proposals,
                 while others are new. In particular, we introduce a
                 novel grammar-based compressed bitmap representation
                 that may be of independent interest when dealing with
                 repetitive sequences. Ours are the first practical
                 indexes that use less space when the text collection is
                 compressible. Our experimental results show that, in
                 various real-life text collections, our data structures
                 are significantly smaller than the most space-efficient
                 previous solutions, using up to half the space without
                 noticeably increasing the query time. Overall, document
                 listing can be carried out in 10 to 40 milliseconds for
                 patterns that appear 100 to 10,000 times in the
                 collection, whereas top-$k$ retrieval is carried out in
                 $k$ to $10k$ milliseconds.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Moruz:2015:EEP,
  author =       "Gabriel Moruz and Andrei Negoescu and Christian
                 Neumann and Volker Weichert",
  title =        "Engineering Efficient Paging Algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "2.4:1--2.4:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2670127",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In the field of online algorithms, paging is a
                 well-studied problem. LRU is a simple paging algorithm
                 that incurs few cache misses and supports efficient
                 implementations. Algorithms outperforming LRU in terms
                 of cache misses exist but are in general more complex
                 and thus not automatically better, since their
                 increased runtime might annihilate the gains in cache
                 misses. In this article, we focus on efficient
                 implementations for the OnOPT class described in Moruz
                 and Negoescu [2012], particularly on an algorithm in
                 this class, denoted RDM, that was shown to typically
                 incur fewer misses than LRU. We provide experimental
                 evidence on a wide range of cache traces showing that
                 our implementation of RDM is competitive with LRU with
                 respect to runtime. In a scenario incurring realistic
                 time penalties for cache misses, we show that our
                 implementation consistently outperforms LRU, even if
                 the runtime of LRU is set to zero.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
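
%%% The LRU baseline the article measures against, in a minimal Python
%%% form (ours): an ordered dict gives constant-time hit and eviction
%%% handling. The engineered RDM implementation is much more involved.
%%%
%%%     from collections import OrderedDict
%%%
%%%     def lru_misses(trace, k):
%%%         """Count cache misses of LRU with k frames on a page trace."""
%%%         cache, misses = OrderedDict(), 0
%%%         for page in trace:
%%%             if page in cache:
%%%                 cache.move_to_end(page)        # most recently used
%%%             else:
%%%                 misses += 1
%%%                 if len(cache) == k:
%%%                     cache.popitem(last=False)  # evict least recently used
%%%                 cache[page] = None
%%%         return misses
%%%
%%%     print(lru_misses("abcabdacd", 3))   # -> 5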

@Article{Kirchler:2015:ECS,
  author =       "Dominik Kirchler and Leo Liberti and Roberto Wolfler
                 Calvo",
  title =        "Efficient Computation of Shortest Paths in
                 Time-Dependent Multi-Modal Networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "2.5:1--2.5:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2670126",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider shortest paths on time-dependent
                 multimodal transportation networks in which
                 restrictions or preferences on the use of certain modes
                 of transportation may arise. We model restrictions and
                 preferences by means of regular languages. Methods for
                 solving the corresponding problem (called the regular
                 language constrained shortest path problem) already
                 exist. We propose a new algorithm, called State
                 Dependent ALT (SDALT), which runs considerably faster
                 in many scenarios. Speed-up magnitude depends on the
                 type of constraints. We present different versions of
                 SDALT, including unidirectional and bidirectional
                 search. We also provide extensive experimental results
                 on realistic multimodal transportation networks.",
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Manlove:2015:PAK,
  author =       "David F. Manlove and Gregg O'Malley",
  title =        "Paired and Altruistic Kidney Donation in the {UK}:
                 Algorithms and Experimentation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "2.6:1--2.6:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2670129",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study the computational problem of identifying
                 optimal sets of kidney exchanges in the UK. We show how
                 to expand an integer programming-based formulation due
                 to Roth et al. [2007] in order to model the criteria
                 that constitute the UK definition of optimality. The
                 software arising from this work has been used by the
                 National Health Service Blood and Transplant to find
                 optimal sets of kidney exchanges for their National
                 Living Donor Kidney Sharing Schemes since July 2008. We
                 report on the characteristics of the solutions that
                 have been obtained in matching runs of the scheme since
                 this time. We then present empirical results arising
                 from experiments on the real datasets that stem from
                 these matching runs, with the aim of establishing the
                 extent to which the particular optimality criteria that
                 are present in the UK influence the structure of the
                 solutions that are ultimately computed. A key
                 observation is that allowing four-way exchanges would
                 be likely to lead to a moderate number of additional
                 transplants.",
  acknowledgement = ack-nhfb,
  articleno =    "2.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
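
%%% A toy Python version (ours) of the cycle formulation in the spirit
%%% of Roth et al. [2007], which the article extends with the UK
%%% optimality criteria. The `pulp` modeling library, the cycle length
%%% cap, and all names are assumptions for illustration only.
%%%
%%%     import itertools, pulp
%%%
%%%     def kidney_exchange(compatible, max_len=3):
%%%         """compatible: (donor_pair, recipient_pair) arcs of the
%%%         compatibility digraph; maximizes the number of transplants."""
%%%         nodes = set(u for u, v in compatible) | set(v for u, v in compatible)
%%%         arcs = set(compatible)
%%%         cycles = []
%%%         for L in range(2, max_len + 1):
%%%             for cyc in itertools.permutations(nodes, L):
%%%                 if cyc[0] == min(cyc) and all(
%%%                         (cyc[i], cyc[(i + 1) % L]) in arcs for i in range(L)):
%%%                     cycles.append(cyc)         # dedup rotations
%%%         prob = pulp.LpProblem("kidney", pulp.LpMaximize)
%%%         x = [pulp.LpVariable(f"c{i}", cat="Binary")
%%%              for i in range(len(cycles))]
%%%         prob += pulp.lpSum(len(c) * x[i] for i, c in enumerate(cycles))
%%%         for v in nodes:                        # vertex-disjoint cycles
%%%             prob += pulp.lpSum(x[i] for i, c in enumerate(cycles)
%%%                                if v in c) <= 1
%%%         prob.solve()
%%%         return [c for i, c in enumerate(cycles) if x[i].value() == 1]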

@Article{Luxen:2015:CSA,
  author =       "Dennis Luxen and Dennis Schieferdecker",
  title =        "Candidate Sets for Alternative Routes in Road
                 Networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "2.7:1--2.7:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2674395",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study the computation of good alternatives to the
                 shortest path in road networks. Our approach is based
                 on single via-node routing on top of contraction
                 hierarchies and achieves superior quality and
                 efficiency compared to previous methods. We present a
                 fast preprocessing method for computing multiple good
                 alternatives and apply this result in an online
                 setting. This setting makes our result applicable in
                 legacy systems with negligible memory overhead. An
                 extensive experimental analysis on a continental-sized
                 real-world road network demonstrates the performance of our
                 algorithm and supports the general systematic algorithm
                 engineering approach. We also show how to combine our
                 results with the competing concept of alternative
                 graphs that encode many alternative paths at once.",
  acknowledgement = ack-nhfb,
  articleno =    "2.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
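
%%% A highly simplified Python sketch (ours) of the single via-node
%%% idea: rank candidate via nodes v by the length of the route
%%% s -> v -> t. The admissibility criteria the paper actually uses
%%% (e.g., limited sharing, local optimality) and the contraction
%%% hierarchy machinery are omitted; NetworkX and an undirected graph
%%% are assumptions for the demo.
%%%
%%%     import networkx as nx
%%%
%%%     def via_node_alternatives(G, s, t, stretch=1.25, weight="weight"):
%%%         """Keep via nodes whose combined route length is within
%%%         `stretch` of the shortest s-t path."""
%%%         ds = nx.single_source_dijkstra_path_length(G, s, weight=weight)
%%%         dt = nx.single_source_dijkstra_path_length(G, t, weight=weight)
%%%         opt = ds[t]
%%%         return sorted((ds[v] + dt[v], v) for v in G
%%%                       if v in ds and v in dt
%%%                       and ds[v] + dt[v] <= stretch * opt)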

@Article{Bader:2015:ISI,
  author =       "David A. Bader and Petra Mutzel",
  title =        "Introduction to Special Issue {ALENEX'12}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "3.1:1--3.1:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2721893",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "3.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Dibbelt:2015:UCM,
  author =       "Julian Dibbelt and Thomas Pajor and Dorothea Wagner",
  title =        "User-Constrained Multimodal Route Planning",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "19",
  number =       "??",
  pages =        "3.2:1--3.2:??",
  month =        feb,
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2699886",
  ISSN =         "1084-6654",
  bibdate =      "Fri Apr 3 16:22:03 MDT 2015",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In the multimodal route planning problem, we are given
                 multiple transportation networks (e.g., pedestrian,
                 road, public transit) and ask for a best integrated
                 journey between two points. The main challenge is that
                 a seemingly optimal journey may have changes between
                 networks that do not reflect the user's modal
                 preferences. In fact, quickly computing reasonable
                 multimodal routes remains a challenging problem:
                 previous approaches either suffer from poor query
                 performance or offer only a limited choice of modal
                 preferences at query time. In this work,
                 we focus on computing exact multimodal journeys that
                 can be restricted by specifying arbitrary modal
                 sequences at query time. For example, a user can say
                 whether he or she wants to only use public transit,
                 prefers to also use a taxi or walking at the beginning
                 or end of the journey, or has no restrictions at all.
                 By carefully adapting node contraction, a common
                 ingredient to many speedup techniques on road networks,
                 we are able to compute point-to-point queries on a
                  continental network combining cars, railroads, and
                  flights several orders of magnitude faster than
                  Dijkstra's algorithm. Moreover, we require little space
                 overhead and obtain fast preprocessing times.",
  acknowledgement = ack-nhfb,
  articleno =    "3.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
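
%%% The query model described above (arbitrary modal sequences fixed
%%% at query time) is classically handled by running Dijkstra's
%%% algorithm on the product of the network and a finite automaton
%%% over mode labels.  A minimal sketch of that baseline -- the
%%% paper's contribution is to accelerate it with node contraction;
%%% the data layout here is purely illustrative:
%%%
%%%   import heapq
%%%
%%%   def constrained_dijkstra(graph, dfa, s, t):
%%%       # graph: {u: [(v, weight, mode), ...]}
%%%       # dfa: {"start": q0, "accept": {..}, "delta": {(q, mode): q'}}
%%%       dist = {(s, dfa["start"]): 0.0}
%%%       pq = [(0.0, s, dfa["start"])]
%%%       while pq:
%%%           d, u, q = heapq.heappop(pq)
%%%           if u == t and q in dfa["accept"]:
%%%               return d                      # cheapest feasible journey
%%%           if d > dist.get((u, q), float("inf")):
%%%               continue
%%%           for v, w, mode in graph.get(u, []):
%%%               q2 = dfa["delta"].get((q, mode))
%%%               if q2 is None:
%%%                   continue                  # mode not allowed here
%%%               if d + w < dist.get((v, q2), float("inf")):
%%%                   dist[(v, q2)] = d + w
%%%                   heapq.heappush(pq, (d + w, v, q2))
%%%       return float("inf")
%%%
%%% For example, the sequence "walk, then transit, then walk" is the
%%% automaton with states 0-2, transitions {(0,'foot'):0,
%%% (0,'transit'):1, (1,'transit'):1, (1,'foot'):2, (2,'foot'):2},
%%% all states accepting.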

@Article{Chimani:2015:UPT,
  author =       "Markus Chimani and Robert Zeranski",
  title =        "Upward Planarity Testing in Practice: {SAT}
                 Formulations and Comparative Study",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "20",
  number =       "??",
  pages =        "1.2:1--1.2:??",
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2699875",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:47:42 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A directed acyclic graph (DAG) is upward planar if it
                  can be drawn without any crossings while all edges,
                  when followed in their direction, are drawn with
                  strictly monotonically increasing $y$-coordinates.
                 Testing whether a graph allows such a drawing is known
                 to be NP-complete, and while the problem is
                 polynomial-time solvable for special graph classes,
                 there is not much known about solving the problem for
                 general graphs in practice. The only attempt so far has
                 been a branch-and-bound algorithm over the graph's
                 triconnectivity structure, which was able to solve
                 small graphs. Furthermore, there are some known FPT
                 algorithms to deal with the problem. In this article,
                 we propose two fundamentally different approaches based
                 on the seemingly novel concept of ordered embeddings
                 and on the concept of a Hanani--Tutte-type
                 characterization of monotone drawings. In both
                 approaches, we model the problem as special SAT
                 instances, that is, logic formulae for which we check
                 satisfiability. Solving these SAT instances allows us
                 to decide upward planarity for arbitrary graphs. For
                 the first time, we give an extensive experimental
                 comparison between virtually all known approaches to
                 the problem. To this end, we also investigate
                 implementation issues and different variants of the
                 known algorithms as well as of our SAT approaches and
                 evaluate all algorithms on real-world as well as on
                 constructed instances. We also give a detailed
                 performance study of the novel SAT approaches. We show
                 that the SAT formulations outperform all known
                 approaches for graphs with up to 400 edges. For even
                 larger graphs, a modified branch-and-bound algorithm
                 becomes competitive.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
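
%%% To make "model the problem as a SAT instance" concrete, here is
%%% the kind of ordering-variable skeleton such encodings build on: a
%%% Boolean x(u,v) per ordered pair meaning "u lies below v", with
%%% totality, antisymmetry, and transitivity clauses, plus one unit
%%% clause per directed edge.  This fragment only enforces a
%%% y-ordering (i.e., acyclicity); the paper's ordered-embedding and
%%% Hanani--Tutte formulations add the planarity side, and all names
%%% here are ours:
%%%
%%%   def order_cnf(n, edges):
%%%       # Returns (clauses, var_index) in DIMACS convention.
%%%       idx = {}
%%%       def x(u, v):                     # Boolean "u comes before v"
%%%           if (u, v) not in idx:
%%%               idx[(u, v)] = len(idx) + 1
%%%           return idx[(u, v)]
%%%       cnf = []
%%%       for u in range(n):
%%%           for v in range(u + 1, n):
%%%               cnf.append([x(u, v), x(v, u)])     # totality
%%%               cnf.append([-x(u, v), -x(v, u)])   # antisymmetry
%%%       for u in range(n):
%%%           for v in range(n):
%%%               for w in range(n):
%%%                   if len({u, v, w}) == 3:        # u<v & v<w -> u<w
%%%                       cnf.append([-x(u, v), -x(v, w), x(u, w)])
%%%       for u, v in edges:
%%%           cnf.append([x(u, v)])                  # respect direction
%%%       return cnf, idx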

@Article{Cohen:2015:CGH,
  author =       "Nathann Cohen and David Coudert and Aur{\'e}lien
                 Lancin",
  title =        "On Computing the {Gromov} Hyperbolicity",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "20",
  number =       "??",
  pages =        "1.6:1--1.6:??",
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2780652",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:47:42 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The Gromov hyperbolicity is an important parameter for
                  analyzing complex networks, as it expresses how closely
                  the metric structure of a network resembles a tree. It is
                 for instance used to provide bounds on the expected
                 stretch of greedy-routing algorithms in Internet-like
                 graphs. However, the best-known theoretical algorithm
                  computing this parameter runs in $O(n^{3.69})$ time,
                 which is prohibitive for large-scale graphs. In this
                 article, we propose an algorithm for determining the
                 hyperbolicity of graphs with tens of thousands of
                 nodes. Its running time depends on the distribution of
                 distances and on the actual value of the hyperbolicity.
                  Although its worst-case runtime is $O(n^4)$, it is
                  in practice much faster than previous proposals, as
                  observed in our experiments. Finally, we propose a
                 heuristic algorithm that can be used on graphs with
                 millions of nodes. Our algorithms are all evaluated on
                 benchmark instances.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
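
%%% The parameter itself is easy to state through the four-point
%%% condition, which also explains the Theta(n^4) baseline the
%%% article improves on.  A direct (and deliberately naive)
%%% implementation, assuming a connected unweighted graph given as
%%% {u: set(neighbors)}:
%%%
%%%   from collections import deque
%%%   from itertools import combinations
%%%
%%%   def bfs(adj, src):
%%%       dist, q = {src: 0}, deque([src])
%%%       while q:
%%%           u = q.popleft()
%%%           for v in adj[u]:
%%%               if v not in dist:
%%%                   dist[v] = dist[u] + 1
%%%                   q.append(v)
%%%       return dist
%%%
%%%   def hyperbolicity(adj):
%%%       # delta = max over quadruples of half the difference between
%%%       # the two largest of the three pairwise distance sums.
%%%       D = {u: bfs(adj, u) for u in adj}
%%%       delta = 0.0
%%%       for a, b, c, d in combinations(adj, 4):
%%%           s = sorted([D[a][b] + D[c][d],
%%%                       D[a][c] + D[b][d],
%%%                       D[a][d] + D[b][c]])
%%%           delta = max(delta, (s[2] - s[1]) / 2.0)
%%%       return delta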

@Article{DAndrea:2015:DMS,
  author =       "Annalisa D'Andrea and Mattia D'Emidio and Daniele
                 Frigioni and Stefano Leucci and Guido Proietti",
  title =        "Dynamic Maintenance of a Shortest-Path Tree on
                 Homogeneous Batches of Updates: New Algorithms and
                 Experiments",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "20",
  number =       "??",
  pages =        "1.5:1--1.5:??",
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2786022",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:47:42 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A dynamic graph algorithm is called batch if it is
                 able to update efficiently the solution of a given
                 graph problem after multiple updates at a time (i.e., a
                 batch) take place on the input graph. In this article,
                 we study batch algorithms for maintaining a
                 single-source shortest-path tree in graphs with
                 positive real edge weights. In particular, we focus our
                 attention on homogeneous batches, that is, either
                 incremental (containing only edge insertion and weight
                 decrease operations) or decremental (containing only
                 edge deletion and weight increase operations) batches,
                 which model realistic dynamic scenarios like transient
                 vertex failures in communication networks and traffic
                 congestion/decongestion phenomena in road networks. We
                 propose two new algorithms to process either
                 incremental or decremental batches, respectively, and a
                 combination of these two algorithms that is able to
                 process arbitrary sequences of incremental and
                 decremental batches. All these algorithms are update
                 sensitive; namely, they are efficient with respect to
                 the number of vertices in the shortest-path tree that
                 change their parents and/or their distances from the
                  source as a consequence of a batch. This makes an
                  effective theoretical comparison of our new algorithms
                  with the solutions known in the literature infeasible,
                  since the latter are analyzed with respect to other,
                  different parameters. For this reason, in order to
                  evaluate the quality of our approach, we also provide
                  an extensive experimental study including
                 our new algorithms and the most efficient previous
                 batch algorithms. Our experimental results complement
                 previous studies and show that the various solutions
                 can be consistently ranked on the basis of the type of
                 homogeneous batch and of the underlying network. As a
                 result, our work can be helpful in selecting a proper
                 solution depending on the specific application
                 scenario.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
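
%%% The incremental half of the scheme (insertions and weight
%%% decreases) fits in a few lines: seed a priority queue with the
%%% endpoints whose distance improves, then repair with a
%%% Dijkstra-like scan that touches only affected vertices -- the
%%% "update sensitive" behavior described above.  A sketch under an
%%% assumed representation (graph as {u: {v: w}}); the decremental
%%% case, the delicate part of the article, is not shown:
%%%
%%%   import heapq
%%%
%%%   def incremental_batch(graph, dist, parent, batch):
%%%       pq = []
%%%       for u, v, w in batch:            # apply updates, seed the queue
%%%           graph.setdefault(u, {})[v] = w
%%%           if u in dist and dist[u] + w < dist.get(v, float("inf")):
%%%               dist[v], parent[v] = dist[u] + w, u
%%%               heapq.heappush(pq, (dist[v], v))
%%%       while pq:                        # Dijkstra-style repair
%%%           d, u = heapq.heappop(pq)
%%%           if d > dist.get(u, float("inf")):
%%%               continue
%%%           for v, w in graph.get(u, {}).items():
%%%               if d + w < dist.get(v, float("inf")):
%%%                   dist[v], parent[v] = d + w, u
%%%                   heapq.heappush(pq, (d + w, v))
%%%       return dist, parent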

@Article{Finocchi:2015:CCM,
  author =       "Irene Finocchi and Marco Finocchi and Emanuele G.
                 Fusco",
  title =        "Clique Counting in {MapReduce}: Algorithms and
                 Experiments",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "20",
  number =       "??",
  pages =        "1.7:1--1.7:??",
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2794080",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:47:42 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We tackle the problem of counting the number $q_k$ of
                  $k$-cliques in large-scale graphs, for any constant
                  $k \geq 3$. Clique counting is essential in a variety of
                 applications, including social network analysis. Our
                  algorithms make it possible to compute $q_k$ for
                  several real-world graphs and shed light on its growth
                  rate as a function of $k$. Even for small values of $k$,
                  the number $q_k$ of $k$-cliques can be in the order of
                  tens or hundreds of trillions. As $k$ increases,
                  different graph instances show different behaviors:
                  while on some graphs $q_{k + 1} < q_k$, on other
                  benchmarks $q_{k + 1} \gg q_k$, up to two orders of
                 magnitude in our observations. Graphs with steep clique
                 growth rates represent particularly tough instances in
                 practice. Due to the computationally intensive nature
                 of the clique counting problem, we settle for parallel
                  solutions in the MapReduce framework, which has in
                  recent years become a de facto standard for batch
                  processing of massive datasets. We give both
                 theoretical and experimental contributions. On the
                 theory side, we design the first exact scalable
                 algorithm for counting (and listing) $k$-cliques in
                 MapReduce. Our algorithm uses $O (m^{3 / 2})$ total
                 space and $O(m^{k / 2})$ work, where $m$ is the number
                 of graph edges. This matches the best-known bounds for
                 triangle listing when $k = 3$ and is work optimal in
                 the worst case for any $k$, while keeping the
                 communication cost independent of $k$. We also design
                 sampling-based estimators that can dramatically reduce
                 the running time and space requirements of the exact
                 approach, while providing very accurate solutions with
                 high probability. We then assess the effectiveness of
                 different clique counting approaches through an
                 extensive experimental analysis over the Amazon EC2
                 platform, considering both our algorithms and their
                 state-of-the-art competitors. The experimental results
                 clearly highlight the algorithm of choice in different
                 scenarios and prove our exact approach to be the most
                 effective when the number of $k$-cliques is large,
                 gracefully scaling to nontrivial values of $k$ even on
                 clusters of small/medium size. Our approximation
                 algorithms achieve extremely accurate estimates and
                 large speedups, especially on the toughest instances
                 for the exact algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
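
%%% The sequential kernel underlying such k-clique counters is
%%% recursive neighborhood intersection on a degree-ordered
%%% orientation of the graph; the article's contribution is carrying
%%% this kind of enumeration out in MapReduce with bounded
%%% communication.  A small in-memory sketch (ours, not the paper's
%%% code):
%%%
%%%   from collections import defaultdict
%%%
%%%   def count_k_cliques(edges, k):
%%%       adj = defaultdict(set)
%%%       for u, v in edges:
%%%           adj[u].add(v)
%%%           adj[v].add(u)
%%%       rank = {u: (len(adj[u]), u) for u in adj}  # degree order, ties by id
%%%       out = {u: {v for v in adj[u] if rank[v] > rank[u]} for u in adj}
%%%       def expand(cand, r):
%%%           # number of r-cliques all of whose vertices lie in cand
%%%           if r == 1:
%%%               return len(cand)
%%%           return sum(expand(out[u] & cand, r - 1) for u in cand)
%%%       return expand(set(adj), k)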

@Article{Hedtke:2015:UST,
  author =       "Ivo Hedtke",
  title =        "Upgrading Subgroup Triple-Product-Property Triples",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "20",
  number =       "??",
  pages =        "1.1:1--1.1:??",
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2699877",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:47:42 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In 2003, Cohn and Umans introduced a group-theoretic
                 approach to fast matrix multiplication. This involves
                 finding large subsets of a group satisfying the Triple
                 Product Property (TPP) as a means to bound the exponent
                 of matrix multiplication. Recently, Hedtke and Murthy
                 discussed several methods to find TPP triples. Because
                 the search space for subset triples is too large, it is
                 only possible to focus on subgroup triples. We present
                 methods to upgrade a given TPP triple to a bigger TPP
                 triple. If no upgrade is possible, we use reduction
                 methods (based on random experiments and heuristics) to
                 create a smaller TPP triple that can be used as input
                  for the upgrade methods. Applying one step of the
                  upgrade method for subgroup triples followed by the
                  upgrade process for subset triples to the known
                  maximal subgroup TPP triples in groups of order up to
                  1,000, we increase the triple size by 100\% in the
                  best case. Further, we test the upgrade
                 process with all examples from the 2003 and 2005 papers
                 from Cohn et al. and are able to increase the triple
                  size by 595\% in the best case (in the group
                  $D_6^5$).",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
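
%%% The property being upgraded has a short operational form: subsets
%%% S, T, U of a group satisfy the TPP if q1*q2*q3 = e, with q1, q2,
%%% q3 drawn from the quotient sets Q(S), Q(T), Q(U), forces
%%% q1 = q2 = q3 = e.  A brute-force verifier (our names; the paper's
%%% algorithms search for and enlarge such triples rather than merely
%%% test them):
%%%
%%%   def quotient(S, mul, inv):
%%%       # Q(S) = { inv(a)*b : a, b in S }
%%%       return {mul(inv(a), b) for a in S for b in S}
%%%
%%%   def has_tpp(S, T, U, mul, inv, e):
%%%       QS, QT, QU = (quotient(X, mul, inv) for X in (S, T, U))
%%%       for q1 in QS:
%%%           for q2 in QT:
%%%               for q3 in QU:
%%%                   if mul(mul(q1, q2), q3) == e and \
%%%                      not (q1 == e and q2 == e and q3 == e):
%%%                       return False
%%%       return True
%%%
%%% For the cyclic group Z_7 (mul = lambda a, b: (a + b) % 7,
%%% inv = lambda a: -a % 7, e = 0), the triple S = {0, 1}, T = {0, 2},
%%% U = {0} passes this test.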

@Article{Spence:2015:WCC,
  author =       "Ivor Spence",
  title =        "Weakening Cardinality Constraints Creates Harder
                 Satisfiability Benchmarks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "20",
  number =       "??",
  pages =        "1.4:1--1.4:??",
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2746239",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:47:42 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "For some time, the satisfiability formulae that have
                 been the most difficult to solve for their size have
                 been crafted to be unsatisfiable by the use of
                 cardinality constraints. Recent solvers have introduced
                 explicit checking of such constraints, rendering
                 previously difficult formulae trivial to solve. A
                 family of unsatisfiable formulae is described that is
                 derived from the sgen4 family but cannot be solved
                 using cardinality constraints detection and reasoning
                 alone. These formulae were found to be the most
                 difficult during the SAT2014 competition by a
                 significant margin and include the shortest unsolved
                 benchmark in the competition, sgen6-1200-5-1.cnf.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
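
%%% For readers unfamiliar with the constraints being weakened: an
%%% "at most k of these variables are true" constraint is typically
%%% expressed in CNF with auxiliary counter variables, e.g., Sinz's
%%% sequential-counter encoding, sketched below.  This only
%%% illustrates what recent solvers detect and reason about; it is
%%% not the sgen construction itself:
%%%
%%%   def at_most_k(xs, k, top):
%%%       # xs: DIMACS literals; top: largest variable number in use.
%%%       # Returns (clauses, new_top) asserting sum(xs) <= k.
%%%       n = len(xs)
%%%       if k >= n:
%%%           return [], top
%%%       if k == 0:
%%%           return [[-x] for x in xs], top
%%%       # s[i][j] is forced true when >= j+1 of xs[0..i] are true
%%%       s = [[top + i * k + j + 1 for j in range(k)] for i in range(n - 1)]
%%%       top += (n - 1) * k
%%%       cls = [[-xs[0], s[0][0]]] + [[-s[0][j]] for j in range(1, k)]
%%%       for i in range(1, n - 1):
%%%           cls.append([-xs[i], s[i][0]])
%%%           cls.append([-s[i - 1][0], s[i][0]])
%%%           for j in range(1, k):
%%%               cls.append([-xs[i], -s[i - 1][j - 1], s[i][j]])
%%%               cls.append([-s[i - 1][j], s[i][j]])
%%%           cls.append([-xs[i], -s[i - 1][k - 1]])
%%%       cls.append([-xs[n - 1], -s[n - 2][k - 1]])
%%%       return cls, top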

@Article{Ullmann:2015:DRL,
  author =       "Julian R. Ullmann",
  title =        "Degree Reduction in Labeled Graph Retrieval",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "20",
  number =       "??",
  pages =        "1.3:1--1.3:??",
  year =         "2015",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2699878",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:47:42 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Within a given collection of graphs, a graph retrieval
                 system may seek all graphs that contain a given graph,
                 or may instead seek all graphs that are contained
                 within a given graph. Although subgraph isomorphism is
                 worst-case exponential, it may be average-case
                 polynomial if graphs are labeled so as to restrict
                 possible correspondences between vertices of included
                 and includer graphs. Degree reduction is a procedure
                 that uses logical inference to preclude some such
                 correspondences, thereby substantially increasing the
                 size of includer graphs that can be processed, without
                 preventing any existent isomorphism from being found.
                 Degree reduction works only with labeled graphs, which
                 may be directed or undirected, with or without edge
                 labels. Inexact or approximate isomorphism is
                 accommodated by reducing strictness of conditions for
                 perfect isomorphism. Disk-based degree reduction, which
                 is an order of magnitude slower than memory-based
                 degree reduction, has successfully processed graphs
                 that have millions of vertices. Although the principle
                 of degree reduction is simple and fundamental, its
                 efficient practical implementation involves intricate
                 procedural detail. Its average-case complexity analysis
                 is currently intractable, so cost-benefit assessment
                 has to be experimental.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
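
%%% The flavor of "logical inference to preclude correspondences" can
%%% be conveyed by the classic refinement loop for labeled subgraph
%%% isomorphism: a target vertex g stays a candidate for pattern
%%% vertex p only while every neighbor of p still has a candidate
%%% among g's neighbors.  A much simplified, memory-based sketch (the
%%% article's disk-based procedure is far more intricate):
%%%
%%%   def refine(pat, tgt, plab, tlab):
%%%       # pat, tgt: {vertex: set(neighbors)}; plab, tlab: vertex labels
%%%       cand = {p: {g for g in tgt
%%%                   if tlab[g] == plab[p] and len(tgt[g]) >= len(pat[p])}
%%%               for p in pat}
%%%       changed = True
%%%       while changed:                   # iterate to a fixed point
%%%           changed = False
%%%           for p in pat:
%%%               keep = {g for g in cand[p]
%%%                       if all(cand[q] & tgt[g] for q in pat[p])}
%%%               if keep != cand[p]:
%%%                   cand[p], changed = keep, True
%%%       return cand                      # empty set anywhere: no match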

@Article{Bergner:2016:BPC,
  author =       "Martin Bergner and Marco E. L{\"u}bbecke and Jonas T.
                 Witt",
  title =        "A Branch-Price-and-Cut Algorithm for Packing Cuts in
                 Undirected Graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.2:1--1.2:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2851492",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The cut packing problem in an undirected graph is to
                 find a largest cardinality collection of pairwise
                  edge-disjoint cuts. We provide the first experimental
                  study of this NP-hard problem, which is interesting
                  from a pure theorist's viewpoint as well as from the
                  standpoint of scientific applications (e.g., in
                  bioinformatics and network reliability) but so far
                  could not be solved exactly. We propose a
                 branch-price-and-cut algorithm to optimally solve
                 instances from various graph classes, random and from
                 the literature, with up to several hundred vertices. In
                 particular, we investigate how complexity results match
                 computational experience and how combinatorial
                 properties help improve the algorithm's performance.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Bingmann:2016:ISL,
  author =       "Timo Bingmann and Johannes Fischer and Vitaly Osipov",
  title =        "Inducing Suffix and {LCP} Arrays in External Memory",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "2.3:1--2.3:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2975593",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider full text index construction in external
                 memory (EM). Our first contribution is an inducing
                 algorithm for suffix arrays in external memory, which
                 runs in sorting complexity. Practical tests show that
                 this algorithm outperforms the previous best EM suffix
                 sorter [Dementiev et al., JEA 2008] by a factor of
                 about two in time and I/O volume. Our second
                 contribution is to augment the first algorithm to also
                 construct the array of longest common prefixes (LCPs).
                 This yields a new internal memory LCP array
                 construction algorithm and the first EM construction
                 algorithm for LCP arrays. The overhead in time and I/O
                 volume for this extended algorithm over plain suffix
                 array construction is roughly two. Our algorithms scale
                 far beyond problem sizes previously considered in the
                  literature (text size of 80 GiB using only 4 GiB of RAM
                 in our experiments).",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Coudert:2016:EEB,
  author =       "David Coudert and Dorian Mazauric and Nicolas Nisse",
  title =        "Experimental Evaluation of a Branch-and-Bound
                 Algorithm for Computing Pathwidth and Directed
                 Pathwidth",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.3:1--1.3:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2851494",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Path decompositions of graphs are an important
                 ingredient of dynamic programming algorithms for
                 solving efficiently many NP-hard problems. Therefore,
                 computing the pathwidth and associated path
                 decomposition of graphs has both a theoretical and
                 practical interest. In this article, we design a
                 branch-and-bound algorithm that computes the exact
                 pathwidth of graphs and a corresponding path
                 decomposition. Our main contribution consists of
                 several nontrivial techniques to reduce the size of the
                 input graph (preprocessing) and to cut the exploration
                 space during the search phase of the algorithm. We
                 evaluate experimentally our algorithm by comparing it
                 to existing algorithms of the literature. It appears
                 from the simulations that our algorithm offers a
                 significant gain with respect to previous work. In
                 particular, it is able to compute the exact pathwidth
                 of any graph with less than 60 nodes in a reasonable
                  running time ($\leq$ 10 min on a standard laptop).
                 Moreover, our algorithm achieves good performance when
                 used as a heuristic (i.e., when returning best result
                 found within bounded time limit). Our algorithm is not
                 restricted to undirected graphs since it actually
                 computes the directed pathwidth that generalizes the
                 notion of pathwidth to digraphs.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
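
%%% Pathwidth equals the vertex separation number, which invites a
%%% search over vertex orderings keyed by the *set* of already placed
%%% vertices.  The toy branch-and-bound below (bitmask sets, memoized
%%% prefixes, pruning against the incumbent) shows only the search
%%% skeleton; the article's preprocessing and pruning rules are what
%%% make the approach scale:
%%%
%%%   def pathwidth(adj):
%%%       nodes = list(adj)
%%%       n = len(nodes)
%%%       idx = {v: i for i, v in enumerate(nodes)}
%%%       nbr = [sum(1 << idx[w] for w in adj[v]) for v in nodes]
%%%       full, best, seen = (1 << n) - 1, [n], {}
%%%       def boundary(prefix):            # placed vertices w/ outside nbr
%%%           return sum(1 for i in range(n)
%%%                      if prefix >> i & 1 and nbr[i] & ~prefix & full)
%%%       def go(prefix, width):
%%%           if width >= best[0] or seen.get(prefix, n + 1) <= width:
%%%               return
%%%           seen[prefix] = width
%%%           if prefix == full:
%%%               best[0] = width
%%%               return
%%%           for i in range(n):
%%%               if not prefix >> i & 1:
%%%                   p2 = prefix | 1 << i
%%%                   go(p2, max(width, boundary(p2)))
%%%       go(0, 0)
%%%       return best[0]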

@Article{Dibbelt:2016:CCH,
  author =       "Julian Dibbelt and Ben Strasser and Dorothea Wagner",
  title =        "Customizable Contraction Hierarchies",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.5:1--1.5:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2886843",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the problem of quickly computing shortest
                 paths in weighted graphs. Often, this is achieved in
                 two phases: (1) derive auxiliary data in an expensive
                 preprocessing phase, and (2) use this auxiliary data to
                 speed up the query phase. By adding a fast
                 weight-customization phase, we extend Contraction
                 Hierarchies to support a three-phase workflow. The
                 expensive preprocessing is split into a phase
                 exploiting solely the unweighted topology of the graph
                 and a lightweight phase that adapts the auxiliary data
                 to a specific weight. We achieve this by basing our
                 Customizable Contraction Hierarchies (CCHs) on nested
                 dissection orders. We provide an in-depth experimental
                 analysis on large road and game maps showing that CCHs
                 are a very practicable solution in scenarios where edge
                 weights often change.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
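
%%% The three-phase workflow can be caricatured in a few lines: the
%%% metric-independent phase contracts vertices along a fixed
%%% (ideally nested-dissection) order and records shortcuts;
%%% customization then relaxes lower triangles in rank order so each
%%% edge carries the length of the path it represents.  A simplified
%%% sketch, with edges keyed by frozenset pairs (our representation,
%%% not the paper's):
%%%
%%%   def contract(adj, order):
%%%       # Phase 1: upward adjacency incl. shortcuts, weight-free.
%%%       pos = {v: i for i, v in enumerate(order)}
%%%       up = {v: {w for w in adj[v] if pos[w] > pos[v]} for v in adj}
%%%       for u in order:
%%%           hi = sorted(up[u], key=pos.get)
%%%           for i, v in enumerate(hi):       # clique the up-neighbors
%%%               up[v].update(hi[i + 1:])
%%%       return up, pos
%%%
%%%   def customize(up, pos, weight):
%%%       # Phase 2: given concrete weights {frozenset((a, b)): w},
%%%       # relax lower triangles bottom-up; shortcuts start at +inf.
%%%       inf = float("inf")
%%%       w = dict(weight)
%%%       for u in sorted(up, key=pos.get):
%%%           hi = sorted(up[u], key=pos.get)
%%%           for i, v in enumerate(hi):
%%%               for x in hi[i + 1:]:
%%%                   via = (w.get(frozenset((u, v)), inf)
%%%                          + w.get(frozenset((u, x)), inf))
%%%                   e = frozenset((v, x))
%%%                   w[e] = min(w.get(e, inf), via)
%%%       return w           # queries then run bidirectional upward searches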

@Article{Efentakis:2016:REH,
  author =       "Alexandros Efentakis and Dieter Pfoser",
  title =        "{ReHub}: Extending Hub Labels for Reverse $k$-Nearest
                 Neighbor Queries on Large-Scale Networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.13:1--1.13:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2990192",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Quite recently, the algorithmic community has focused
                 on solving multiple shortest-path query problems beyond
                 simple vertex-to-vertex queries, especially in the
                 context of road networks. Unfortunately, those advanced
                 query-processing techniques cannot be applied to
                 large-scale graphs, such as social or collaboration
                  networks, or to efficiently answer reverse $k$-nearest
                  neighbor (R$k$NN) queries, which are of practical
                 relevance to a wide range of applications. To remedy
                 this, we propose ReHub, a novel main-memory algorithm
                 that extends the hub labeling technique to efficiently
                  answer R$k$NN queries on large-scale networks. Our
                  experiments show that ReHub is the best overall
                  solution for this type of query, requiring
                 only minimal additional preprocessing and providing
                 very fast query times in all cases.",
  acknowledgement = ack-nhfb,
  articleno =    "1.13",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
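
%%% The hub-labeling primitive that ReHub extends is a sorted-list
%%% merge: every vertex stores (hub, distance) pairs, and a distance
%%% query scans the two labels for common hubs.  A minimal sketch of
%%% that basic query (the RkNN machinery of the paper is built on top
%%% of it):
%%%
%%%   def hub_distance(label_u, label_v):
%%%       # labels: lists of (hub_id, dist) sorted by hub_id
%%%       i = j = 0
%%%       best = float("inf")
%%%       while i < len(label_u) and j < len(label_v):
%%%           hu, du = label_u[i]
%%%           hv, dv = label_v[j]
%%%           if hu == hv:
%%%               best = min(best, du + dv)
%%%               i += 1
%%%               j += 1
%%%           elif hu < hv:
%%%               i += 1
%%%           else:
%%%               j += 1
%%%       return best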

@Article{Fox-Epstein:2016:SSC,
  author =       "Eli Fox-Epstein and Shay Mozes and Phitchaya Mangpo
                 Phothilimthana and Christian Sommer",
  title =        "Short and Simple Cycle Separators in Planar Graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "2.2:1--2.2:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2957318",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We provide an implementation of an algorithm that,
                 given a triangulated planar graph with m edges, returns
                 a simple cycle that is a 3/4-balanced separator
                 consisting of at most $\sqrt{8 m}$ edges. An efficient
                 construction of a short and balanced separator that
                 forms a simple cycle is essential in numerous planar
                 graph algorithms, for example, for computing shortest
                 paths, minimum cuts, or maximum flows. To the best of
                 our knowledge, this is the first implementation of such
                 a cycle separator algorithm with a worst-case guarantee
                 on the cycle length. We evaluate the performance of our
                 algorithm and compare it to the planar separator
                 algorithms recently studied by Holzer et al. [2009].
                 Out of these algorithms, only the Fundamental Cycle
                 Separator (FCS) produces a simple cycle separator.
                 However, FCS does not provide a worst-case size
                 guarantee. We demonstrate that (1) our algorithm is
                 competitive across all test cases in terms of running
                 time, balance, and cycle length; (2) it provides
                 worst-case guarantees on the cycle length,
                 significantly outperforming FCS on some instances; and
                 (3) it scales to large graphs.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Gemsa:2016:ELS,
  author =       "Andreas Gemsa and Martin N{\"o}llenburg and Ignaz
                 Rutter",
  title =        "Evaluation of Labeling Strategies for Rotating Maps",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.4:1--1.4:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2851493",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the problem of labeling points
                 in a dynamic map that allows rotation. We are given a
                 set of feature points in the plane labeled by a set of
                 mutually disjoint labels, where each label is an
                 axis-aligned rectangle attached with one corner to its
                 respective point. We require that each label remains
                 horizontally aligned during the map rotation, and our
                 goal is to find a set of mutually nonoverlapping
                 visible labels for every rotation angle $\alpha \in [0,
                 2 \pi)$ so that the number of visible labels over a
                 full map rotation of $2 \pi$ is maximized. We discuss and
                 experimentally evaluate several labeling strategies
                 that define additional consistency constraints on label
                 visibility to reduce flickering effects during monotone
                 map rotation. We introduce three heuristic algorithms
                 and compare them experimentally to an existing
                 approximation algorithm and exact solutions obtained
                 from an integer linear program. Our results show that
                 on the one hand, low flickering can be achieved at the
                 expense of only a small reduction in the objective
                 value, and on the other hand, the proposed heuristics
                 achieve a high labeling quality significantly faster
                 than the other methods.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Glantz:2016:TBC,
  author =       "Roland Glantz and Henning Meyerhenke and Christian
                 Schulz",
  title =        "Tree-Based Coarsening and Partitioning of Complex
                 Networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.6:1--1.6:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2851496",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A hierarchy of increasingly coarse versions of a
                 network allows one to represent the network on multiple
                 scales at the same time. Often, the elementary
                 operation for generating a hierarchy on a network is
                 merging adjacent vertices, an operation that can be
                 realized through contracting the edge between the two
                 vertices. Such a hierarchy is defined by the selection
                 of the edges to be contracted between a level and the
                 next coarser level. The selection may involve (i)
                 rating the edges, (ii) constraining the selection
                 (e.g., that the selected edges form a matching), as
                 well as (iii) maximizing the total rate of the selected
                 edges under the constraints. Hierarchies of this kind
                 are, among others, involved in multilevel methods for
                  partitioning networks, a prerequisite for processing in
                 parallel with distributed memory. In this article, we
                 propose a new edge rating by (i) defining weights for
                 the edges of a network that express the edges'
                 importance for connectivity via shortest paths, (ii)
                 computing a minimum weight spanning tree with respect
                 to these weights, and (iii) rating the network edges
                 based on the conductance values of the tree's
                 fundamental cuts. To make the computation of our new
                 edge rating efficient, we develop the first optimal
                 linear-time algorithm to compute the conductance values
                 of all fundamental cuts of a given spanning tree. We
                 integrate the new edge rating into a leading multilevel
                  graph partitioner and also equip the latter with a new
                 greedy postprocessing for optimizing the Maximum
                 Communication Volume (MCV) of a partition. Our
                 experiments, in which we bipartition frequently used
                 benchmark networks, show that the postprocessing
                 reduces MCV by 11.3\%. Our new edge rating, here used
                 for matching-based coarsening, further reduces MCV by
                 10.3\% compared to the previously best rating with MCV
                 postprocessing in place for both ratings. In total,
                 with a modest increase in running time, our new
                 approach reduces the MCV of complex network partitions
                 by 20.4\%.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
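
%%% The quantity at the heart of the new rating -- the conductance of
%%% every fundamental cut of a spanning tree -- can be computed with
%%% two subtree aggregations once each non-tree edge is charged to
%%% its endpoints and discharged at their LCA.  A straightforward
%%% version with naive LCA (the article achieves overall linear
%%% time):
%%%
%%%   def fundamental_cut_conductance(adj, tree):
%%%       # adj, tree: {v: set(neighbors)}; tree spans adj's vertices.
%%%       root = next(iter(tree))
%%%       parent, depth, order = {root: None}, {root: 0}, [root]
%%%       stack = [root]
%%%       while stack:
%%%           u = stack.pop()
%%%           for v in tree[u]:
%%%               if v not in parent:
%%%                   parent[v], depth[v] = u, depth[u] + 1
%%%                   order.append(v)
%%%                   stack.append(v)
%%%       def lca(u, v):
%%%           while depth[u] > depth[v]: u = parent[u]
%%%           while depth[v] > depth[u]: v = parent[v]
%%%           while u != v: u, v = parent[u], parent[v]
%%%           return u
%%%       cover = {v: 0 for v in tree}          # paths through edge above v
%%%       vol = {v: len(adj[v]) for v in tree}  # degree = volume share
%%%       done = set()
%%%       for u in adj:
%%%           for v in adj[u]:
%%%               e = frozenset((u, v))
%%%               if e in done or v in tree[u]:
%%%                   continue                  # skip dups and tree edges
%%%               done.add(e)
%%%               cover[u] += 1
%%%               cover[v] += 1
%%%               cover[lca(u, v)] -= 2
%%%       total = sum(vol.values())
%%%       for v in reversed(order):             # children before parents
%%%           if parent[v] is not None:
%%%               cover[parent[v]] += cover[v]
%%%               vol[parent[v]] += vol[v]
%%%       return {(parent[v], v):
%%%               (cover[v] + 1) / min(vol[v], total - vol[v])
%%%               for v in tree if parent[v] is not None}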

@Article{Gudmundsson:2016:ESS,
  author =       "Joachim Gudmundsson and Jyrki Katajainen",
  title =        "Editorial, {SEA 2014} Special Issue",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.1:1--1.1:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2854021",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Karkkainen:2016:LAC,
  author =       "Juha K{\"a}rkk{\"a}inen and Dominik Kempa",
  title =        "{LCP} Array Construction in External Memory",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.7:1--1.7:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2851491",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "One of the most important data structures for string
                  processing, the suffix array, needs to be augmented with
                 the longest-common-prefix (LCP) array in numerous
                 applications. We describe the first external memory
                 algorithm for constructing the LCP array given the
                 suffix array as input. The only previous way to compute
                 the LCP array for data that is bigger than the RAM is
                 to use an external memory suffix array construction
                 algorithm (SACA) with complex modifications to produce
                 the LCP array as a by-product. Compared to the best
                 prior method, our algorithm needs much less disk space
                 (by more than a factor of three) and is significantly
                 faster. Furthermore, our algorithm can be combined with
                 any SACA, including a better one developed in the
                 future.",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
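
%%% What is being constructed is easiest to see in internal memory:
%%% Kasai et al.'s classic linear-time algorithm derives the LCP
%%% array from the text and its suffix array.  The article's
%%% contribution is an external-memory counterpart; the code below
%%% only fixes the definitions:
%%%
%%%   def lcp_from_sa(s, sa):
%%%       # lcp[i] = longest common prefix of suffixes sa[i-1], sa[i]
%%%       n = len(s)
%%%       rank = [0] * n
%%%       for i, suf in enumerate(sa):
%%%           rank[suf] = i
%%%       lcp = [0] * n
%%%       h = 0
%%%       for i in range(n):               # suffixes in text order
%%%           if rank[i] > 0:
%%%               j = sa[rank[i] - 1]
%%%               while i + h < n and j + h < n and s[i + h] == s[j + h]:
%%%                   h += 1
%%%               lcp[rank[i]] = h
%%%               if h:
%%%                   h -= 1               # key amortization step
%%%           else:
%%%               h = 0
%%%       return lcp
%%%
%%% (For testing, sa = sorted(range(len(s)), key=lambda i: s[i:]) is
%%% a valid, if slow, suffix array.)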

@Article{Karkkainen:2016:LLZ,
  author =       "Juha K{\"a}rkk{\"a}inen and Dominik Kempa and Simon J.
                 Puglisi",
  title =        "Lazy {Lempel--Ziv} Factorization Algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "2.4:1--2.4:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2699876",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/datacompression.bib;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "For decades the Lempel--Ziv (LZ77) factorization has
                 been a cornerstone of data compression and string
                 processing algorithms, and uses for it are still being
                 uncovered. For example, LZ77 is central to several
                 recent text indexing data structures designed to search
                 highly repetitive collections. However, in many
                 applications computation of the factorization remains a
                 bottleneck in practice. In this article, we describe a
                 number of simple and fast LZ77 factorization
                 algorithms, which consistently outperform all previous
                 methods in practice, use less memory, and still offer
                 strong worst-case performance guarantees. A common
                 feature of the new algorithms is that they compute
                 longest common prefix information in a lazy fashion,
                 with the degree of laziness in preprocessing
                 characterizing different algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
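
%%% The object being computed: the LZ77 factorization greedily splits
%%% the text into factors, each the longest prefix of the remaining
%%% suffix that occurs earlier (or a single fresh symbol).  A naive
%%% quadratic reference implementation -- the article's algorithms
%%% produce the same factorization fast by evaluating LCP information
%%% lazily:
%%%
%%%   def lz77(s):
%%%       n, i, factors = len(s), 0, []
%%%       while i < n:
%%%           best_len, best_pos = 0, -1
%%%           for j in range(i):           # candidate earlier start
%%%               l = 0
%%%               while i + l < n and s[j + l] == s[i + l]:
%%%                   l += 1               # overlap past i is allowed
%%%               if l > best_len:
%%%                   best_len, best_pos = l, j
%%%           if best_len == 0:
%%%               factors.append((s[i], 0))           # literal
%%%               i += 1
%%%           else:
%%%               factors.append((best_pos, best_len))
%%%               i += best_len
%%%       return factors
%%%
%%% E.g., lz77("abababb") -> [('a', 0), ('b', 0), (0, 4), (1, 1)].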

@Article{Marinov:2016:PAF,
  author =       "Martin Marinov and Nicholas Nash and David Gregg",
  title =        "Practical Algorithms for Finding Extremal Sets",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.9:1--1.9:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2893184",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The minimal sets within a collection of sets are
                 defined as the ones that do not have a proper subset
                 within the collection, and the maximal sets are the
                 ones that do not have a proper superset within the
                 collection. Identifying extremal sets is a fundamental
                 problem with a wide range of applications in SAT
                 solvers, data mining, and social network analysis. In
                 this article, we present two novel improvements of the
                 high-quality extremal set identification algorithm,
                 AMS-Lex, described by Bayardo and Panda. The first
                 technique uses memoization to improve the execution
                  time of the single-threaded variant of AMS-Lex,
                 while our second improvement uses parallel programming
                 methods. In a subset of the presented experiments, our
                 memoized algorithm executes more than 400 times faster
                 than the highly efficient publicly available
                 implementation of AMS-Lex. Moreover, we show that our
                 modified algorithm's speedup is not bounded above by a
                 constant and that it increases as the length of the
                 common prefixes in successive input itemsets increases.
                 We provide experimental results using both real-world
                 and synthetic datasets, and show our multithreaded
                  variant algorithm outperforming AMS-Lex by a factor of
                  3 to 6. We find that on synthetic input datasets, when
                 executed using 16 CPU cores of a 32-core machine, our
                 multithreaded program executes about as fast as the
                 state-of-the-art parallel GPU-based program using an
                 NVIDIA GTX 580 graphics processing unit.",
  acknowledgement = ack-nhfb,
  articleno =    "1.9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
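
%%% As a baseline for what AMS-Lex computes: the minimal sets of a
%%% family are those with no proper subset in the family, which a
%%% size-ordered quadratic scan identifies directly (duplicates are
%%% kept once).  The article's improvements -- memoized prefixes and
%%% multithreading -- attack exactly this quadratic behavior:
%%%
%%%   def minimal_sets(family):
%%%       kept = []
%%%       for s in sorted(map(frozenset, family), key=len):
%%%           if not any(t <= s for t in kept):   # some kept subset?
%%%               kept.append(s)
%%%       return kept
%%%
%%% Maximal sets are symmetric: scan in decreasing size order and
%%% test containment the other way around.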

@Article{Mcgeoch:2016:MDJ,
  author =       "Catherine McGeoch",
  title =        "In Memoriam: {David S. Johnson}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.1:1--1.1:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2907073",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "1.1e",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Navarro:2016:FCS,
  author =       "Gonzalo Navarro and Alberto Ord{\'o}{\~n}ez Pereira",
  title =        "Faster Compressed Suffix Trees for Repetitive
                 Collections",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.8:1--1.8:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2851495",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Recent compressed suffix trees targeted to highly
                 repetitive sequence collections reach excellent
                 compression performance, but operation times are very
                 high. We design a new suffix tree representation for
                 this scenario that still achieves very low space usage,
                 only slightly larger than the best previous one, but
                 supports the operations orders of magnitude faster. Our
                 suffix tree is still orders of magnitude slower than
                 general-purpose compressed suffix trees, but these use
                 several times more space when the collection is
                 repetitive. Our main novelty is a practical
                 grammar-compressed tree representation with full
                 navigation functionality, which is useful in all
                 applications where large trees with repetitive topology
                 must be represented.",
  acknowledgement = ack-nhfb,
  articleno =    "1.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Peethambaran:2016:ESR,
  author =       "Jiju Peethambaran and Amal Dev Parakkat and Ramanathan
                 Muthuganapathy",
  title =        "An Empirical Study on Randomized Optimal Area
                 Polygonization of Planar Point Sets",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.10:1--1.10:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2896849",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "While random polygon generation from a set of planar
                 points has been widely investigated in the literature,
                 very few works address the construction of a simple
                 polygon with minimum area (MINAP) or maximum area
                 (MAXAP) from a set of fixed planar points. Currently,
                 no deterministic algorithms are available to compute
                 MINAP/MAXAP, as the problems have been shown to be
                 NP-complete. In this article, we present a greedy
                 heuristic for computing the approximate MINAP of any
                 given planar point set using the technique of
                 randomized incremental construction. For a given point
                  set of $n$ points, the proposed algorithm takes
                  $O(n^2 \log n)$ time and $O(n)$ space. It is simple
                  in nature and hence easy to implement and
                  maintain. We report on various experimental studies
                 on the behavior of a randomized heuristic on different
                 point set instances. Test data have been taken from the
                  SPAETH cluster database and the TSPLIB library.
                 Experimental results indicate that the proposed
                  algorithm outperforms its counterparts, generating a
                 tighter upper bound on the optimal minimum area polygon
                 for large-sized point sets.",
  acknowledgement = ack-nhfb,
  articleno =    "1.10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Rosenbrock:2016:NAP,
  author =       "Conrad W. Rosenbrock and Wiley S. Morgan and Gus L. W.
                 Hart and Stefano Curtarolo and Rodney W. Forcade",
  title =        "Numerical Algorithm for {P{\'o}lya} Enumeration
                 Theorem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.11:1--1.11:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2955094",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Although the P{\'o}lya enumeration theorem has been
                 used extensively for decades, an optimized, purely
                 numerical algorithm for calculating its coefficients is
                 not readily available. We present such an algorithm for
                 finding the number of unique colorings of a finite set
                 under the action of a finite group.",
  acknowledgement = ack-nhfb,
  articleno =    "1.11",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
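
%%% The coefficient in question counts orbits of colorings: by
%%% Burnside/Polya, the number of distinct k-colorings of a finite
%%% set under a permutation group G is (1/|G|) * sum over g in G of
%%% k^(number of cycles of g).  A direct numerical version for an
%%% explicitly listed group (the article's algorithm is an optimized
%%% refinement of this idea):
%%%
%%%   def polya_count(group, k):
%%%       # group: permutations as tuples, perm[i] = image of i
%%%       def cycles(perm):
%%%           seen, c = set(), 0
%%%           for i in range(len(perm)):
%%%               if i not in seen:
%%%                   c += 1
%%%                   j = i
%%%                   while j not in seen:
%%%                       seen.add(j)
%%%                       j = perm[j]
%%%           return c
%%%       return sum(k ** cycles(g) for g in group) // len(group)
%%%
%%% E.g., for the four rotations of a square, polya_count([(0,1,2,3),
%%% (1,2,3,0), (2,3,0,1), (3,0,1,2)], 2) == 6 two-colored necklaces.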

@Article{Sanders:2016:ISI,
  author =       "Peter Sanders and Norbert Zeh",
  title =        "Introduction to Special Issue {ALENEX 2013}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "2.1:1--2.1:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2966922",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Toda:2016:IEA,
  author =       "Takahisa Toda and Takehide Soh",
  title =        "Implementing Efficient All Solutions {SAT} Solvers",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.12:1--1.12:??",
  month =        nov,
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2975585",
  ISSN =         "1084-6654",
  bibdate =      "Fri Nov 4 16:46:55 MDT 2016",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "All solutions SAT (AllSAT for short) is a variant of
                 the propositional satisfiability problem. AllSAT has
                 been relatively unexplored compared to other variants
                 despite its significance. We thus survey and discuss
                 major techniques of AllSAT solvers. We accurately
                 implemented them and conducted comprehensive
                 experiments using a large number of instances and
                  various types of solvers, including publicly
                  available software. The experiments revealed the
                 solvers' characteristics. We made our implemented
                 solvers publicly available so that other researchers
                 can easily develop their solvers by modifying our code
                 and comparing it with existing methods.",
  acknowledgement = ack-nhfb,
  articleno =    "1.12",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
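
%%% The simplest of the surveyed techniques is blocking-clause
%%% enumeration: repeatedly ask a solver for a model, emit it, and
%%% add a clause forbidding exactly that model.  A self-contained toy
%%% (naive DPLL solver included; real AllSAT solvers avoid or shrink
%%% blocking clauses, which is much of what the article compares):
%%%
%%%   def solve(clauses, nvars, assign=None):
%%%       # returns one total model as {var: bool}, or None
%%%       assign = dict(assign or {})
%%%       cls = []
%%%       for c in clauses:
%%%           if any(assign.get(abs(l)) == (l > 0) for l in c):
%%%               continue                       # clause satisfied
%%%           c2 = [l for l in c if abs(l) not in assign]
%%%           if not c2:
%%%               return None                    # clause falsified
%%%           cls.append(c2)
%%%       if not cls:
%%%           for v in range(1, nvars + 1):      # extend arbitrarily
%%%               assign.setdefault(v, False)
%%%           return assign
%%%       unit = next((c[0] for c in cls if len(c) == 1), None)
%%%       lit = unit if unit is not None else cls[0][0]
%%%       for val in ((lit > 0), not (lit > 0)):
%%%           assign[abs(lit)] = val
%%%           r = solve(cls, nvars, assign)
%%%           if r is not None:
%%%               return r
%%%       return None
%%%
%%%   def all_sat(clauses, nvars):
%%%       clauses = [list(c) for c in clauses]
%%%       while True:
%%%           m = solve(clauses, nvars)
%%%           if m is None:
%%%               return
%%%           yield m
%%%           clauses.append([-v if m[v] else v   # block this model
%%%                           for v in range(1, nvars + 1)])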

@Article{Efentakis:2017:REH,
  author =       "Alexandros Efentakis and Dieter Pfoser",
  title =        "{ReHub}: Extending Hub Labels for Reverse $k$-Nearest
                 Neighbor Queries on Large-Scale Networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "1.13:1--1.13:??",
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2990192",
  ISSN =         "1084-6654",
  bibdate =      "Sun Aug 20 07:54:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Quite recently, the algorithmic community has focused
                 on solving multiple shortest-path query problems beyond
                 simple vertex-to-vertex queries, especially in the
                 context of road networks. Unfortunately, those advanced
                 query-processing techniques cannot be applied to
                 large-scale graphs, such as social or collaboration
                 networks, or to efficiently answer reverse $k$-nearest
                 neighbor (RkNN) queries, which are of practical
                 relevance to a wide range of applications. To remedy
                 this, we propose ReHub, a novel main-memory algorithm
                 that extends the hub labeling technique to efficiently
                 answer RkNN queries on large-scale networks. Our
                 experimentation will show that ReHub is the best
                 overall solution for this type of queries, requiring
                 only minimal additional preprocessing and providing
                 very fast query times in all cases.",
  acknowledgement = ack-nhfb,
  articleno =    "1.13",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Sanders:2017:ISI,
  author =       "Peter Sanders and Norbert Zeh",
  title =        "Introduction to Special Issue {ALENEX 2013}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "2.1:1--2.1:??",
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2966922",
  ISSN =         "1084-6654",
  bibdate =      "Sun Aug 20 07:54:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Fox-Epstein:2017:SSC,
  author =       "Eli Fox-Epstein and Shay Mozes and Phitchaya Mangpo
                 Phothilimthana and Christian Sommer",
  title =        "Short and Simple Cycle Separators in Planar Graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "2.2:1--2.2:??",
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2957318",
  ISSN =         "1084-6654",
  bibdate =      "Sun Aug 20 07:54:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We provide an implementation of an algorithm that,
                 given a triangulated planar graph with $m$ edges, returns
                 a simple cycle that is a 3/4-balanced separator
                 consisting of at most $ \sqrt {8 m} $ edges. An
                 efficient construction of a short and balanced
                 separator that forms a simple cycle is essential in
                 numerous planar graph algorithms, for example, for
                 computing shortest paths, minimum cuts, or maximum
                 flows. To the best of our knowledge, this is the first
                 implementation of such a cycle separator algorithm with
                 a worst-case guarantee on the cycle length. We evaluate
                 the performance of our algorithm and compare it to the
                 planar separator algorithms recently studied by Holzer
                 et al. [2009]. Out of these algorithms, only the
                 Fundamental Cycle Separator (FCS) produces a simple
                 cycle separator. However, FCS does not provide a
                 worst-case size guarantee. We demonstrate that (1) our
                 algorithm is competitive across all test cases in terms
                 of running time, balance, and cycle length; (2) it
                 provides worst-case guarantees on the cycle length,
                 significantly outperforming FCS on some instances; and
                 (3) it scales to large graphs.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Bingmann:2017:ISL,
  author =       "Timo Bingmann and Johannes Fischer and Vitaly Osipov",
  title =        "Inducing Suffix and {LCP} Arrays in External Memory",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "2.3:1--2.3:??",
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2975593",
  ISSN =         "1084-6654",
  bibdate =      "Sun Aug 20 07:54:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider full text index construction in external
                 memory (EM). Our first contribution is an inducing
                 algorithm for suffix arrays in external memory, which
                 runs in sorting complexity. Practical tests show that
                 this algorithm outperforms the previous best EM suffix
                 sorter [Dementiev et al., JEA 2008] by a factor of
                 about two in time and I/O volume. Our second
                 contribution is to augment the first algorithm to also
                 construct the array of longest common prefixes (LCPs).
                 This yields a new internal memory LCP array
                 construction algorithm and the first EM construction
                 algorithm for LCP arrays. The overhead in time and I/O
                 volume for this extended algorithm over plain suffix
                 array construction is roughly a factor of two. Our
                 algorithms scale far beyond problem sizes previously
                 considered in the literature (text size of 80 GiB using
                 only 4 GiB of RAM in our experiments).",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
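
%%% The two objects the article constructs in external memory are
%%% easiest to see in internal memory. A small Python sketch (naive
%%% suffix sorting plus Kasai's linear-time LCP computation; this is a
%%% reference definition, not the authors' EM inducing algorithm):
%%%
%%%   def suffix_array(s):
%%%       """Naive O(n^2 log n) suffix array, fine for illustration."""
%%%       return sorted(range(len(s)), key=lambda i: s[i:])
%%%
%%%   def lcp_array(s, sa):
%%%       """Kasai's algorithm: lcp[i] is the length of the longest
%%%       common prefix of the suffixes at sa[i-1] and sa[i]."""
%%%       n = len(s)
%%%       rank = [0] * n
%%%       for i, suf in enumerate(sa):
%%%           rank[suf] = i
%%%       lcp, h = [0] * n, 0
%%%       for i in range(n):
%%%           if rank[i] > 0:
%%%               j = sa[rank[i] - 1]
%%%               while i + h < n and j + h < n and s[i + h] == s[j + h]:
%%%                   h += 1
%%%               lcp[rank[i]] = h
%%%               if h:
%%%                   h -= 1          # LCP drops by at most 1 per step
%%%           else:
%%%               h = 0
%%%       return lcp
%%%
%%%   s = "banana"
%%%   sa = suffix_array(s)            # [5, 3, 1, 0, 4, 2]
%%%   print(sa, lcp_array(s, sa))     # lcp = [0, 1, 3, 0, 0, 2]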

@Article{Karkkainen:2017:LLZ,
  author =       "Juha K{\"a}rkk{\"a}inen and Dominik Kempa and Simon J.
                 Puglisi",
  title =        "Lazy {Lempel--Ziv} Factorization Algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "21",
  number =       "1",
  pages =        "2.4:1--2.4:??",
  year =         "2016",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2699876",
  ISSN =         "1084-6654",
  bibdate =      "Sun Aug 20 07:54:41 MDT 2017",
  bibsource =    "http://portal.acm.org/;
                 https://www.math.utah.edu/pub/tex/bib/datacompression.bib;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib;
                 https://www.math.utah.edu/pub/tex/bib/string-matching.bib",
  abstract =     "For decades the Lempel--Ziv (LZ77) factorization has
                 been a cornerstone of data compression and string
                 processing algorithms, and uses for it are still being
                 uncovered. For example, LZ77 is central to several
                 recent text indexing data structures designed to search
                 highly repetitive collections. However, in many
                 applications computation of the factorization remains a
                 bottleneck in practice. In this article, we describe a
                 number of simple and fast LZ77 factorization
                 algorithms, which consistently outperform all previous
                 methods in practice, use less memory, and still offer
                 strong worst-case performance guarantees. A common
                 feature of the new algorithms is that they compute
                 longest common prefix information in a lazy fashion,
                 with the degree of laziness in preprocessing
                 characterizing different algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
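
%%% What an LZ77 factorization is can be made concrete with a
%%% quadratic-time reference sketch in Python (the article's algorithms
%%% compute the same factorization far faster via lazy LCP information;
%%% function name and output format here are illustrative):
%%%
%%%   def lz77_factorize(s):
%%%       """Greedy LZ77: each factor is the longest prefix of the
%%%       remaining suffix occurring earlier (overlaps allowed), or a
%%%       single fresh symbol."""
%%%       factors, i, n = [], 0, len(s)
%%%       while i < n:
%%%           best_len, best_src = 0, -1
%%%           for j in range(i):                 # candidate earlier start
%%%               l = 0
%%%               while i + l < n and s[j + l] == s[i + l]:
%%%                   l += 1
%%%               if l > best_len:
%%%                   best_len, best_src = l, j
%%%           if best_len == 0:
%%%               factors.append((s[i], None))   # literal factor
%%%               i += 1
%%%           else:
%%%               factors.append((best_src, best_len))
%%%               i += best_len
%%%       return factors
%%%
%%%   print(lz77_factorize("abababb"))
%%%   # [('a', None), ('b', None), (0, 4), (1, 1)]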

@Article{Baier:2017:SEP,
  author =       "Uwe Baier and Timo Beller and Enno Ohlebusch",
  title =        "Space-Efficient Parallel Construction of Succinct
                 Representations of Suffix Tree Topologies",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "22",
  number =       "??",
  pages =        "1.1:1--1.1:??",
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3035540",
  ISSN =         "1084-6654",
  bibdate =      "Mon Jan 22 09:52:54 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "A compressed suffix tree usually consists of three
                 components: a compressed suffix array, a compressed
                 LCP-array, and a succinct representation of the suffix
                 tree topology. There are parallel algorithms that
                 construct the suffix array and the LCP-array, but none
                 for the third component. In this article, we present
                 parallel algorithms on shared memory architectures that
                 construct the balanced parentheses sequence (BPS), an
                 explicit succinct representation of the suffix tree
                 topology, as well as the enhanced balanced parentheses
                 representation (eBPR), an implicit succinct
                 representation of the suffix tree topology. For both
                 representations, this article presents a sequential
                 construction algorithm (a new one for the BPS), a
                 linear work and $O(\log n)$ time parallel construction
                 algorithm, and a heuristic parallel construction
                 algorithm that works very well in practice. The
                 experimental results show that our methods are well
                 suited for real-world applications.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
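
%%% The balanced parentheses sequence (BPS), whose construction the
%%% article parallelizes, has a one-line sequential definition; a toy
%%% Python sketch (tree encoding and names are illustrative):
%%%
%%%   def bps(tree, root):
%%%       """BPS of a rooted tree: '(' on entering a node in DFS order,
%%%       ')' on leaving it."""
%%%       out = []
%%%       def dfs(v):
%%%           out.append("(")
%%%           for c in tree.get(v, []):
%%%               dfs(c)
%%%           out.append(")")
%%%       dfs(root)
%%%       return "".join(out)
%%%
%%%   # Toy tree: root 0 with children 1 and 2; node 1 has child 3.
%%%   print(bps({0: [1, 2], 1: [3]}, 0))   # ((())())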

@Article{Buchin:2017:CFD,
  author =       "Kevin Buchin and Maike Buchin and Joachim Gudmundsson
                 and Michael Horton and Stef Sijben",
  title =        "Compact Flow Diagrams for State Sequences",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "22",
  number =       "??",
  pages =        "1.7:1--1.7:??",
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3150525",
  ISSN =         "1084-6654",
  bibdate =      "Mon Jan 22 09:52:54 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We introduce the concept of using a flow diagram to
                 compactly represent the segmentation of a large number
                 of state sequences according to a set of criteria. We
                 argue that this flow diagram representation gives an
                 intuitive summary that allows the user to detect
                 patterns within the segmentations. In essence, our aim
                 is to generate a flow diagram with a minimum number of
                 nodes that models a segmentation of the states in the
                 input sequences. For a small number of state sequences
                 we present efficient algorithms to compute a minimal
                 flow diagram. For a large number of state sequences, we
                 show that it is unlikely that efficient algorithms
                 exist. Specifically, the problem is W[1]-hard if the
                 number of state sequences is taken as a parameter. We
                 introduce several heuristics for this problem. We argue
                 for the usefulness of the flow diagram by applying
                 the algorithms to two problems in sports analysis, and
                 evaluate the performance of our algorithms on a
                 football dataset and synthetic data.",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Gog:2017:PCI,
  author =       "Simon Gog and Roberto Konow and Gonzalo Navarro",
  title =        "Practical Compact Indexes for Top-$k$ Document
                 Retrieval",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "22",
  number =       "??",
  pages =        "1.2:1--1.2:??",
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3043958",
  ISSN =         "1084-6654",
  bibdate =      "Mon Jan 22 09:52:54 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We present a fast and compact index for top- k
                 document retrieval on general string collections, in
                 which given a string pattern, the index returns the k
                 documents where it appears most often. We adapt a
                 linear-space and optimal-time theoretical solution,
                 whose implementation poses various algorithm
                 engineering challenges. Although a naive implementation
                 of the optimal solution is estimated to require around
                 80 n bytes for a text collection of n symbols, our
                 implementation requires 2.5 n to 3.0 n bytes, text
                 included, and answers queries within microseconds. This
                 outperforms all previous practical indexes by orders of
                 magnitude; the only index using less space is hundreds
                 of times slower. Our index can be built on collections
                 of hundreds of gigabytes and on tokenized text
                 collections.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
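
%%% A brute-force baseline makes the top-k document retrieval problem
%%% concrete (a Python sketch with toy documents; the article's index
%%% answers the same queries in microseconds within a few bytes per
%%% symbol):
%%%
%%%   import heapq
%%%
%%%   def topk_docs(docs, pattern, k):
%%%       """Rank documents by the number of (possibly overlapping)
%%%       occurrences of pattern and return the k best."""
%%%       def count(text):
%%%           c, start = 0, 0
%%%           while True:
%%%               pos = text.find(pattern, start)
%%%               if pos < 0:
%%%                   return c
%%%               c, start = c + 1, pos + 1
%%%       scored = [(count(d), i) for i, d in enumerate(docs)]
%%%       return heapq.nlargest(k, scored)     # [(count, doc_id), ...]
%%%
%%%   print(topk_docs(["abab", "aba", "bb"], "ab", 2))  # [(2, 0), (1, 1)]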

@Article{Hirvola:2017:BPA,
  author =       "Tommi Hirvola and Jorma Tarhio",
  title =        "Bit-Parallel Approximate Matching of Circular Strings
                 with $k$ Mismatches",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "22",
  number =       "??",
  pages =        "1.5:1--1.5:??",
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3129536",
  ISSN =         "1084-6654",
  bibdate =      "Mon Jan 22 09:52:54 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib;
                 https://www.math.utah.edu/pub/tex/bib/string-matching.bib",
  abstract =     "We consider approximate string matching of a circular
                 pattern consisting of the rotations of a pattern of
                 length $m$. From SBNDM and Tuned Shift-Add, we derive a
                 sublinear-time algorithm for searching a noncircular
                 pattern with $k$ allowed mismatches, which is extended
                 to the problem of approximate circular pattern matching
                 with $k$ mismatches. We prove that the presented
                 algorithms are average-optimal for $ m \cdot \lceil
                 \log_2 (k + 1) + 1 \rceil = O(w)$, where $w$ is the
                 size of the computer word in bits. Experiments
                 conducted under the aforementioned condition show that
                 the new $k$-mismatches algorithm for circular strings
                 outperforms previous solutions in practice. In
                 particular, our algorithm is the first nonfiltering
                 method for approximate circular string matching in
                 sublinear average time, which makes it more suitable
                 than earlier filtering methods for high error levels $
                 k / m$ and small alphabets.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
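
%%% A naive reference implementation pins down the problem the article
%%% solves with bit-parallelism (plain Python; the authors' algorithm is
%%% sublinear on average, while this sketch is cubic and only fixes the
%%% problem definition):
%%%
%%%   def circular_kmismatch(text, pattern, k):
%%%       """Positions i where some rotation of pattern matches
%%%       text[i : i+m] with at most k mismatches."""
%%%       m = len(pattern)
%%%       rotations = [pattern[r:] + pattern[:r] for r in range(m)]
%%%       hits = []
%%%       for i in range(len(text) - m + 1):
%%%           window = text[i:i + m]
%%%           if any(sum(a != b for a, b in zip(window, rot)) <= k
%%%                  for rot in rotations):
%%%               hits.append(i)
%%%       return hits
%%%
%%%   print(circular_kmismatch("abcabd", "cab", 0))   # [0, 1, 2]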

@Article{Kerber:2017:GHC,
  author =       "Michael Kerber and Dmitriy Morozov and Arnur
                 Nigmetov",
  title =        "Geometry Helps to Compare Persistence Diagrams",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "22",
  number =       "??",
  pages =        "1.4:1--1.4:??",
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3064175",
  ISSN =         "1084-6654",
  bibdate =      "Mon Jan 22 09:52:54 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Exploiting geometric structure to improve the
                 asymptotic complexity of discrete assignment problems
                 is a well-studied subject. In contrast, the practical
                 advantages of using geometry for such problems have not
                 been explored. We implement geometric variants of the
                 Hopcroft-Karp algorithm for bottleneck matching (based
                 on previous work by Efrat et al.) and of the auction
                 algorithm by Bertsekas for Wasserstein distance
                 computation. Both implementations use k-d trees to
                 replace a linear scan with a geometric proximity query.
                 Our interest in this problem stems from the desire to
                 compute distances between persistence diagrams, a
                 problem that comes up frequently in topological data
                 analysis. We show that our geometric matching
                 algorithms lead to a substantial performance gain, both
                 in running time and in memory consumption, over their
                 purely combinatorial counterparts. Moreover, our
                 implementation significantly outperforms the only other
                 implementation available for comparing persistence
                 diagrams.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Khuong:2017:ALC,
  author =       "Paul-Virak Khuong and Pat Morin",
  title =        "Array Layouts for Comparison-Based Searching",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "22",
  number =       "??",
  pages =        "1.3:1--1.3:??",
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3053370",
  ISSN =         "1084-6654",
  bibdate =      "Mon Jan 22 09:52:54 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We attempt to determine the best order and search
                 algorithm to store $n$ comparable data items in an
                 array, $A$, of length $n$ so we can, for any query
                 value, $x$, quickly find the smallest value in $A$
                 that is greater than or equal to $x$. In particular,
                 we consider the important case where there are many
                 such queries to the same array, $A$, which resides
                 entirely in RAM. In
                 addition to the obvious sorted order/binary search
                 combination we consider the Eytzinger
                 breadth-first-search (BFS) layout normally used for
                 heaps, an implicit B-tree layout that generalizes the
                 Eytzinger layout, and the van Emde Boas layout commonly
                 used in the cache-oblivious algorithms literature.
                 After extensive testing and tuning on a wide variety of
                 modern hardware, we arrive at the conclusion that, for
                 small values of $n$, sorted order, combined with a good
                 implementation of binary search, is best. For larger
                 values of $n$, we arrive at the surprising conclusion
                 that the Eytzinger layout is usually the fastest. The
                 latter conclusion is unexpected and goes counter to
                 earlier experimental work by Brodal, Fagerberg, and
                 Jacob (SODA 2003), who concluded that both the B-tree
                 and van Emde Boas layouts were faster than the
                 Eytzinger layout for large values of $n$. Our fastest C++
                 implementations, when compiled, use conditional moves
                 to avoid branch mispredictions and prefetching to
                 reduce cache latency.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
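
%%% The Eytzinger (BFS) layout discussed above is easy to state in code;
%%% a minimal Python sketch (the authors' tuned C++ uses conditional
%%% moves and prefetching, which this sketch does not attempt):
%%%
%%%   def eytzinger(sorted_a):
%%%       """Permute a sorted array into BFS heap order: node i has
%%%       children 2i+1 and 2i+2; an in-order walk yields sorted order."""
%%%       out = [None] * len(sorted_a)
%%%       it = iter(sorted_a)
%%%       def fill(i):
%%%           if i < len(out):
%%%               fill(2 * i + 1)
%%%               out[i] = next(it)
%%%               fill(2 * i + 2)
%%%       fill(0)
%%%       return out
%%%
%%%   def lower_bound(eyt, x):
%%%       """Smallest element >= x in an Eytzinger array (None if none)."""
%%%       i, best = 0, None
%%%       while i < len(eyt):
%%%           if eyt[i] >= x:
%%%               best, i = eyt[i], 2 * i + 1    # go left, keep candidate
%%%           else:
%%%               i = 2 * i + 2                  # go right
%%%       return best
%%%
%%%   a = eytzinger([1, 3, 5, 7, 9, 11, 13])
%%%   assert a == [7, 3, 11, 1, 5, 9, 13]
%%%   assert lower_bound(a, 6) == 7 and lower_bound(a, 14) is None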

@Article{Poloczek:2017:EEF,
  author =       "Matthias Poloczek and David P. Williamson",
  title =        "An Experimental Evaluation of Fast Approximation
                 Algorithms for the Maximum Satisfiability Problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "22",
  number =       "??",
  pages =        "1.6:1--1.6:??",
  year =         "2017",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3064174",
  ISSN =         "1084-6654",
  bibdate =      "Mon Jan 22 09:52:54 MST 2018",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We evaluate the performance of fast approximation
                 algorithms for MAX SAT on the comprehensive benchmark
                 sets from the SAT and MAX SAT contests. Our examination
                 of a broad range of algorithmic techniques reveals that
                 greedy algorithms offer particularly striking
                 performance, delivering very good solutions at low
                 computational cost. Interestingly, their relative
                 ranking does not follow their worst-case behavior.
                 Johnson's deterministic algorithm is consistently
                 better than the randomized greedy algorithm of Poloczek
                 et al. [2017], but in turn is outperformed by the
                 derandomization of the latter: this two-pass algorithm
                 satisfies more than 99\% of the clauses for instances
                 stemming from industrial applications. In general, it
                 performs considerably better than nonoblivious local
                 search, Tabu Search, WalkSat, and several
                 state-of-the-art complete and incomplete solvers, while
                 being much faster. But the two-pass algorithm does not
                 achieve the excellent performance of Spears's
                 computationally intense simulated annealing. Therefore,
                 we propose a new hybrid algorithm that combines the
                 strengths of greedy algorithms and stochastic local
                 search to provide outstanding solutions at high speed:
                 in our experiments, its performance is as good as
                 simulated annealing, achieving an average loss with
                 respect to the best-known assignment of less than
                 0.5\%, while its speed is comparable to the greedy
                 algorithms.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
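
%%% Johnson's deterministic greedy algorithm, the baseline in the study
%%% above, fits in a few lines; a Python sketch for unit clause weights
%%% (DIMACS-style encoding; this is the textbook rule, not the authors'
%%% benchmark code):
%%%
%%%   def johnson_maxsat(num_vars, clauses):
%%%       """Clauses are lists of nonzero ints: v means x_v, -v means
%%%       NOT x_v.  Each live clause carries weight 2^-len, doubled
%%%       whenever it loses a literal; each variable is set to the side
%%%       with the larger total weight."""
%%%       live = [(set(c), 2.0 ** -len(c)) for c in clauses]
%%%       assign = {}
%%%       for v in range(1, num_vars + 1):
%%%           wpos = sum(w for lits, w in live if v in lits)
%%%           wneg = sum(w for lits, w in live if -v in lits)
%%%           assign[v] = wpos >= wneg
%%%           sat_lit, unsat_lit = (v, -v) if assign[v] else (-v, v)
%%%           nxt = []
%%%           for lits, w in live:
%%%               if sat_lit in lits:
%%%                   continue                    # clause satisfied: drop
%%%               if unsat_lit in lits:
%%%                   lits = lits - {unsat_lit}   # literal falsified
%%%                   w *= 2.0
%%%               nxt.append((lits, w))
%%%           live = nxt
%%%       return assign
%%%
%%%   print(johnson_maxsat(2, [[1, 2], [-1], [-1, -2]]))
%%%   # {1: False, 2: True} satisfies all three clauses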

@Article{Afshani:2018:EAT,
  author =       "Peyman Afshani and Mark De Berg and Henri Casanova and
                 Ben Karsin and Colin Lambrechts and Nodari Sitchinava
                 and Constantinos Tsirogiannis",
  title =        "An Efficient Algorithm for the {$1$D} Total
                 Visibility-Index Problem and Its Parallelization",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "23",
  number =       "??",
  pages =        "2.3:1--2.3:??",
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3209685",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Let T be a terrain and P be a set of points on its
                 surface. An important problem in Geographic Information
                 Science (GIS) is computing the visibility index of a
                 point p on P, that is, the number of points in P that
                 are visible from p. The total visibility-index problem
                 asks for the visibility index of every point in P. We
                 present the first subquadratic-time algorithm to solve
                 the one-dimensional total-visibility-index problem. Our
                 algorithm uses a geometric dualization technique to
                 reduce the problem to a set of instances of the
                 red--blue line segment intersection counting problem,
                 allowing us to find the total visibility-index in
                 $O(n \log^2 n)$ time. We implement a naive $O(n^2)$
                 approach and four variations of our algorithm: one that
                 uses an existing red--blue line segment intersection
                 counting algorithm and three new approaches that
                 leverage features specific to our problem. Two of our
                 implementations allow for parallel execution, requiring
                 $O(\log^2 n)$ time and $O(n \log^2 n)$ work in the
                 CREW PRAM model. We present experimental results for
                 both serial and parallel implementations on synthetic
                 and real-world datasets using two hardware platforms.
                 Results show that all variants of our algorithm
                 outperform the naive approach by several orders of
                 magnitude. Furthermore, we show that our special-case
                 red--blue line segment intersection counting
                 implementations outperform the existing general-case
                 solution by up to a factor of 10. Our fastest parallel
                 implementation is able to process a terrain of more
                 than 100 million vertices in under 3 minutes, achieving
                 up to 85\% parallel efficiency using 16 cores.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
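
%%% The naive quadratic baseline mentioned in the abstract can be
%%% written directly; a Python sketch for a 1D terrain (names and the
%%% convention that a vertex exactly on the sight line does not block
%%% are modeling assumptions of this sketch):
%%%
%%%   def total_visibility_index(pts):
%%%       """pts is a list of (x, height) with strictly increasing x;
%%%       q is visible from p if no intermediate vertex rises above the
%%%       segment pq.  Sweeping a maximum slope gives O(n^2) total."""
%%%       n = len(pts)
%%%       vis = [0] * n
%%%       for i in range(n):
%%%           xi, hi = pts[i]
%%%           max_slope = float("-inf")
%%%           for j in range(i + 1, n):
%%%               xj, hj = pts[j]
%%%               slope = (hj - hi) / (xj - xi)
%%%               if slope >= max_slope:   # sees over every blocker so far
%%%                   vis[i] += 1
%%%                   vis[j] += 1
%%%               max_slope = max(max_slope, slope)
%%%       return vis
%%%
%%%   print(total_visibility_index([(0, 0), (1, 2), (2, 0), (3, 3)]))
%%%   # [1, 3, 2, 2]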

@Article{Bergamini:2018:IBC,
  author =       "Elisabetta Bergamini and Pierluigi Crescenzi and
                 Gianlorenzo D'Angelo and Henning Meyerhenke and Lorenzo
                 Severini and Yllka Velaj",
  title =        "Improving the Betweenness Centrality of a Node by
                 Adding Links",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "23",
  number =       "??",
  pages =        "1.5:1--1.5:??",
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3166071",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Betweenness is a well-known centrality measure that
                 ranks the nodes according to their participation in the
                 shortest paths of a network. In several scenarios,
                 having a high betweenness can have a positive impact on
                 the node itself. Hence, in this article, we consider
                 the problem of determining how much a vertex can
                 increase its centrality by creating a limited amount of
                 new edges incident to it. In particular, we study the
                 problem of maximizing the betweenness score of a given
                 node, Maximum Betweenness Improvement (MBI), and that
                 of maximizing the ranking of a given node, Maximum
                 Ranking Improvement (MRI). We show that MBI cannot be
                 approximated in polynomial time within a factor of $(1
                 - 1 / (2 e))$ and that MRI does not admit any
                 polynomial-time constant factor approximation
                 algorithm, both unless P = NP. We then propose a simple
                 greedy approximation algorithm for MBI with an almost
                 tight approximation ratio and we test its performance
                 on several real-world networks. We experimentally show
                 that our algorithm highly increases both the
                 betweenness score and the ranking of a given node and
                 that it outperforms several competitive baselines. To
                 speed up the computation of our greedy algorithm, we
                 also propose a new dynamic algorithm for updating the
                 betweenness of one node after an edge insertion, which
                 might be of independent interest. Using the dynamic
                 algorithm, we are now able to compute an approximation
                 of MBI on networks with up to $10^5$ edges in most
                 cases in a matter of seconds or a few minutes.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Dibbelt:2018:CSA,
  author =       "Julian Dibbelt and Thomas Pajor and Ben Strasser and
                 Dorothea Wagner",
  title =        "Connection Scan Algorithm",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "23",
  number =       "??",
  pages =        "1.7:1--1.7:??",
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274661",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We introduce the Connection Scan Algorithm (CSA) to
                 efficiently answer queries to timetable information
                 systems. The input consists, in the simplest setting,
                 of a source position and a desired target position. The
                 output consists of a sequence of vehicles such as
                 trains or buses that a traveler should take to get from
                 the source to the target. We study several problem
                 variations such as the earliest arrival and profile
                 problems. We present algorithm variants that only
                 optimize the arrival time or additionally optimize the
                 number of transfers in the Pareto sense. An advantage
                 of CSA is that it can easily adjust to changes in the
                 timetable, allowing the easy incorporation of known
                 vehicle delays. We additionally introduce the Minimum
                 Expected Arrival Time (MEAT) problem to handle
                 possible, uncertain, future vehicle delays. We present
                 a solution to the MEAT problem that is based on CSA.
                 Finally, we extend CSA using the multilevel overlay
                 paradigm to answer complex queries on nationwide
                 integrated timetables with trains and buses.",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
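
%%% The earliest-arrival variant of the Connection Scan Algorithm is a
%%% single pass over the connections; a minimal Python sketch (instant
%%% transfers, toy timetable; the article's versions add profiles,
%%% Pareto transfers, delays, and multilevel overlays):
%%%
%%%   def csa_earliest_arrival(connections, source, target, dep_time):
%%%       """Scan all connections once in order of departure time.  A
%%%       connection is (dep_stop, arr_stop, dep_time, arr_time)."""
%%%       INF = float("inf")
%%%       arr = {source: dep_time}
%%%       for u, v, t_dep, t_arr in sorted(connections, key=lambda c: c[2]):
%%%           if arr.get(u, INF) <= t_dep and t_arr < arr.get(v, INF):
%%%               arr[v] = t_arr    # reachable and improves arrival at v
%%%       return arr.get(target, INF)
%%%
%%%   conns = [("A", "B", 8, 9), ("B", "C", 10, 11), ("A", "C", 9, 12)]
%%%   print(csa_earliest_arrival(conns, "A", "C", 8))   # 11, via B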

@Article{Hamann:2018:EGM,
  author =       "Michael Hamann and Ulrich Meyer and Manuel Penschuck
                 and Hung Tran and Dorothea Wagner",
  title =        "{I/O}-Efficient Generation of Massive Graphs Following
                 the {LFR} Benchmark",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "23",
  number =       "??",
  pages =        "2.5:1--2.5:??",
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3230743",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "LFR is a popular benchmark graph generator used to
                 evaluate community detection algorithms. We present
                 EM-LFR, the first external memory algorithm able to
                 generate massive complex networks following the LFR
                 benchmark. Its most expensive component is the
                 generation of random graphs with prescribed degree
                 sequences which can be divided into two steps: the
                 graphs are first materialized deterministically using
                 the Havel-Hakimi algorithm, and then randomized. Our
                 main contributions are EM-HH and EM-ES, two
                 I/O-efficient external memory algorithms for these two
                 steps. We also propose EM-CM/ES, an alternative
                 sampling scheme using the Configuration Model and
                 rewiring steps to obtain a random simple graph. In an
                 experimental evaluation, we demonstrate their
                 performance; our implementation is able to handle
                 graphs with more than 37 billion edges on a single
                 machine, is competitive with a massively parallel
                 distributed algorithm, and is faster than a
                 state-of-the-art internal memory implementation even on
                 instances fitting in main memory. EM-LFR's
                 implementation is capable of generating large graph
                 instances orders of magnitude faster than the original
                 implementation. We give evidence that both
                 implementations yield graphs with matching properties
                 by applying clustering algorithms to generated
                 instances. Similarly, we analyze the evolution of graph
                 properties as EM-ES is executed on networks obtained
                 with EM-CM/ES and find that the alternative approach
                 can accelerate the sampling process.",
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
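
%%% The Havel-Hakimi materialization step named in the abstract has a
%%% compact in-memory form; a Python sketch (the article's EM-HH does
%%% this I/O-efficiently at massive scale):
%%%
%%%   def havel_hakimi(degrees):
%%%       """Build a simple graph with the given degree sequence, or
%%%       raise ValueError if the sequence is not graphical: repeatedly
%%%       connect the highest-degree node to the next-highest ones."""
%%%       nodes = list(enumerate(degrees))
%%%       edges = []
%%%       while nodes:
%%%           nodes.sort(key=lambda p: -p[1])
%%%           v, d = nodes.pop(0)
%%%           if d > len(nodes):
%%%               raise ValueError("sequence is not graphical")
%%%           for i in range(d):       # connect v to the d largest degrees
%%%               u, du = nodes[i]
%%%               if du == 0:
%%%                   raise ValueError("sequence is not graphical")
%%%               edges.append((v, u))
%%%               nodes[i] = (u, du - 1)
%%%       return edges
%%%
%%%   print(havel_hakimi([2, 2, 2]))   # a triangle: [(0, 1), (0, 2), (1, 2)]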

@Article{Hamann:2018:GBP,
  author =       "Michael Hamann and Ben Strasser",
  title =        "Graph Bisection with {Pareto} Optimization",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "23",
  number =       "??",
  pages =        "1.2:1--1.2:??",
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3173045",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We introduce FlowCutter, a novel algorithm to compute
                 a set of edge cuts or node separators that optimize cut
                 size and balance in the Pareto sense. Our core
                 algorithm heuristically solves the balanced connected
                 $st$-edge-cut problem, where two given nodes $s$ and $t$
                 must be separated by removing edges to obtain two
                 connected parts. Using the core algorithm as a
                 subroutine, we build variants that compute node
                 separators that are independent of $s$ and $t$. From the
                 computed Pareto set, we can identify cuts with a
                 particularly good tradeoff between cut size and balance
                 that can be used to compute contraction and minimum
                 fill-in orders, which can be used in Customizable
                 Contraction Hierarchies (CCHs), a speed-up technique
                 for shortest-path computations. Our core algorithm runs
                 in $O(c |E|)$ time, where $E$ is the set of edges and
                 $c$ is the size of the largest output cut. This makes it
                 well suited for separating large graphs with small
                 cuts, such as road graphs, which is the primary
                 application motivating our research. For road graphs,
                 we present an extensive experimental study
                 demonstrating that FlowCutter outperforms the current
                 state of the art in terms of both cut sizes and CCH
                 performance. By evaluating FlowCutter on a standard
                 graph partitioning benchmark, we further show that
                 FlowCutter also finds small, balanced cuts on nonroad
                 graphs. Another application is the computation of small
                 tree decompositions. To evaluate the quality of our
                 algorithm in this context, we entered the PACE 2016
                 challenge and won first place in the corresponding
                 sequential competition track. We can therefore conclude
                 that our FlowCutter algorithm finds small, balanced
                 cuts on a wide variety of graphs.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
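
%%% The st-edge-cut subproblem at FlowCutter's core is the classic
%%% max-flow/min-cut computation; a Python sketch via BFS augmenting
%%% paths on unit capacities (a textbook reference, not FlowCutter's
%%% balance-aware incremental algorithm):
%%%
%%%   from collections import deque
%%%
%%%   def st_edge_cut(n, edges, s, t):
%%%       """Smallest s-t edge cut of an undirected unit-capacity graph
%%%       (= max flow, by max-flow min-cut)."""
%%%       cap = {}
%%%       for u, v in edges:
%%%           cap[(u, v)] = cap.get((u, v), 0) + 1
%%%           cap[(v, u)] = cap.get((v, u), 0) + 1
%%%       flow = 0
%%%       while True:
%%%           prev = {s: None}
%%%           q = deque([s])
%%%           while q and t not in prev:          # BFS for a residual path
%%%               u = q.popleft()
%%%               for v in range(n):
%%%                   if v not in prev and cap.get((u, v), 0) > 0:
%%%                       prev[v] = u
%%%                       q.append(v)
%%%           if t not in prev:
%%%               return flow
%%%           v = t
%%%           while prev[v] is not None:          # push one unit back along it
%%%               u = prev[v]
%%%               cap[(u, v)] -= 1
%%%               cap[(v, u)] = cap.get((v, u), 0) + 1
%%%               v = u
%%%           flow += 1
%%%
%%%   E = [(0, 1), (0, 2), (1, 3), (2, 3), (3, 4)]
%%%   print(st_edge_cut(5, E, 0, 4))   # 1: the single edge (3, 4)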

@Article{Henzinger:2018:PMC,
  author =       "Monika Henzinger and Alexander Noe and Christian
                 Schulz and Darren Strash",
  title =        "Practical Minimum Cut Algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "23",
  number =       "??",
  pages =        "1.8:1--1.8:??",
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274662",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The minimum cut problem for an undirected
                 edge-weighted graph asks us to divide its set of nodes
                 into two blocks while minimizing the weight sum of the
                 cut edges. Here, we introduce a linear-time algorithm
                 to compute near-minimum cuts. Our algorithm is based on
                 cluster contraction using label propagation and Padberg
                 and Rinaldi's contraction heuristics [SIAM Review,
                 1991]. We give both sequential and shared-memory
                 parallel implementations of our algorithm. Extensive
                 experiments on both real-world and generated instances
                 show that our algorithm finds the optimal cut on nearly
                 all instances significantly faster than other
                 state-of-the-art exact algorithms, and our error rate
                 is lower than that of other heuristic algorithms. In
                 addition, our parallel algorithm runs a factor of $7.5
                 \times$ faster on average when using 32 threads. To
                 further speed up computations, we also give a version
                 of our algorithm that performs random edge contractions
                 as preprocessing. This version achieves a lower running
                 time and better parallel scalability at the expense of
                 a higher error rate.",
  acknowledgement = ack-nhfb,
  articleno =    "1.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
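
%%% Random edge contraction, used above as preprocessing, is the idea
%%% behind Karger's classic algorithm; a union-find Python sketch (an
%%% unweighted textbook version for intuition, not the authors'
%%% label-propagation algorithm):
%%%
%%%   import random
%%%
%%%   def karger_min_cut(edges, n, trials=200):
%%%       """Contract edges in a uniformly random order until two
%%%       super-nodes remain; surviving edges form a cut.  Return the
%%%       smallest cut found over several independent trials."""
%%%       best = float("inf")
%%%       for _ in range(trials):
%%%           parent = list(range(n))
%%%           def find(x):
%%%               while parent[x] != x:
%%%                   parent[x] = parent[parent[x]]
%%%                   x = parent[x]
%%%               return x
%%%           work, components = edges[:], n
%%%           random.shuffle(work)
%%%           for u, v in work:
%%%               if components == 2:
%%%                   break
%%%               ru, rv = find(u), find(v)
%%%               if ru != rv:
%%%                   parent[ru] = rv       # contract this edge
%%%                   components -= 1
%%%           cut = sum(1 for u, v in edges if find(u) != find(v))
%%%           best = min(best, cut)
%%%       return best
%%%
%%%   # Two triangles joined by one bridge edge: the minimum cut is 1.
%%%   E = [(0,1), (1,2), (0,2), (3,4), (4,5), (3,5), (2,3)]
%%%   print(karger_min_cut(E, 6))   # 1 with overwhelming probability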

@Article{VonLooz:2018:UDR,
  author =       "Moritz {Von Looz} and Henning Meyerhenke",
  title =        "Updating Dynamic Random Hyperbolic Graphs in Sublinear
                 Time",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "23",
  number =       "??",
  pages =        "1.6:1--1.6:??",
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3195635",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Generative network models play an important role in
                 algorithm development, scaling studies, network
                 analysis, and realistic system benchmarks for graph
                 data sets. A complex network model gaining considerable
                 popularity builds random hyperbolic graphs, generated
                 by distributing points within a disk in the hyperbolic
                 plane and then adding edges between points with a
                 probability depending on their hyperbolic distance. We
                 present a dynamic extension to model gradual network
                 change, while preserving at each step the point
                 position probabilities. To process the dynamic changes
                 efficiently, we formalize the concept of a
                 probabilistic neighborhood: Let $P$ be a set of $n$
                 points in Euclidean or hyperbolic space, $q$ a query
                 point, dist a distance metric, and $f : R^+ \to [0, 1]$
                 a monotonically decreasing function. Then, the
                 probabilistic neighborhood $N(q, f)$ of $q$ with
                 respect to $f$ is a random subset of $P$, and each
                 point $p \in P$ belongs to $N(q, f)$ with probability
                 $f({\rm dist}(p, q))$. We present a fast,
                 sublinear-time query algorithm to sample probabilistic
                 neighborhoods from planar point sets. For certain
                 distributions of planar $P$, we prove that our
                 algorithm answers a query in $O((|N(q, f)| + \sqrt{n})
                 \log n)$ time with high probability. This
                 enables us to process a node movement in random
                 hyperbolic graphs in sublinear time, resulting in a
                 speedup of about one order of magnitude in practice
                 compared to the fastest previous approach. Apart from
                 that, our query algorithm is also applicable to
                 Euclidean geometry, making it of independent interest
                 for other sampling or probabilistic spreading
                 scenarios.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
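
%%% The probabilistic neighborhood defined above has a one-line
%%% linear-time reference sampler; a Python sketch for Euclidean points
%%% (the article's contribution is doing this in sublinear time):
%%%
%%%   import math, random
%%%
%%%   def probabilistic_neighborhood(points, q, f):
%%%       """Each point p joins independently with probability
%%%       f(dist(p, q))."""
%%%       dist = lambda p: math.hypot(p[0] - q[0], p[1] - q[1])
%%%       return [p for p in points if random.random() < f(dist(p))]
%%%
%%%   pts = [(0, 0), (1, 0), (5, 5)]
%%%   # (0, 0) is always included since f(0) = 1; far points rarely are.
%%%   print(probabilistic_neighborhood(pts, (0, 0), lambda d: math.exp(-d)))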

@Article{Mann:2018:CCR,
  author =       "Zolt{\'a}n {\'A}d{\'a}m Mann",
  title =        "Complexity of Coloring Random Graphs: an Experimental
                 Study of the Hardest Region",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "23",
  number =       "??",
  pages =        "1.3:1--1.3:??",
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3183350",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "It is known that the problem of deciding
                 $k$-colorability of a graph exhibits an easy-hard-easy
                 pattern --- that is, the average-case complexity for
                 backtrack-type algorithms, as a function of $k$, has a
                 peak. This complexity peak is either at $k = \chi - 1$
                 or $k = \chi$, where $\chi$ is the chromatic number of
                 the graph. However, the behavior around the complexity
                 peak is poorly understood. In this article, we use list
                 coloring to model coloring with a fractional number of
                 colors between $\chi - 1$ and $\chi$. We present a
                 comprehensive computational study on the complexity of
                 backtrack-type graph coloring algorithms in this
                 critical range. According to our findings, an
                 easy-hard-easy pattern can be observed on a finer scale
                 between $\chi - 1$ and $\chi$ as well. The highest
                 complexity found this way can be higher than for any
                 integer value of $k$. It turns out that the complexity
                 follows an alternating three-dimensional pattern;
                 understanding this pattern is very important for
                 benchmarking purposes. Our results also answer the
                 previously open question whether coloring with $\chi -
                 1$ or with $\chi$ colors is harder: this depends on the
                 location of the maximal fractional complexity.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
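
%%% The backtrack-type coloring algorithms studied above can be
%%% summarized by a small sketch that also counts search-tree nodes, the
%%% cost measure behind easy-hard-easy plots (plain Python; vertices are
%%% assumed to be 0..n-1 and all appear as keys of adj):
%%%
%%%   def color_count(adj, k):
%%%       """Backtracking test for k-colorability plus the number of
%%%       search-tree nodes visited."""
%%%       n = len(adj)
%%%       color = [0] * n              # 0 = uncolored, colors are 1..k
%%%       nodes = 0
%%%       def extend(v):
%%%           nonlocal nodes
%%%           nodes += 1
%%%           if v == n:
%%%               return True
%%%           for c in range(1, k + 1):
%%%               if all(color[u] != c for u in adj[v]):
%%%                   color[v] = c
%%%                   if extend(v + 1):
%%%                       return True
%%%                   color[v] = 0
%%%           return False
%%%       return extend(0), nodes
%%%
%%%   # 5-cycle: chromatic number 3, so 2 colors fail and 3 succeed.
%%%   C5 = {0: [1, 4], 1: [0, 2], 2: [1, 3], 3: [2, 4], 4: [0, 3]}
%%%   print(color_count(C5, 2), color_count(C5, 3))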

@Article{Paudel:2018:CCN,
  author =       "Nilakantha Paudel and Loukas Georgiadis and Giuseppe
                 F. Italiano",
  title =        "Computing Critical Nodes in Directed Graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "23",
  number =       "??",
  pages =        "2.2:1--2.2:??",
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3228332",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We consider the critical node detection problem (CNDP)
                 in directed graphs, which can be defined as
                 follows. Given a directed graph $G$ and a parameter
                 $k$, we wish to remove a subset $S$ of at most $k$
                 vertices of $G$ such that the residual graph $G
                 \backslash S$ has minimum pairwise strong
                 connectivity. This problem is NP-hard, and thus we are
                 interested in practical heuristics. In this article, we
                 apply the framework of Georgiadis et al. (SODA 2017)
                 and provide a sophisticated linear-time algorithm for
                 the $k = 1$ case. Based on this algorithm, we provide
                 an efficient heuristic for the general case. Then, we
                 conduct a thorough experimental evaluation of various
                 heuristics for CNDP. Our experimental results suggest
                 that our heuristic performs very well in practice, both
                 in terms of running time and of solution quality.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
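
%%% Pairwise strong connectivity, the objective above, is computable
%%% from SCC sizes, which yields a brute-force baseline for the $k = 1$
%%% case; a Python sketch (Kosaraju's algorithm; every vertex is assumed
%%% to appear as a key of adj; not the authors' linear-time algorithm):
%%%
%%%   def scc_sizes(adj):
%%%       """Sizes of strongly connected components (Kosaraju)."""
%%%       order, seen = [], set()
%%%       def dfs1(v):
%%%           seen.add(v)
%%%           for w in adj.get(v, []):
%%%               if w not in seen:
%%%                   dfs1(w)
%%%           order.append(v)
%%%       for v in adj:
%%%           if v not in seen:
%%%               dfs1(v)
%%%       radj = {}
%%%       for v in adj:
%%%           for w in adj[v]:
%%%               radj.setdefault(w, []).append(v)
%%%       sizes, seen = [], set()
%%%       for v in reversed(order):       # second pass on reverse graph
%%%           if v in seen:
%%%               continue
%%%           stack, size = [v], 0
%%%           seen.add(v)
%%%           while stack:
%%%               x = stack.pop()
%%%               size += 1
%%%               for w in radj.get(x, []):
%%%                   if w not in seen:
%%%                       seen.add(w)
%%%                       stack.append(w)
%%%           sizes.append(size)
%%%       return sizes
%%%
%%%   def most_critical_node(adj):
%%%       """Brute-force k = 1: remove each vertex in turn and keep the
%%%       one minimizing the number of strongly connected ordered pairs."""
%%%       strong_pairs = lambda a: sum(s * (s - 1) for s in scc_sizes(a))
%%%       without = lambda a, v: {u: [w for w in ws if w != v]
%%%                               for u, ws in a.items() if u != v}
%%%       return min(adj, key=lambda v: strong_pairs(without(adj, v)))
%%%
%%%   # Two directed triangles sharing vertex 2: removing 2 kills all cycles.
%%%   G = {0: [1], 1: [2], 2: [0, 3], 3: [4], 4: [2]}
%%%   print(most_critical_node(G))   # 2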

@Article{Polischchuk:2018:EAS,
  author =       "Valentin Polischchuk and Vijaya Ramachandran / Rezaul
                 A. Chowdhury",
  title =        "Editorial: {ALENEX 2017} Special Issue",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "23",
  number =       "??",
  pages =        "2.1:1--2.1:??",
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3239166",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Robinson:2018:AHP,
  author =       "Jeffrey A. Robinson and Susan V. Vrbsky and Xiaoyan
                 Hong and Brian P. Eddy",
  title =        "Analysis of a High-Performance {TSP} Solver on the
                 {GPU}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "23",
  number =       "??",
  pages =        "1.1:1--1.1:??",
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3154835",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Graphical Processing Units have been applied to solve
                 NP-hard problems with no known polynomial time
                 solutions. An example of such a problem is the
                 Traveling Salesman Problem (TSP). The TSP is one of the
                 most commonly studied combinatorial optimization
                 problems and has multiple applications in the areas of
                 engineering, transportation, and logistics. This
                 article presents an improved algorithm for
                 approximating the TSP on fully connected, symmetric
                 graphs by utilizing the GPU. Our approach improves an
                 existing 2-opt hill-climbing algorithm with random
                 restarts by considering multiple updates to the current
                 path found in parallel, and it allows $k$ updates per
                 iteration, called $k$-swap. With our
                 $k$-swap modification, we show a speed-up over the
                 existing algorithm of $4.5 \times $ to $22.9 \times $
                 on data sets ranging from 1,400 to 33,810 nodes,
                 respectively.",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
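
%%% The 2-opt hill climbing that the GPU algorithm above builds on is
%%% short to state; a sequential Python sketch (one improving move per
%%% iteration; the article's contribution is evaluating many moves in
%%% parallel and applying k of them per iteration):
%%%
%%%   import math, random
%%%
%%%   def tour_len(pts, tour):
%%%       return sum(math.dist(pts[tour[i - 1]], pts[tour[i]])
%%%                  for i in range(len(tour)))
%%%
%%%   def two_opt(pts, tour):
%%%       """Reverse the segment between two positions whenever that
%%%       shortens the tour, until no improving move remains."""
%%%       improved = True
%%%       while improved:
%%%           improved = False
%%%           for i in range(1, len(tour) - 1):
%%%               for j in range(i + 1, len(tour)):
%%%                   cand = tour[:i] + tour[i:j][::-1] + tour[j:]
%%%                   if tour_len(pts, cand) < tour_len(pts, tour) - 1e-12:
%%%                       tour, improved = cand, True
%%%       return tour
%%%
%%%   random.seed(1)
%%%   pts = [(random.random(), random.random()) for _ in range(30)]
%%%   t = two_opt(pts, list(range(30)))
%%%   print(round(tour_len(pts, t), 3))   # locally optimal tour length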

@Article{Sieranoja:2018:CHD,
  author =       "Sami Sieranoja and Pasi Fr{\"a}nti",
  title =        "Constructing a High-Dimensional $k$ {NN}-Graph Using a
                 {$Z$}-Order Curve",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "23",
  number =       "??",
  pages =        "1.9:1--1.9:??",
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274656",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Although many fast methods exist for constructing a $k$
                 NN-graph for low-dimensional data, it is still an open
                 question how to do it efficiently for high-dimensional
                 data. We present a new method to construct an
                 approximate $k$ NN-graph for medium- to high-dimensional
                 data. Our method uses one-dimensional mapping with a
                 Z-order curve to construct an initial graph and then
                 continues to improve this using neighborhood
                 propagation. Experiments show that the method is faster
                 than the compared methods with five different benchmark
                 datasets, the dimensionality of which ranges from 14 to
                 784. Compared to a brute-force approach, the method
                 provides a speedup between 12.7:1 and 414.2:1 depending
                 on the dataset. We also show that errors in the
                 approximate $k$ NN-graph originate more likely from
                 outlier points; and, it can be detected during runtime,
                 which points are likely to have errors in their
                 neighbors.",
  acknowledgement = ack-nhfb,
  articleno =    "1.9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
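
%%% The Z-order (Morton) mapping used to seed the kNN-graph interleaves
%%% the bits of the quantized coordinates; a Python sketch (assumes
%%% non-negative integer coordinates; names are illustrative):
%%%
%%%   def morton_key(coords, bits=10):
%%%       """Interleave the bits of the coordinates, most significant
%%%       bit first; sorting by this key walks the Z-curve."""
%%%       key = 0
%%%       for b in range(bits - 1, -1, -1):       # MSB first
%%%           for c in coords:
%%%               key = (key << 1) | ((c >> b) & 1)
%%%       return key
%%%
%%%   pts = [(3, 1), (1, 2), (2, 2), (0, 0)]
%%%   order = sorted(range(len(pts)), key=lambda i: morton_key(pts[i], bits=2))
%%%   print(order)   # [3, 1, 0, 2]; Z-curve neighbors become kNN candidates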

@Article{Tsirogiannis:2018:CEV,
  author =       "Constantinos Tsirogiannis and Frank Staals and Vincent
                 Pellissier",
  title =        "Computing the Expected Value and Variance of Geometric
                 Measures",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "23",
  number =       "??",
  pages =        "2.4:1--2.4:??",
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3228331",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Let $P$ be a point set in $R^d$, and let $M$ be a function
                 that maps any subset of $P$ to a positive real. We
                 examine the problem of computing the mean and variance
                 of $M$ when a subset in $P$ is selected according to a
                 random distribution. We consider two distributions; in
                 the first distribution (the Bernoulli distribution),
                 each point $p$ in $P$ is included in the random subset
                 independently, with probability $\pi(p)$. In the
                 second distribution (the fixed-size distribution),
                 exactly $s$ points are selected uniformly at random among
                 all possible subsets of $s$ points in $P$. We present
                 efficient algorithms for computing the mean and
                 variance of several geometric measures when point sets
                 are selected under one of the described random
                 distributions. We also implemented four of those
                 algorithms: an algorithm that computes the mean 2D
                 bounding box volume in the Bernoulli distribution, an
                 algorithm for the mean 2D convex hull area in the
                 fixed-size distribution, an algorithm that computes the
                 exact mean and variance of the mean pairwise distance
                 (MPD) for $d$-dimensional point sets in the fixed-size
                 distribution, and a $(1 - \epsilon)$-approximation
                 algorithm for the same measure. We conducted
                 experiments where we compared the performance of our
                 implementations with a standard heuristic approach, and
                 we show that our implementations are very efficient. We
                 also compared the implementation of our exact MPD
                 algorithm and the $(1 - \epsilon)$-approximation
                 algorithm; the approximation method performs faster on
                 real-world datasets for point sets of up to 13
                 dimensions, and provides high-precision
                 approximations.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
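
%%% The linearity-of-expectation idea behind such mean computations is
%%% visible on a simpler sum-type measure; a Python sketch of the
%%% expected total pairwise distance under the Bernoulli distribution
%%% (an illustrative special case, not the article's MPD or variance
%%% algorithms):
%%%
%%%   import itertools, math
%%%
%%%   def expected_total_pairwise_distance(points, pi):
%%%       """Pair {p, q} survives with probability pi[p] * pi[q]
%%%       independently, so linearity of expectation gives an exact
%%%       O(n^2) formula."""
%%%       return sum(math.dist(p, q) * pi[i] * pi[j]
%%%                  for (i, p), (j, q) in
%%%                  itertools.combinations(enumerate(points), 2))
%%%
%%%   pts = [(0, 0), (3, 0), (0, 4)]
%%%   print(expected_total_pairwise_distance(pts, [0.5, 0.5, 1.0]))
%%%   # 3*0.25 + 4*0.5 + 5*0.5 = 5.25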

@Article{Vella:2018:DMF,
  author =       "Flavio Vella and Massimo Bernaschi and Giancarlo
                 Carbone",
  title =        "Dynamic Merging of Frontiers for Accelerating the
                 Evaluation of Betweenness Centrality",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "23",
  number =       "??",
  pages =        "1.4:1--1.4:??",
  year =         "2018",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3182656",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Betweenness Centrality (BC) is a widely used metric of
                 the relevance of a node in a network. The fastest-known
                 algorithm for the evaluation of BC on unweighted graphs
                 builds a tree representing information about the
                 shortest paths for each vertex to calculate its
                 contribution to the BC score. Actually, for specific
                 vertices, the shortest-path trees of neighboring nodes
                 could be leveraged to reduce the computational burden,
                 but existing BC algorithms do not exploit that
                 information and carry out redundant computations. We
                 propose a new algorithm, called dynamic merging of
                 frontiers, which makes use of such information to
                 derive the BC score of degree-2 vertices by re-using
                 the results of the sub-trees of the neighbors. We
                 implemented our idea in parallel fashion exploiting
                 Graphics Processing Units. Compared to state-of-the-art
                 implementations, our approach achieves a linear
                 improvement in the number of degree-2 vertices and an
                 average improvement of $ \times $ over a variety of
                 real-world graphs.",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Ahmed:2019:MLS,
  author =       "Reyan Ahmed and Patrizio Angelini and Faryad Darabi
                 Sahneh and Alon Efrat and David Glickenstein and Martin
                 Gronemann and Niklas Heinsohn and Stephen G. Kobourov
                 and Richard Spence and Joseph Watkins and Alexander
                 Wolff",
  title =        "Multi-level {Steiner} Trees",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "2",
  pages =        "2.5:1--2.5:??",
  month =        dec,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3368621",
  ISSN =         "1084-6654",
  bibdate =      "Mon Dec 16 08:07:51 MST 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "In the classical Steiner tree problem, given an
                 undirected, connected graph $ G = (V, E) $ with
                 non-negative edge costs and a set of terminals $ T
                 \subseteq V $, the objective is to find a minimum-cost
                 tree $ E^\prime \subseteq E $ that spans the terminals.
                 The problem is APX-hard; the best-known approximation
                 algorithm has a ratio of $ \rho = \ln (4) + \epsilon <
                 1.39 $. In this article, we study a natural
                 generalization, the multi-level Steiner tree (MLST)
                 problem: Given a nested sequence of terminals $ T_l
                 \subset \ldots {} \subset T_1 \subseteq V $, compute
                 nested trees $ E_l \subseteq \ldots {} \subseteq E_1
                 \subseteq E $ that span the corresponding terminal sets
                 with minimum total cost. The MLST problem and variants
                 thereof have been studied under various names,
                 including Multi-level Network Design,
                 Quality-of-Service Multicast tree, Grade-of-Service
                 Steiner tree, and Multi-tier tree. Several
                 approximation results are known. We first present two
                 simple $ O(l)$-approximation heuristics. Based on
                 these, we introduce a rudimentary composite algorithm
                 that generalizes the above heuristics, and determine
                 its approximation ratio by solving a linear program. We
                 then present a method that guarantees the same
                 approximation ratio using at most $2l$ Steiner tree
                 computations. We compare these heuristics
                 experimentally on various instances of up to 500
                 vertices using three different network generation
                 models. We also present several integer linear
                 programming formulations for the MLST problem and
                 compare their running times on these instances. To our
                 knowledge, the composite algorithm achieves the best
                 approximation ratio for up to $ l = 100$ levels, which
                 is sufficient for most applications, such as network
                 visualization or designing multi-level
                 infrastructure.",
  acknowledgement = ack-nhfb,
  articleno =    "2.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
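
%%% A minimal Python sketch of the bottom-up flavor of the simple MLST
%%% heuristics discussed above (an illustration using networkx's
%%% built-in 2-approximate steiner_tree, not the authors' exact
%%% heuristics or their composite algorithm): compute one Steiner tree
%%% for the largest terminal set T_1, then obtain each higher level by
%%% pruning non-terminal leaves, which yields nested trees by
%%% construction.
%%%
%%%   from networkx.algorithms.approximation import steiner_tree
%%%
%%%   def bottom_up_mlst(G, levels):
%%%       """levels: terminal sets T_1 >= T_2 >= ... >= T_l, largest
%%%       first. Returns one tree per level; level i is the minimal
%%%       subtree of the base tree spanning T_i, so the trees nest."""
%%%       base = steiner_tree(G, list(levels[0]), weight="weight")
%%%       trees = []
%%%       for T in levels:
%%%           Ti, T = base.copy(), set(T)
%%%           # repeatedly strip leaves that are not terminals of this level
%%%           leaves = [v for v in Ti if Ti.degree(v) == 1 and v not in T]
%%%           while leaves:
%%%               Ti.remove_nodes_from(leaves)
%%%               leaves = [v for v in Ti if Ti.degree(v) == 1 and v not in T]
%%%           trees.append(Ti)
%%%       return trees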

@Article{Altmanova:2019:ETF,
  author =       "Katerina Altmanov{\'a} and Dusan Knop and Martin
                 Kouteck{\'y}",
  title =        "Evaluating and Tuning $n$-fold Integer Programming",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "2",
  pages =        "2.2:1--2.2:??",
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3330137",
  ISSN =         "1084-6654",
  bibdate =      "Mon Dec 16 08:07:51 MST 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3330137",
  abstract =     "In recent years, algorithmic breakthroughs in
                 stringology, computational social choice, scheduling,
                 and so on, were achieved by applying the theory of
                 so-called $n$-fold integer programming. An $n$-fold
                 integer program (IP) has a highly uniform block
                 structured constraint matrix. Hemmecke, Onn, and
                 Romanchuk [Math. Program., 2013] showed an algorithm
                  with runtime $ \Delta^{O(rst + r^2 s)} n^3$, where $
                  \Delta $ is the largest coefficient, $r$, $s$, and $t$
                  are dimensions of blocks of the constraint matrix and
                  $n$ is the total dimension of the IP; thus, the
                  algorithm is efficient if the blocks are small and
                  have small coefficients. The algorithm works by
                 iteratively improving a feasible solution with
                 augmenting steps, and $n$-fold IPs have the special
                 property that augmenting steps are guaranteed to exist
                 in a not-too-large neighborhood. However, this
                 algorithm has never been implemented and evaluated. We
                 have implemented the algorithm and learned the
                 following along the way. The original algorithm is
                 practically unusable, but we discover a series of
                 improvements that make its evaluation possible.
                 Crucially, we observe that a certain constant in the
                 algorithm can be treated as a tuning parameter, which
                 yields an efficient heuristic (essentially searching in
                 a smaller-than-guaranteed neighborhood). Furthermore,
                 the algorithm uses an overly expensive strategy to find
                 a ``best'' step, while finding only an ``approximately
                 best'' step is much cheaper, yet sufficient for quick
                 convergence. Using this insight, we improve the
                 asymptotic dependence on $n$ from $ n^3$ to $ n^2 \log
                 n$. Finally, we tested the behavior of the algorithm
                 with various values of the tuning parameter and
                 different strategies of finding improving steps. First,
                 we show that decreasing the tuning parameter initially
                 leads to an increased number of iterations needed for
                 convergence and eventually to getting stuck in local
                 optima, as expected. However, surprisingly small values
                 of the parameter already exhibit good behavior while
                 significantly lowering the time the algorithm spends
                 per single iteration. Second, our new strategy for
                 finding ``approximately best'' steps wildly outperforms
                 the original construction.",
  acknowledgement = ack-nhfb,
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
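
%%% A minimal Python sketch of the augmentation scheme described above;
%%% `candidates` is a hypothetical stand-in for the paper's
%%% Graver-style neighborhood of augmenting directions, and the loop
%%% illustrates the "approximately best step" idea: accept the first
%%% step improving the objective by a (1 + eps) factor instead of
%%% scanning the whole neighborhood for the single best step.
%%%
%%%   def augment(x, candidates, value, eps=0.1):
%%%       """x: feasible solution (list of ints); candidates(x): iterable
%%%       of feasibility-preserving augmenting vectors; value: objective
%%%       to minimize (assumed positive)."""
%%%       improved = True
%%%       while improved:
%%%           improved = False
%%%           for h in candidates(x):
%%%               y = [xi + hi for xi, hi in zip(x, h)]
%%%               if value(y) * (1.0 + eps) <= value(x):
%%%                   x, improved = y, True
%%%                   break          # take an "approximately best" step
%%%       return x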

@Article{Barenboim:2019:FDG,
  author =       "Leonid Barenboim and Tzalik Maimon",
  title =        "Fully Dynamic Graph Algorithms Inspired by Distributed
                 Computing: Deterministic Maximal Matching and Edge
                 Coloring in Sublinear Update-Time",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.14:1--1.14:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3338529",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3338529",
  abstract =     "We study dynamic graphs in the fully dynamic
                 centralized setting. In this setting, the vertex set of
                 size $n$ of a graph $G$ is fixed, and the edge set
                 changes step-by-step, such that each step either adds
                 or removes an edge. Dynamic graphs have various
                 applications in fields such as Communication Networks,
                 Computer Graphics, and VLSI Design. The goal in this
                 setting is maintaining a solution to a certain problem
                 (e.g., maximal matching, edge coloring) after each
                 step, such that each step is executed efficiently. The
                 running time of a step is called update-time. One can
                 think of this setting as a dynamic network that is
                 monitored by a central processor that is responsible
                 for maintaining the solution. Prior to the current
                 work, for several central problems, the best-known
                 deterministic algorithms for general graphs were the
                 naive ones with update-time $ O(n)$. This is the case
                 for maximal matching and proper $
                 O(\Delta)$-edge-coloring. The question of existence of
                 sublinear in $n$ update-time deterministic algorithms
                 for dense graphs and general graphs remained wide open.
                 In this article, we address this question by devising
                 sublinear update-time deterministic algorithms for
                 maximal matching in graphs with bounded neighborhood
                 independence $ o(n / \log^2 n)$, and for proper $
                 O(\Delta)$-edge-coloring in general graphs. The family
                 of graphs with bounded neighborhood independence is a
                 very wide family of dense graphs. In particular, graphs
                 with constant neighborhood independence include
                 line-graphs, claw-free graphs, unit disk graphs, and
                 many other graphs. Thus, these graphs represent very
                 well various types of networks. For graphs with
                 constant neighborhood independence, our
                 maximal-matching algorithm has $ {\~ O}(\sqrt n)$
                  update-time. Our $ O(\Delta)$-edge-coloring algorithm
                 has $ {\~ O}(\sqrt \Delta)$ update-time for general
                 graphs. To obtain our results, we employ a novel
                 approach that adapts certain distributed algorithms of
                 the LOCAL setting to the centralized fully dynamic
                 setting. This is achieved by optimizing the work each
                 processor performs and efficiently simulating a
                 distributed algorithm in a centralized setting. The
                 simulation is efficient, thanks to a careful selection
                 of the network parts that the algorithm is invoked on,
                 and by deducing the solution from the additional
                 information that is present in the centralized setting,
                 but not in the distributed one. Our experiments on
                 various network topologies and scenarios demonstrate
                 that our algorithms are highly efficient in practice.
                 We believe that our approach is of independent interest
                 and may be applicable to additional problems.",
  acknowledgement = ack-nhfb,
  articleno =    "1.14",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
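
%%% For context, a Python sketch of the naive O(n)-update-time baseline
%%% the abstract refers to (not the authors' sublinear-update
%%% algorithm): insertions match two free endpoints in O(1); deleting a
%%% matched edge frees its endpoints, which then scan their
%%% neighborhoods for free partners to restore maximality.
%%%
%%%   class NaiveDynamicMatching:
%%%       def __init__(self, vertices):
%%%           self.adj = {v: set() for v in vertices}
%%%           self.mate = {v: None for v in vertices}
%%%
%%%       def _rematch(self, v):
%%%           # O(n) scan: match the free vertex v to any free neighbor.
%%%           for u in self.adj[v]:
%%%               if self.mate[u] is None:
%%%                   self.mate[u], self.mate[v] = v, u
%%%                   return
%%%
%%%       def insert(self, u, v):
%%%           self.adj[u].add(v); self.adj[v].add(u)
%%%           if self.mate[u] is None and self.mate[v] is None:
%%%               self.mate[u], self.mate[v] = v, u
%%%
%%%       def delete(self, u, v):
%%%           self.adj[u].discard(v); self.adj[v].discard(u)
%%%           if self.mate[u] == v:        # a matched edge was removed
%%%               self.mate[u] = self.mate[v] = None
%%%               self._rematch(u)
%%%               self._rematch(v)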

@Article{Baum:2019:FEC,
  author =       "Moritz Baum and Valentin Buchhold and Julian Dibbelt
                 and Dorothea Wagner",
  title =        "Fast Exact Computation of Isocontours in Road
                 Networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.18:1--1.18:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3355514",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "We study the problem of computing isocontours in
                 static and dynamic road networks, where the objective
                 is to identify the boundary of the region that is
                 reachable from a given source within a certain amount
                 of time (or another limited resource). Although there
                 is a wide range of practical applications for this
                 problem (e.g., urban planning, geomarketing,
                 visualizing the cruising range of a vehicle), there has
                 been little research on fast algorithms for large,
                 realistic inputs, and existing approaches tend to
                 compute more information than necessary. Our
                 contribution is twofold: (1) We propose compact but
                 sufficient definitions of isocontours, based on which
                 (2) we provide several easy-to-parallelize, scalable
                 algorithmic approaches for faster computation. By
                 extensive experimental analysis, we demonstrate that
                 our techniques enable interactive isocontour
                 computation within milliseconds even on continental
                 networks, significantly faster than the state of the
                 art.",
  acknowledgement = ack-nhfb,
  articleno =    "1.18",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
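
%%% The primitive underlying isocontour computation is a
%%% resource-limited shortest-path search. A plain Python sketch
%%% (budgeted Dijkstra, not the engineered algorithms of the article)
%%% returning the reachable vertex set whose boundary forms the
%%% isocontour:
%%%
%%%   import heapq
%%%
%%%   def reachable_within(graph, source, budget):
%%%       """graph: dict u -> list of (v, travel_time); returns the set
%%%       of vertices reachable from source within the budget."""
%%%       dist = {source: 0.0}
%%%       heap = [(0.0, source)]
%%%       while heap:
%%%           d, u = heapq.heappop(heap)
%%%           if d > dist[u]:
%%%               continue                 # stale queue entry
%%%           for v, w in graph[u]:
%%%               nd = d + w
%%%               if nd <= budget and nd < dist.get(v, float("inf")):
%%%                   dist[v] = nd
%%%                   heapq.heappush(heap, (nd, v))
%%%       return set(dist)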

@Article{Bentert:2019:LAM,
  author =       "Matthias Bentert and Anne-Sophie Himmel and Hendrik
                 Molter and Marco Morik and Rolf Niedermeier and
                 Ren{\'e} Saitenmacher",
  title =        "Listing All Maximal $k$-Plexes in Temporal Graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.13:1--1.13:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3325859",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3325859",
  abstract =     "Many real-world networks evolve over time, that is,
                 new contacts appear and old contacts may disappear.
                 They can be modeled as temporal graphs where
                 interactions between vertices (which represent people
                 in the case of social networks) are represented by
                 timestamped edges. One of the most fundamental problems
                 in (social) network analysis is community detection,
                 and one of the most basic primitives to model a
                 community is a clique. Addressing the problem of
                 finding communities in temporal networks, Viard et al.
                 [TCS 2016] introduced $ \Delta $-cliques as a natural
                 temporal version of cliques. Himmel et al. [SNAM 2017]
                 showed how to adapt the well-known Bron--Kerbosch
                 algorithm to enumerate $ \Delta $-cliques. We continue
                 this work and improve and extend the algorithm of
                 Himmel et al. to enumerate temporal $k$-plexes
                 (notably, cliques are the special case $ k = 1$). We
                  define a $ \Delta $-$k$-plex as a set of vertices and a
                 time interval, where during this time interval each
                 vertex has in each consecutive $ \Delta + 1$ timesteps
                 at least one edge to all but at most $ k - 1$ vertices
                 in the chosen set of vertices. We develop a recursive
                  algorithm for enumerating all maximal
                  $ \Delta $-$k$-plexes and perform experiments on real-world social
                 networks that demonstrate the practical feasibility of
                  our approach. In particular, for the special case of
                  $ \Delta $-$1$-plexes (i.e., $ \Delta $-cliques), we
                 observe that our algorithm is on average significantly
                 faster than the previous algorithms by Himmel et al.
                 [SNAM 2017] and Viard et al. [IPL 2018] for enumerating
                 $ \Delta $-cliques.",
  acknowledgement = ack-nhfb,
  articleno =    "1.13",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
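
%%% The Delta-k-plex definition above translates directly into a slow
%%% but explicit Python checker (a restatement of the definition for
%%% concreteness, not the authors' enumeration algorithm):
%%%
%%%   def is_delta_k_plex(edges, S, t_start, t_end, delta, k):
%%%       """edges: iterable of (u, v, t) timestamped undirected edges;
%%%       S: candidate vertex set; [t_start, t_end]: time interval,
%%%       assumed to span at least delta + 1 timesteps."""
%%%       S, E = set(S), list(edges)
%%%       for t in range(t_start, t_end - delta + 1):
%%%           # edges present in this window of delta + 1 timesteps
%%%           window = {frozenset((u, v)) for (u, v, ts) in E
%%%                     if t <= ts <= t + delta}
%%%           for v in S:
%%%               seen = sum(1 for u in S - {v}
%%%                          if frozenset((u, v)) in window)
%%%               # v may miss at most k - 1 of the other vertices
%%%               if seen < len(S) - k:
%%%                   return False
%%%       return True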

@Article{Beyer:2019:SST,
  author =       "Stephan Beyer and Markus Chimani",
  title =        "Strong {Steiner} Tree Approximations in Practice",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.7:1--1.7:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3299903",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3299903",
  abstract =     "In this experimental study, we consider Steiner tree
                 approximation algorithms that guarantee a constant
                 approximation ratio smaller than 2. The considered
                 greedy algorithms and approaches based on linear
                 programming involve the incorporation of $k$-restricted
                 full components for some $ k \geq 3$. For most of the
                 algorithms, their strongest theoretical approximation
                 bounds are only achieved for $ k \to \infty $. However,
                 the running time is also exponentially dependent on
                 $k$, so only small $k$ are tractable in practice. We
                 investigate different implementation aspects and
                 parameter choices that finally allow us to construct
                 algorithms (somewhat) feasible for practical use. We
                 compare the algorithms against each other, to an exact
                 algorithm based on integer linear programs, and to fast
                 and simple 2-approximations as well as state-of-the-art
                 heuristics.",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Bjorklund:2019:FHF,
  author =       "Andreas Bj{\"o}rklund and Brajesh Gupt and Nicol{\'a}s
                 Quesada",
  title =        "A Faster {Hafnian} Formula for Complex Matrices and
                 Its Benchmarking on a Supercomputer",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.11:1--1.11:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3325111",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib;
                 https://www.math.utah.edu/pub/tex/bib/super.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3325111",
  abstract =     "We introduce new and simple algorithms for the
                 calculation of the number of perfect matchings of
                 complex weighted, undirected graphs with and without
                 loops. Our compact formulas for the hafnian and loop
                 hafnian of $ n \times n $ complex matrices run in $
                 O(n^3 2^{n / 2}) $ time, are embarrassingly
                 parallelizable and, to the best of our knowledge, are
                 the fastest exact algorithms to compute these
                 quantities. Despite our highly optimized algorithm,
                 numerical benchmarks on the Titan supercomputer with
                 matrices up to size $ 56 \times 56 $ indicate that one
                 would require the 288,000 CPUs of this machine for
                 about 6 weeks to compute the hafnian of a $ 100 \times
                 100 $ matrix.",
  acknowledgement = ack-nhfb,
  articleno =    "1.11",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
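
%%% For reference, the hafnian sums the weights of all perfect
%%% matchings; a naive Python implementation of the defining recursion
%%% (running in (n-1)!! time, far slower than the O(n^3 2^{n/2})
%%% algorithm of the article) makes the quantity concrete:
%%%
%%%   def hafnian(A):
%%%       """A: symmetric n x n matrix (list of lists). haf(A) sums, over
%%%       all perfect matchings M of {0..n-1}, the products of A[i][j]."""
%%%       n = len(A)
%%%       if n == 0:
%%%           return 1
%%%       if n % 2 == 1:
%%%           return 0                 # odd order: no perfect matching
%%%       total = 0
%%%       for j in range(1, n):
%%%           # pair index 0 with index j, recurse on the rest
%%%           keep = [r for r in range(n) if r not in (0, j)]
%%%           sub = [[A[r][c] for c in keep] for r in keep]
%%%           total += A[0][j] * hafnian(sub)
%%%       return total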

@Article{Borassi:2019:KAA,
  author =       "Michele Borassi and Emanuele Natale",
  title =        "{KADABRA} is an {ADaptive Algorithm for Betweenness
                 via Random Approximation}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.2:1--1.2:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3284359",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3284359",
  abstract =     "We present KADABRA, a new algorithm to approximate
                 betweenness centrality in directed and undirected
                 graphs, which significantly outperforms all previous
                 approaches on real-world complex networks. The
                 efficiency of the new algorithm relies on two new
                 theoretical contributions, of independent interest. The
                 first contribution focuses on sampling shortest paths,
                 a subroutine used by most algorithms that approximate
                 betweenness centrality. We show that, on realistic
                 random graph models, we can perform this task in time $
                 |E|^{1 / 2 + o (1)} $ with high probability, obtaining
                 a significant speedup with respect to the $ \Theta
                 (|E|) $ worst-case performance. We experimentally show
                 that this new technique achieves similar speedups on
                 real-world complex networks, as well. The second
                 contribution is a new rigorous application of the
                 adaptive sampling technique. This approach decreases
                 the total number of shortest paths that need to be
                 sampled to compute all betweenness centralities with a
                 given absolute error, and it also handles more general
                 problems, such as computing the $k$ most central nodes.
                 Furthermore, our analysis is general, and it might be
                 extended to other settings.",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
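
%%% The first ingredient described above, sampling shortest paths,
%%% drives the generic estimator sketched below in Python: sample node
%%% pairs, pick one shortest path uniformly at random per pair, and
%%% count how often each vertex appears as an interior node. This is
%%% only the standard sampling skeleton for unweighted graphs given as
%%% adjacency dicts; KADABRA's balanced bidirectional BFS and adaptive
%%% stopping rule are not reproduced.
%%%
%%%   import random
%%%   from collections import deque
%%%
%%%   def sample_path(adj, s, t):
%%%       # BFS from s recording distances and shortest-path counts sigma
%%%       dist, sigma = {s: 0}, {s: 1}
%%%       q = deque([s])
%%%       while q:
%%%           u = q.popleft()
%%%           for v in adj[u]:
%%%               if v not in dist:
%%%                   dist[v] = dist[u] + 1; sigma[v] = 0; q.append(v)
%%%               if dist[v] == dist[u] + 1:
%%%                   sigma[v] += sigma[u]
%%%       if t not in dist:
%%%           return None
%%%       # walk back from t, choosing predecessors proportionally to sigma
%%%       path, u = [], t
%%%       while u != s:
%%%           preds = [v for v in adj[u] if dist.get(v) == dist[u] - 1]
%%%           u = random.choices(preds, [sigma[v] for v in preds])[0]
%%%           path.append(u)
%%%       return path[:-1]             # interior vertices only (s excluded)
%%%
%%%   def approx_betweenness(adj, samples):
%%%       nodes, bc = list(adj), {v: 0.0 for v in adj}
%%%       for _ in range(samples):
%%%           s, t = random.sample(nodes, 2)
%%%           p = sample_path(adj, s, t)
%%%           if p:
%%%               for v in p:
%%%                   bc[v] += 1.0 / samples
%%%       return bc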

@Article{Buchhold:2019:RTT,
  author =       "Valentin Buchhold and Peter Sanders and Dorothea
                 Wagner",
  title =        "Real-time Traffic Assignment Using Engineered
                 Customizable Contraction Hierarchies",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "2",
  pages =        "2.4:1--2.4:??",
  month =        dec,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3362693",
  ISSN =         "1084-6654",
  bibdate =      "Mon Dec 16 08:07:51 MST 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Given an urban road network and a set of
                 origin-destination pairs, the traffic assignment
                 problem asks for the traffic flow on each road segment.
                 Common solution algorithms require a large number of
                 shortest-path computations. In this article, we
                 significantly accelerate the computation of flow
                 patterns, enabling interactive transportation and urban
                 planning applications. We achieve this by building a
                 traffic assignment procedure upon customizable
                 contraction hierarchies (CCH), revisiting and carefully
                 engineering CCH customization and queries, and adapting
                 CCH to compute batched point-to-point shortest paths.
                 Although motivated by the traffic assignment problem,
                 our optimizations apply to CCH in general. In contrast
                 to previous work, our evaluation uses real-world
                 production data for all parts of the input. On a
                 metropolitan area encompassing about 2.7 million
                 inhabitants, we decrease the flow-pattern computation
                 for a typical 1-hour morning peak (a quarter million
                 trips) from 90.9 to 14.1 seconds on one core and 2.4
                 seconds on a 16-core machine. This represents a speedup
                 of 37 over the state of the art and more than three
                 orders of magnitude over the Dijkstra-based baseline.",
  acknowledgement = ack-nhfb,
  articleno =    "2.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Cantone:2019:LES,
  author =       "Domenico Cantone and Simone Faro and Arianna Pavone",
  title =        "Linear and Efficient String Matching Algorithms Based
                 on Weak Factor Recognition",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.8:1--1.8:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3301295",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/hash.bib;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3301295",
  abstract =     "We present a simple and very efficient algorithm for
                 string matching based on the combination of weak factor
                 recognition and hashing. Despite its quadratic
                 worst-case running time, our algorithm exhibits a
                 sublinear behaviour. We also propose some practical
                 improvements of our algorithm and a variant with a
                 linear worst-case time complexity. Experimental results
                 show that, in most cases, some of the variants of our
                 algorithm obtain the best running times when compared,
                 under various conditions, against the most effective
                 algorithms present in the literature. For instance, in
                 the case of small alphabets and long patterns, the gain
                 in running time is up to 18\%. This makes our proposed
                 algorithm one of the most flexible solutions in
                 practical cases.",
  acknowledgement = ack-nhfb,
  articleno =    "1.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Cazals:2019:CTC,
  author =       "F. Cazals and D. Mazauric and R. Tetley and R.
                 Watrigant",
  title =        "Comparing Two Clusterings Using Matchings between
                 Clusters of Clusters",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.17:1--1.17:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3345951",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "Clustering is a fundamental problem in data science,
                 yet the variety of clustering methods and their
                 sensitivity to parameters make clustering hard. To
                 analyze the stability of a given clustering algorithm
                 while varying its parameters, and to compare clusters
                 yielded by different algorithms, several comparison
                 schemes based on matchings, information theory, and
                 various indices (Rand, Jaccard) have been developed. We
                 go beyond these by providing a novel class of methods
                  computing meta-clusters within each clustering: a
                 meta-cluster is a group of clusters, together with a
                 matching between these. Let the intersection graph of
                 two clusterings be the edge-weighted bipartite graph in
                 which the nodes represent the clusters, the edges
                 represent the nonempty intersection between two
                 clusters, and the weight of an edge is the number of
                 common items. We introduce the so-called
                 $D$-family-matching problem on intersection graphs,
                 with $D$ the upper bound on the diameter of the graph
                 induced by the clusters of any meta-cluster. First we
                  prove NP-completeness and APX-hardness results, and
                  unbounded approximation ratios of simple strategies.
                 Second, we design exact polynomial time dynamic
                 programming algorithms for some classes of graphs (in
                 particular trees). Then we prove spanning tree-based
                 efficient heuristic algorithms for general graphs. Our
                 experiments illustrate the role of $D$ as a scale
                 parameter providing information on the relationship
                 between clusters within a clustering and in-between two
                 clusterings. They also show the advantages of our
                 built-in mapping over classical cluster comparison
                 measures such as the variation of information.",
  acknowledgement = ack-nhfb,
  articleno =    "1.17",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
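
%%% The intersection graph defined above is straightforward to
%%% materialize; a minimal Python sketch (illustration only):
%%%
%%%   def intersection_graph(C1, C2):
%%%       """C1, C2: clusterings given as lists of item sets. Returns the
%%%       edge-weighted bipartite graph as a dict {(i, j): weight},
%%%       where the weight is the number of items shared by C1[i] and
%%%       C2[j]."""
%%%       return {(i, j): len(A & B)
%%%               for i, A in enumerate(C1)
%%%               for j, B in enumerate(C2)
%%%               if A & B}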

@Article{Chimani:2019:EAM,
  author =       "Markus Chimani and Ivo Hedtke and Tilo Wiedera",
  title =        "Exact Algorithms for the Maximum Planar Subgraph
                 Problem: New Models and Experiments",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "2.1:1--2.1:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3320344",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3320344",
  abstract =     "Given a graph $G$, the NP-hard Maximum Planar Subgraph
                 problem asks for a planar subgraph of $G$ with the
                 maximum number of edges. The only known non-trivial
                 exact algorithm utilizes Kuratowski's famous planarity
                 criterion and can be formulated as an integer linear
                 program (ILP) or a pseudo-Boolean satisfiability
                 problem (PBS). We examine three alternative
                 characterizations of planarity regarding their
                 applicability to model maximum planar subgraphs. For
                 each, we consider both ILP and PBS variants,
                 investigate diverse formulation aspects, and evaluate
                 their practical performance.",
  acknowledgement = ack-nhfb,
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
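
%%% For reference, the known Kuratowski-based exact model mentioned in
%%% the abstract is usually stated with binary edge variables and
%%% lazily separated constraints (a standard sketch; the article's
%%% contribution is alternative planarity characterizations):
%%%
%%%   \max \sum_{e \in E} x_e \quad \text{s.t.} \quad
%%%   \sum_{e \in E(K)} x_e \le |E(K)| - 1
%%%   \quad \text{for every subdivision $K$ of $K_5$ or $K_{3,3}$ in $G$,}
%%%   \qquad x_e \in \{0, 1\} \text{ for all } e \in E.
%%%
%%% Because there are exponentially many such subdivisions, the
%%% constraints are separated lazily: whenever the current solution is
%%% non-planar, a violated Kuratowski subdivision is extracted and its
%%% constraint is added.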

@Article{DAngelo:2019:FDH,
  author =       "Gianlorenzo D'Angelo and Mattia D'Emidio and Daniele
                 Frigioni",
  title =        "Fully Dynamic $2$-Hop Cover Labeling",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.6:1--1.6:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3299901",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3299901",
  abstract =     "The 2-hop Cover labeling of a graph is currently the
                 best data structure for answering shortest-path
                 distance queries on large-scale networks, since it
                 combines low query times, affordable space occupancy,
                 and reasonable preprocessing effort. Its main limit
                 resides in not being suited for dynamic networks since,
                 after a network change, (1) queries on the distance can
                 return incorrect values and (2) recomputing the
                 labeling from scratch yields unsustainable time
                 overhead. In this article, we overcome this limit by
                 introducing the first decremental algorithm able to
                 update 2-hop Cover labelings under node/edge removals
                 and edge weight increases. We prove the new algorithm
                 to be (1) correct, i.e., after each update operation
                 queries on the updated labeling return exact values;
                 (2) efficient with respect to the number of nodes that
                 change their distance as a consequence of a graph
                 update; and (3) able to preserve the minimality of the
                 labeling, a desirable property that impacts on both
                 query time and space occupancy. Furthermore, we provide
                 an extensive experimental study to demonstrate the
                 effectiveness of the new method. We consider it both
                  alone and in combination with the only known
                 incremental approach (Akiba et al. 2014), thus
                 obtaining the first fully dynamic algorithm for
                 updating 2-hop Cover labelings under general graph
                 updates. Our experiments show that the new dynamic
                 algorithms are orders of magnitude faster than the
                 from-scratch approach while at the same time being able
                 to preserve the quality of the labeling in terms of
                 query time and space occupancy, thus allowing one to
                 employ the 2-hop Cover labeling approach in dynamic
                 networks with practical performance.",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
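
%%% For reference, the 2-hop cover query itself is tiny; a Python
%%% sketch (the labeling layout is a hypothetical illustration, and
%%% none of the paper's decremental update machinery is shown):
%%%
%%%   def hub_query(labels, u, v):
%%%       """labels: dict vertex -> dict hub -> distance. The 2-hop
%%%       cover property guarantees some shortest u-v path passes
%%%       through a hub common to both labels."""
%%%       best = float("inf")
%%%       for h, du in labels[u].items():
%%%           dv = labels[v].get(h)
%%%           if dv is not None and du + dv < best:
%%%               best = du + dv
%%%       return best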

@Article{DAngelo:2019:ISI,
  author =       "Gianlorenzo D'Angelo",
  title =        "Introduction to the Special Issue {SEA 2018}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "2",
  pages =        "2.1:1--2.1:??",
  month =        dec,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3365652",
  ISSN =         "1084-6654",
  bibdate =      "Mon Dec 16 08:07:51 MST 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  acknowledgement = ack-nhfb,
  articleno =    "2.1e",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Dey:2019:SET,
  author =       "Tamal K. Dey and Dayu Shi and Yusu Wang",
  title =        "{SimBa}: an Efficient Tool for Approximating
                 Rips-filtration Persistence via Simplicial Batch
                 Collapse",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.5:1--1.5:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3284360",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3284360",
  abstract =     "In topological data analysis, a point cloud data $P$
                 extracted from a metric space is often analyzed by
                 computing the persistence diagram or barcodes of a
                 sequence of Rips complexes built on $P$ indexed by a
                 scale parameter. Unfortunately, even for input of
                 moderate size, the size of the Rips complex may become
                 prohibitively large as the scale parameter increases.
                 Starting with the Sparse Rips filtration introduced by
                 Sheehy, some existing methods aim to reduce the size of
                 the complex to improve time efficiency as well.
                 However, as we demonstrate, existing approaches still
                 fall short of scaling well, especially for
                 high-dimensional data. In this article, we investigate
                 the advantages and limitations of existing approaches.
                 Based on insights gained from the experiments, we
                 propose an efficient new algorithm, called SimBa, for
                 approximating the persistent homology of Rips
                 filtrations with quality guarantees. Our new algorithm
                 leverages a batch-collapse strategy as well as a new
                 Sparse Rips-like filtration. We experiment on a variety
                 of low- and high-dimensional datasets. We show that our
                 strategy presents a significant size reduction and that
                 our algorithm for approximating Rips filtration
                 persistence is an order of magnitude faster than
                 existing methods in practice.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
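
%%% For concreteness, the 1-skeleton of the Rips filtration referred to
%%% above is just the list of point pairs ordered by the scale at which
%%% they connect; a tiny Python sketch (illustrating why the complex
%%% grows quickly with the scale parameter, not the SimBa algorithm):
%%%
%%%   from itertools import combinations
%%%
%%%   def rips_one_skeleton(points, dist):
%%%       """Returns (scale, (i, j)) pairs sorted by the scale at which
%%%       edge {i, j} enters the Rips filtration."""
%%%       return sorted((dist(points[i], points[j]), (i, j))
%%%                     for i, j in combinations(range(len(points)), 2))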

@Article{Edelkamp:2019:BAB,
  author =       "Stefan Edelkamp and Armin Wei{\ss}",
  title =        "{BlockQuicksort}: Avoiding Branch Mispredictions in
                 {Quicksort}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.4:1--1.4:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3274660",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3274660",
  abstract =     "It is well known that Quicksort -- which is commonly
                 considered as one of the fastest in-place sorting
                 algorithms --- suffers in an essential way from branch
                 mispredictions. We present a novel approach to
                 addressing this problem by partially decoupling control
                 from dataflow: in order to perform the partitioning, we
                 split the input into blocks of constant size. Then, all
                 elements in one block are compared with the pivot and
                 the outcomes of the comparisons are stored in a buffer.
                 In a second pass, the respective elements are
                 rearranged. By doing so, we avoid conditional branches
                 based on outcomes of comparisons (except for the final
                 Insertionsort). Moreover, we prove that when sorting
                 $n$ elements, the average total number of branch
                  mispredictions is at most $ \epsilon n \log n + O(n)$
                 for some small $ \epsilon $ depending on the block
                 size. Our experimental results are promising: when
                 sorting random-integer data, we achieve an increase in
                 speed (number of elements sorted per second) of more
                 than 80\% over the GCC implementation of Quicksort (C++
                 std::sort). Also, for many other types of data and
                 non-random inputs, there is still a significant speedup
                 over std::sort. Only in a few special cases, such as
                 sorted or almost sorted inputs, can std::sort beat our
                 implementation. Moreover, on random-input permutations,
                 our implementation is even slightly faster than an
                 implementation of the highly tuned Super Scalar Sample
                 Sort, which uses a linear amount of additional space.
                 Finally, we also apply our approach to Quickselect and
                 obtain a speed-up of more than 100\% over the GCC
                 implementation (C++ std::nth_element).",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
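
%%% The partitioning idea above can be sketched in Python (a
%%% Lomuto-style single-buffer variant for clarity; the paper's scheme
%%% is a Hoare-style two-buffer partitioner, and the
%%% branch-misprediction benefit only materializes in compiled code):
%%%
%%%   def block_partition(a, lo, hi, pivot, block=64):
%%%       """Partition a[lo:hi] around a pivot value in two passes per
%%%       block: first record offsets of small elements in a buffer,
%%%       then swap them into the growing prefix of small elements."""
%%%       write = lo
%%%       for start in range(lo, hi, block):
%%%           end = min(start + block, hi)
%%%           buf = [i for i in range(start, end) if a[i] < pivot]
%%%           for i in buf:
%%%               a[write], a[i] = a[i], a[write]
%%%               write += 1
%%%       return write             # first index holding a value >= pivot
%%%
%%%   def block_quicksort(a, lo=0, hi=None):
%%%       if hi is None:
%%%           hi = len(a)
%%%       if hi - lo <= 16:
%%%           a[lo:hi] = sorted(a[lo:hi])   # small base case
%%%           return
%%%       p = hi - 1               # last element as pivot (sketch only)
%%%       m = block_partition(a, lo, hi - 1, a[p])
%%%       a[m], a[p] = a[p], a[m]
%%%       block_quicksort(a, lo, m)
%%%       block_quicksort(a, m + 1, hi)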

@Article{Hespe:2019:SKM,
  author =       "Demian Hespe and Christian Schulz and Darren Strash",
  title =        "Scalable Kernelization for Maximum Independent Sets",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.16:1--1.16:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3355502",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3355502",
  abstract =     "The most efficient algorithms for finding maximum
                 independent sets in both theory and practice use
                 reduction rules to obtain a much smaller problem
                 instance called a kernel. The kernel can then be solved
                  quickly using exact or heuristic algorithms, or by
                  repeatedly kernelizing recursively in the
                 branch-and-reduce paradigm. Current algorithms are
                 either slow but produce a small kernel or fast and give
                 a large kernel. Yet it is of critical importance for
                 these algorithms that kernelization is fast and returns
                 a small kernel. We attempt to accomplish both of these
                 goals simultaneously by giving an efficient parallel
                 kernelization algorithm based on graph partitioning and
                 parallel bipartite maximum matching. We combine our
                 parallelization techniques with two techniques to
                 accelerate kernelization further: dependency checking
                 that prunes reductions that cannot be applied, and
                 reduction tracking that allows us to stop kernelization
                 when reductions become less fruitful. Our algorithm
                 produces kernels that are orders of magnitude smaller
                 than the fastest kernelization methods while having a
                 similar execution time. Furthermore, our algorithm is
                 able to compute kernels with size comparable to the
                 smallest known kernels but up to two orders of
                 magnitude faster than possible previously. Finally, we
                 show that our kernelization algorithm can be used to
                 accelerate existing state-of-the-art heuristic
                 algorithms, allowing us to find larger independent sets
                 faster on large real-world networks and synthetic
                 instances.",
  acknowledgement = ack-nhfb,
  articleno =    "1.16",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
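
%%% Two of the simplest reduction rules used in independent-set
%%% kernelization make the idea concrete (a sequential Python sketch;
%%% the paper's contribution is parallelizing much richer rule sets):
%%%
%%%   def kernelize(adj):
%%%       """adj: dict v -> set(neighbors), modified in place.
%%%       Returns vertices that can safely be taken into the solution;
%%%       adj afterwards holds the (partially) reduced kernel."""
%%%       def remove(v):
%%%           for u in adj.pop(v):
%%%               adj[u].discard(v)
%%%       chosen, changed = set(), True
%%%       while changed:
%%%           changed = False
%%%           for v in list(adj):
%%%               if v not in adj:
%%%                   continue
%%%               if not adj[v]:           # isolated vertex: always take it
%%%                   chosen.add(v); remove(v); changed = True
%%%               elif len(adj[v]) == 1:   # degree-1: taking v is optimal
%%%                   u = next(iter(adj[v]))
%%%                   chosen.add(v); remove(v); remove(u); changed = True
%%%       return chosen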

@Article{Heuer:2019:NFB,
  author =       "Tobias Heuer and Peter Sanders and Sebastian Schlag",
  title =        "Network Flow-Based Refinement for Multilevel
                 Hypergraph Partitioning",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "2.3:1--2.3:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3329872",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3329872",
  abstract =     "We present a refinement framework for multilevel
                 hypergraph partitioning that uses max-flow computations
                 on pairs of blocks to improve the solution quality of a
                 $k$-way partition. The framework generalizes the
                 flow-based improvement algorithm of the Karlsruhe Fast
                 Flow Partitioner (KaFFPa) from graphs to hypergraphs
                 and is integrated into the hypergraph partitioner
                 Karlsruhe Hypergraph Partitioning (KaHyPar). By
                 reducing the size of hypergraph flow networks,
                 improving the flow model used in KaFFPa, and developing
                 techniques to improve the running time of our
                 algorithm, we obtain a partitioner that computes the
                 best solutions for a wide range of benchmark
                 hypergraphs from different application areas for both
                 the connectivity and the cut-net metric while still
                 having a running time comparable to that of hMetis. In
                 the case of graph partitioning, our algorithm compares
                 favorably with KaFFPa, even after enhancing the latter
                 with our improved flow network, and at the same time is
                 more than a factor of two faster. Finally, we show that
                 our algorithm improves the performance of the memetic
                 multilevel hypergraph partitioner KaHyPar-E.",
  acknowledgement = ack-nhfb,
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Karkkainen:2019:BEM,
  author =       "Juha K{\"a}rkk{\"a}inen and Dominik Kempa",
  title =        "Better External Memory {LCP} Array Construction",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.3:1--1.3:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3297723",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3297723",
  abstract =     "The suffix array, perhaps the most important data
                 structure in modern string processing, needs to be
                 augmented with the longest-common-prefix (LCP) array in
                 many applications. Their construction is often a major
                 bottleneck, especially when the data is too big for
                 internal memory. We describe two new algorithms for
                 computing the LCP array from the suffix array in
                 external memory. Experiments demonstrate that the new
                 algorithms are about a factor of two faster than the
                 fastest previous algorithm. We then further engineer
                 the two new algorithms and improve them in three ways.
                 First, we speed up the algorithms by up to a factor of
                  two through parallelism. Eight threads are sufficient
                  to make the algorithms essentially I/O bound.
                  Second, we reduce the disk space usage of the
                  algorithms, making them in-place: the input (text and
                 suffix array) is treated as read-only, and the working
                 disk space never exceeds the size of the final output
                 (the LCP array). Third, we add support for large
                 alphabets. All previous implementations assume the byte
                 alphabet.",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
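
%%% As a point of reference for what is being computed, the classic
%%% internal-memory LCP construction from text and suffix array (Kasai
%%% et al.'s linear-time algorithm) fits in a few lines of Python; the
%%% article's algorithms solve the same problem in external memory:
%%%
%%%   def lcp_from_sa(text, sa):
%%%       """Returns lcp with lcp[i] = length of the longest common
%%%       prefix of suffixes sa[i-1] and sa[i] (and lcp[0] = 0)."""
%%%       n = len(text)
%%%       rank = [0] * n
%%%       for i, s in enumerate(sa):
%%%           rank[s] = i
%%%       lcp, h = [0] * n, 0
%%%       for i in range(n):               # suffixes in text order
%%%           if rank[i] > 0:
%%%               j = sa[rank[i] - 1]      # lexicographic predecessor
%%%               while i + h < n and j + h < n and text[i + h] == text[j + h]:
%%%                   h += 1
%%%               lcp[rank[i]] = h
%%%               if h:
%%%                   h -= 1               # invariant: h drops by at most 1
%%%           else:
%%%               h = 0
%%%       return lcp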

@Article{Korhonen:2019:SGP,
  author =       "Tuukka Korhonen and Jeremias Berg and Matti
                 J{\"a}rvisalo",
  title =        "Solving Graph Problems via Potential Maximal Cliques:
                 an Experimental Evaluation of the
                 {Bouchitt{\'e}--Todinca} Algorithm",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.9:1--1.9:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3301297",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3301297",
  abstract =     "The BT algorithm of Bouchitt{\'e} and Todinca based on
                 enumerating potential maximal cliques, originally
                 proposed for the treewidth and minimum fill-in
                 problems, yields improved exact exponential-time
                 algorithms for various graph optimization problems
                 related to optimal triangulations. While the BT
                 algorithm has received significant attention in terms
                 of theoretical analysis, less attention has been paid
                  to engineering efficient implementations of the
                 algorithm for different problems and thereby on
                 empirical studies on its effectiveness in practice. In
                 this work, we provide an experimental evaluation of an
                 implementation of the BT algorithm, based on our
                 second-place winning entry in the 2nd Parameterized
                 Algorithms and Computational Experiments Challenge
                 (PACE 2017), extended to several related graph
                 problems: treewidth, minimum fill-in, generalized and
                 fractional hypertreewidth, and the total table size
                 problem. Instead of focusing on problem-specific
                 optimization of BT for a particular problem, our focus
                 in this work is on studying the applicability of BT
                 more generally to a range of problems. Based on the
                 results, we conclude that an efficient implementation
                 of the BT algorithm yields an empirically competitive
                 approach to each of the considered problems when
                 compared to available implementations of alternative
                 problem-specific algorithmic approaches.",
  acknowledgement = ack-nhfb,
  articleno =    "1.9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Levin:2019:ACR,
  author =       "Harry A. Levin and Sorelle A. Friedler",
  title =        "Automated {Congressional} Redistricting",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.10:1--1.10:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3316513",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3316513",
  abstract =     "Every 10 years, when states are forced to redraw their
                 congressional districts, the process is intensely
                 partisan, and the outcome is rarely fair and
                 democratic. In the past few decades, the growing
                 capabilities of computers have offered the promise of
                 objective, computerized redistricting. Unfortunately,
                 the redistricting problem can be shown to be
                  NP-complete, but there are a number of heuristics that
                 are effective. We specifically define the redistricting
                 problem and analyze several variations of a new divide
                 and conquer algorithm, comparing the compactness and
                 population deviation of our new algorithm to existing
                 algorithms and the actual districts. We offer a
                 comparative component-based analysis that demonstrates
                 the strengths and weaknesses of each algorithm
                 component and the type of input. The comparative
                 analysis shows that there are several ways to produce
                 valid redistricting plans, but each approach has
                 benefits and consequences. Our new algorithm produces
                 valid results to the redistricting problem in almost
                 every state that undergoes congressional redistricting,
                 offering a new solution to this challenging real-world
                 problem. In one version, the algorithm produces plans
                 with the optimal population deviation in 42 out of 43
                 multi-district states, which is better than most
                 algorithms in the literature. While compactness scores
                 vary, this approach offers new opportunities to improve
                 population deviation. Our output files comply with the
                 accepted format at most government hearings and
                 redistricting competitions, so the results would be
                 compatible with most public participation efforts in
                 2020.",
  acknowledgement = ack-nhfb,
  articleno =    "1.10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Nadara:2019:EEA,
  author =       "Wojciech Nadara and Marcin Pilipczuk and Roman
                 Rabinovich and Felix Reidl and Sebastian Siebertz",
  title =        "Empirical Evaluation of Approximation Algorithms for
                 Generalized Graph Coloring and Uniform Quasi-wideness",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "2",
  pages =        "2.6:1--2.6:??",
  month =        dec,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3368630",
  ISSN =         "1084-6654",
  bibdate =      "Mon Dec 16 08:07:51 MST 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The notions of bounded expansion and nowhere denseness
                 not only offer robust and general definitions of
                 uniform sparseness of graphs, they also describe the
                 tractability boundary for several important algorithmic
                 questions. In this article, we study two structural
                 properties of these graph classes that are of
                 particular importance in this context: the property of
                 having bounded generalized coloring numbers and the
                 property of being uniformly quasi-wide. We provide
                 experimental evaluations of several algorithms that
                 approximate these parameters on real-world graphs. On
                 the theoretical side, we provide a new algorithm for
                 uniform quasi-wideness with polynomial size guarantees
                 in graph classes of bounded expansion and show a lower
                 bound indicating that the guarantees of this algorithm
                 are close to optimal in graph classes with fixed
                 excluded minor.",
  acknowledgement = ack-nhfb,
  articleno =    "2.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Radermacher:2019:GHR,
  author =       "Marcel Radermacher and Klara Reichard and Ignaz Rutter
                 and Dorothea Wagner",
  title =        "Geometric Heuristics for Rectilinear Crossing
                 Minimization",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.12:1--1.12:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3325861",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3325861",
  abstract =     "In this article, we consider the rectilinear crossing
                 minimization problem, i.e., we seek a straight-line
                 drawing $ \Gamma $ of a graph $ G = (V, E) $ with a
                 small number of edge crossings. Crossing minimization
                 is an active field of research [1, 10]. While there is
                 a lot of work on heuristics for topological drawings,
                 these techniques are typically not transferable to the
                 rectilinear (i.e., straight-line) setting. We introduce
                 and evaluate three heuristics for rectilinear crossing
                 minimization. The approaches are based on the primitive
                 operation of moving a single vertex to its
                 crossing-minimal position in the current drawing $
                 \Gamma $, for which we give an $ O((k n + m)^2 \log (k
                 n + m))$-time algorithm, where $k$ is the degree of the
                 vertex and $n$ and $m$ are the number of vertices and
                 edges of the graph, respectively. In an experimental
                 evaluation, we demonstrate that our algorithms compute
                 straight-line drawings with fewer crossings than
                 energy-based algorithms implemented in the Open Graph
                 Drawing Framework [11] on a varied set of benchmark
                 instances. Additionally, we show that the difference of
                 the number of crossings of topological drawings
                 computed with the edge insertion approach [10, 13] and
                 the number of crossings in straight-line drawings
                 computed by our heuristic is relatively small. All
                 experiments are evaluated with a statistical
                 significance level of $ \alpha = 0.05$.",
  acknowledgement = ack-nhfb,
  articleno =    "1.12",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}
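
%%% The objective these heuristics optimize, the number of pairwise
%%% edge crossings in a straight-line drawing, can be evaluated with a
%%% standard segment-intersection test (a quadratic Python sketch
%%% assuming general position; not the authors' vertex-movement
%%% algorithm):
%%%
%%%   from itertools import combinations
%%%
%%%   def count_crossings(pos, edges):
%%%       """pos: dict vertex -> (x, y); edges: list of vertex pairs."""
%%%       def orient(p, q, r):
%%%           return (q[0]-p[0])*(r[1]-p[1]) - (q[1]-p[1])*(r[0]-p[0])
%%%       def crosses(e, f):
%%%           (a, b), (c, d) = e, f
%%%           if len({a, b, c, d}) < 4:
%%%               return False   # edges sharing an endpoint do not cross
%%%           p, q, r, s = pos[a], pos[b], pos[c], pos[d]
%%%           return (orient(p, q, r) * orient(p, q, s) < 0 and
%%%                   orient(r, s, p) * orient(r, s, q) < 0)
%%%       return sum(1 for e, f in combinations(edges, 2) if crosses(e, f))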

@Article{Stoichev:2019:NEH,
  author =       "Stoicho D. Stoichev",
  title =        "New Exact and Heuristic Algorithms for Graph
                 Automorphism Group and Graph Isomorphism",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.15:1--1.15:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3333250",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3333250",
  abstract =     "We describe five new algorithms, named Vsep. Four of
                 them are for the graph automorphism group and the fifth
                 one is for finding an isomorphism between two graphs.
                  All nonequivalent terminal nodes (discrete partitions)
                  of the search tree are stored; this is the main
                  difference between the exact version and the known
                  algorithms for the graph automorphism group. A new
                  strategy is used in the exact algorithm: if, during
                  its execution, the computed stabilizer orbits and
                  order take on wrong values, the algorithm continues
                  from a new starting point, losing some of the results
                  determined so far; the new starting point is chosen so
                  that the results are correct. The proposed algorithms
                  have been tested on well-known benchmark graphs and
                  compared with three of the most competitive known
                  tools. The results show that for some graph families
                  the exact Vsep algorithm outperforms these tools, and
                  vice versa for others; none of the tested algorithms
                  outperforms the others in all cases. The heuristic
                  versions use reduced search trees; they are almost
                  exact and, with very rare exceptions, faster than the
                  exact version. They are applied mainly to regular
                  graphs and offer the user a new choice. The
                  experiments show that
                 the running times of Vsep algorithms have a slight
                 dependence on vertex labeling.",
  acknowledgement = ack-nhfb,
  articleno =    "1.15",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Zaroliagis:2019:EES,
  author =       "Christos Zaroliagis",
  title =        "Editorial --- {ESA} 2016 Special Issue",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "1",
  pages =        "1.1:1--1.1:??",
  month =        oct,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3298788",
  ISSN =         "1084-6654",
  bibdate =      "Tue Oct 22 07:25:57 MDT 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/ft_gateway.cfm?id=3298788",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Ziobro:2019:FHC,
  author =       "Micha{\l} Ziobro and Marcin Pilipczuk",
  title =        "Finding {Hamiltonian} Cycle in Graphs of Bounded
                 Treewidth: Experimental Evaluation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "24",
  number =       "2",
  pages =        "2.7:1--2.7:18",
  month =        dec,
  year =         "2019",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3368631",
  ISSN =         "1084-6654",
  bibdate =      "Mon Dec 16 08:07:51 MST 2019",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  abstract =     "The notion of treewidth, introduced by Robertson and
                 Seymour in their seminal Graph Minors series, turned
                 out to have tremendous impact on graph algorithmics.
                 Many hard computational problems on graphs turn out to
                 be efficiently solvable in graphs of bounded treewidth:
                  graphs that can be swept with separators of bounded
                 size. These efficient algorithms usually follow the
                 dynamic programming paradigm. In recent years, we have
                 seen a rapid and quite unexpected development of
                 involved techniques for solving various computational
                 problems in graphs of bounded treewidth. One of the
                 most surprising directions is the development of
                 algorithms for connectivity problems that have only
                 single-exponential dependency (i.e., $ 2^{O(t)}$) on
                 the treewidth in the running time bound, as opposed to
                  slightly superexponential (i.e., $ 2^{O(t \log t)}$)
                 stemming from more naive approaches. In this work, we
                 perform a thorough experimental evaluation of these
                 approaches in the context of one of the most classic
                 connectivity problems, namely, HAMILTONIAN CYCLE.",
  acknowledgement = ack-nhfb,
  articleno =    "2.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J430",
}

@Article{Mitzenmacher:2020:ACF,
  author =       "Michael Mitzenmacher and Salvatore Pontarelli and
                 Pedro Reviriego",
  title =        "Adaptive Cuckoo Filters",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "25",
  number =       "1",
  pages =        "1--20",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3339504",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sun Apr 12 12:09:34 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3339504",
  abstract =     "We introduce the adaptive cuckoo filter (ACF), a data
                 structure for approximate set membership that extends
                 cuckoo filters by reacting to false positives, removing
                  them for future queries. As an example application, in
                  packet processing, queries may \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Andersen:2020:GFE,
  author =       "Jakob L. Andersen and Daniel Merkle",
  title =        "A Generic Framework for Engineering Graph Canonization
                 Algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "25",
  number =       "1",
  pages =        "1--26",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3356020",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sun Apr 12 12:09:34 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3356020",
  abstract =     "The state-of-the-art tools for practical graph
                 canonization are all based on the
                 individualization-refinement paradigm, and their
                 difference is primarily in the choice of heuristics
                 they include and in the actual tool implementation. It
                 is thus not \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Blasius:2020:HEN,
  author =       "Thomas Bl{\"a}sius and Tobias Friedrich and Maximilian
                 Katzmann and Anton Krohmer",
  title =        "Hyperbolic Embeddings for Near-Optimal Greedy
                 Routing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "25",
  number =       "1",
  pages =        "1--18",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3381751",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sun Apr 12 12:09:34 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3381751",
  abstract =     "Greedy routing computes paths between nodes in a
                 network by successively moving to the neighbor closest
                 to the target with respect to coordinates given by an
                 embedding into some metric space. Its advantage is that
                 only local information is used for \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Borodin:2020:ESA,
  author =       "Allan Borodin and Christodoulos Karavasilis and Denis
                 Pankratov",
  title =        "An Experimental Study of Algorithms for Online
                 Bipartite Matching",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "25",
  number =       "1",
  pages =        "1--37",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3379552",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sun Apr 12 12:09:34 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3379552",
  abstract =     "We perform an experimental study of algorithms for
                  online bipartite matching under the known i.i.d. input
                 model with integral types. In the last decade, there
                 has been substantial effort in designing complex
                 algorithms to improve worst-case approximation
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Graf:2020:XFF,
  author =       "Thomas Mueller Graf and Daniel Lemire",
  title =        "Xor Filters: Faster and Smaller Than {Bloom} and
                 Cuckoo Filters",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "25",
  number =       "1",
  pages =        "1--16",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3376122",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sun Apr 12 12:09:34 MDT 2020",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/abs/10.1145/3376122",
  abstract =     "The Bloom filter provides fast approximate set
                 membership while using little memory. Engineers often
                 use these filters to avoid slow operations such as disk
                  or network accesses. As an alternative, a cuckoo filter
                  may need less space than a Bloom filter while also
                  being faster. Chazelle et al. proposed a generalization
                  of
                 the Bloom filter called the Bloomier filter.
                 Dietzfelbinger and Pagh described a variation on the
                 Bloomier filter that can answer approximate membership
                 queries over immutable sets. It has never been tested
                 empirically, to our knowledge. We review an efficient
                 implementation of their approach, which we call the xor
                 filter. We find that xor filters can be faster than
                 Bloom and cuckoo filters while using less memory. We
                 further show that a more compact version of xor filters
                 (xor+) can use even less space than highly compact
                 alternatives (e.g., Golomb-compressed sequences) while
                 providing speeds competitive with Bloom filters.",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}
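
%%% The xor filter described above admits a compact sketch.  The
%%% following Python code, with hashlib-based hashing, 8-bit
%%% fingerprints, and a capacity factor of 1.23, follows the peeling
%%% construction in spirit only; it is an illustrative assumption, not
%%% the authors' implementation.
%%%
%%%     import hashlib
%%%
%%%     def _h(key, seed, tag):
%%%         d = hashlib.blake2b(repr((seed, tag, key)).encode(), digest_size=8)
%%%         return int.from_bytes(d.digest(), "big")
%%%
%%%     def _slots(key, seed, seg):
%%%         # one slot in each of the three table segments
%%%         return tuple(p * seg + _h(key, seed, p) % seg for p in range(3))
%%%
%%%     def _fp(key, seed):
%%%         return _h(key, seed, 3) & 0xFF       # 8-bit fingerprint
%%%
%%%     def build(keys, c=1.23, max_seeds=1000):
%%%         keys = list(keys)
%%%         seg = int(c * len(keys) / 3) + 1
%%%         m = 3 * seg
%%%         for seed in range(max_seeds):
%%%             slots = [_slots(k, seed, seg) for k in keys]
%%%             count = [0] * m                  # keys mapped to each slot
%%%             acc = [0] * m                    # XOR of key indices per slot
%%%             for i, ss in enumerate(slots):
%%%                 for s in ss:
%%%                     count[s] += 1
%%%                     acc[s] ^= i
%%%             stack = []
%%%             queue = [s for s in range(m) if count[s] == 1]
%%%             while queue:                     # peel single-key slots
%%%                 s = queue.pop()
%%%                 if count[s] != 1:
%%%                     continue
%%%                 i = acc[s]
%%%                 stack.append((i, s))
%%%                 for t in slots[i]:
%%%                     count[t] -= 1
%%%                     acc[t] ^= i
%%%                     if count[t] == 1:
%%%                         queue.append(t)
%%%             if len(stack) < len(keys):
%%%                 continue                     # peeling failed; new seed
%%%             table = [0] * m
%%%             for i, s in reversed(stack):
%%%                 s0, s1, s2 = slots[i]
%%%                 # table[s] is still 0 here, so it drops out of the XOR
%%%                 table[s] = _fp(keys[i], seed) ^ table[s0] ^ table[s1] ^ table[s2]
%%%             return table, seed, seg
%%%         raise RuntimeError("construction failed; increase c or max_seeds")
%%%
%%%     def contains(filt, key):
%%%         table, seed, seg = filt
%%%         s0, s1, s2 = _slots(key, seed, seg)
%%%         return _fp(key, seed) == (table[s0] ^ table[s1] ^ table[s2])
%%%
%%% Stored keys always answer True; a foreign key is a false positive
%%% with probability about 2^-8 for 8-bit fingerprints.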

@Article{Escamocher:2020:GDC,
  author =       "Guillaume Escamocher and Barry O'Sullivan and Steven
                 David Prestwich",
  title =        "Generating Difficult {CNF} Instances in Unexplored
                 Constrainedness Regions",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "25",
  number =       "1",
  pages =        "1--12",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3385651",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:13:21 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3385651",
  abstract =     "When creating benchmarks for satisfiability (SAT)
                 solvers, we need Conjunctive Normal Form (CNF)
                 instances that are easy to build but hard to solve. A
                 recent development in the search for such methods has
                 led to the Balanced SAT algorithm, which can \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Jensen:2020:SIS,
  author =       "Alathea Jensen and Isabel Beichl",
  title =        "A Sequential Importance Sampling Algorithm for
                 Counting Linear Extensions",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "25",
  number =       "1",
  pages =        "1--14",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3385650",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:13:21 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3385650",
  abstract =     "In recent decades, a number of profound theorems
                 concerning approximation of hard counting problems have
                  appeared. These include estimating the permanent,
                 estimating the volume of a convex polyhedron, and
                 counting (approximately) the number of linear
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}
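
%%% A generic sequential importance sampling estimator for this problem
%%% fits in a few lines: repeatedly remove a uniformly random minimal
%%% element and multiply the numbers of choices; the product is an
%%% unbiased estimate of the number of linear extensions.  This Python
%%% sketch shows only the generic scheme, not necessarily the
%%% Jensen--Beichl algorithm; names are illustrative.
%%%
%%%     import random
%%%
%%%     def sis_linear_extensions(elements, less, trials=10000):
%%%         # less(a, b) is True iff a < b in the partial order
%%%         total = 0.0
%%%         for _ in range(trials):
%%%             remaining, weight = set(elements), 1.0
%%%             while remaining:
%%%                 minimal = [x for x in remaining
%%%                            if not any(less(y, x) for y in remaining if y != x)]
%%%                 weight *= len(minimal)   # inverse sampling probability
%%%                 remaining.remove(random.choice(minimal))
%%%             total += weight
%%%         return total / trials
%%%
%%%     # the poset 0 < 1, 0 < 2 plus a free element 3 has exactly 8
%%%     # linear extensions, so the printed estimate should be close to 8
%%%     rel = {(0, 1), (0, 2)}
%%%     print(sis_linear_extensions(range(4), lambda a, b: (a, b) in rel))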

@Article{Charalampopoulos:2020:PSA,
  author =       "Panagiotis Charalampopoulos and Costas S. Iliopoulos
                 and Chang Liu and Solon P. Pissis",
  title =        "Property Suffix Array with Applications in Indexing
                 Weighted Sequences",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "25",
  number =       "1",
  pages =        "1--16",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3385898",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:13:21 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3385898",
  abstract =     "The suffix array is one of the most prevalent data
                 structures for string indexing; it stores the
                 lexicographically sorted list of suffixes of a given
                 string. Its practical advantage compared to the suffix
                 tree is space efficiency. In Property Indexing,
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}
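
%%% For reference, the plain suffix array that the property suffix
%%% array extends can be sketched naively in Python, together with a
%%% binary-search pattern lookup (Python 3.10+ for the bisect key
%%% argument); this is the textbook structure, not the authors' index.
%%%
%%%     from bisect import bisect_left, bisect_right
%%%
%%%     def suffix_array(s):
%%%         # naive construction: sort suffix start positions
%%%         return sorted(range(len(s)), key=lambda i: s[i:])
%%%
%%%     def occurrences(s, sa, p):
%%%         # suffixes are sorted, so all occurrences of p are contiguous
%%%         lo = bisect_left(sa, p, key=lambda i: s[i:i+len(p)])
%%%         hi = bisect_right(sa, p, key=lambda i: s[i:i+len(p)])
%%%         return sorted(sa[lo:hi])
%%%
%%%     sa = suffix_array("banana")               # [5, 3, 1, 0, 4, 2]
%%%     print(occurrences("banana", sa, "ana"))   # [1, 3]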

@Article{Henzinger:2020:IBL,
  author =       "Alexandra Henzinger and Alexander Noe and Christian
                 Schulz",
  title =        "{ILP}-Based Local Search for Graph Partitioning",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "25",
  number =       "1",
  pages =        "1--26",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3398634",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:13:21 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3398634",
  abstract =     "Computing high-quality graph partitions is a
                 challenging problem with numerous applications. In this
                 article, we present a novel meta-heuristic for the
                 balanced graph partitioning problem. Our approach is
                 based on integer linear programs that solve the
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Gonzalez:2020:WEA,
  author =       "Miguel Coviello Gonzalez and Marek Chrobak",
  title =        "A Waste-Efficient Algorithm for Single-Droplet Sample
                 Preparation on Microfluidic Chips",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "25",
  number =       "1",
  pages =        "1--22",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3408297",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:13:21 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3408297",
  abstract =     "We address the problem of designing microfluidic
                 chips for sample preparation, which is a crucial step
                 in many experimental processes in chemical and
                 biological sciences. One of the objectives of sample
                 preparation is to dilute the sample fluid, called
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Kirchbach:2020:BPM,
  author =       "Konrad von Kirchbach and Christian Schulz and Jesper
                 Larsson Tr{\"a}ff",
  title =        "Better Process Mapping and Sparse Quadratic
                 Assignment",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "25",
  number =       "1",
  pages =        "1--19",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3409667",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:13:21 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3409667",
  abstract =     "Communication and topology-aware process mapping is a
                 powerful approach to reduce communication time in
                 parallel applications with known communication patterns
                 on large, distributed memory systems. We address the
                 problem as a quadratic assignment \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.11",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Stones:2020:CAG,
  author =       "Rebecca J. Stones and Ra{\'u}l M. Falc{\'o}n and
                 Daniel Kotlar and Trent G. Marbach",
  title =        "Computing Autotopism Groups of Partial {Latin}
                 Rectangles",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "25",
  number =       "1",
  pages =        "1--39",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3412324",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:13:21 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3412324",
  abstract =     "Computing the autotopism group of a partial Latin
                 rectangle (PLR) can be performed in multiple ways. This
                 study has two aims: comparing some of these methods
                 experimentally to identify those that are competitive;
                 and identifying design goals for \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.12",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Kanda:2020:DPD,
  author =       "Shunsuke Kanda and Dominik K{\"o}ppl and Yasuo Tabei
                 and Kazuhiro Morita and Masao Fuketa",
  title =        "Dynamic Path-decomposed Tries",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "25",
  number =       "1",
  pages =        "1--28",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3418033",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:13:21 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3418033",
  abstract =     "A keyword dictionary is an associative array whose
                 keys are strings. Recent applications handling massive
                  keyword dictionaries in main memory need a
                  space-efficient implementation. When limited to static
                 applications, there are a number of \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.13",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Focke:2020:MST,
  author =       "Jacob Focke and Nicole Megow and Julie Mei{\ss}ner",
  title =        "Minimum Spanning Tree under Explorable Uncertainty in
                 Theory and Experiments",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "25",
  number =       "1",
  pages =        "1--20",
  month =        apr,
  year =         "2020",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3422371",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:13:21 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3422371",
  abstract =     "We consider the minimum spanning tree (MST) problem in
                 an uncertainty model where interval edge weights can be
                 explored to obtain the exact weight. The task is to
                 find an MST by querying the minimum number of edges.
                 This problem has received quite some \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.14",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Liptak:2021:PDC,
  author =       "Zsuzsanna Lipt{\'a}k and Simon J. Puglisi and
                 Massimiliano Rossi",
  title =        "Pattern Discovery in Colored Strings",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "1--26",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3429280",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3429280",
  abstract =     "In this article, we consider the problem of
                 identifying patterns of interest in colored strings. A
                 colored string is a string where each position is
                 assigned one of a finite set of colors. Our task is to
                 find substrings of the colored string that always
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Brodal:2021:COA,
  author =       "Gerth St{\o}lting Brodal and Konstantinos
                 Mampentzidis",
  title =        "Cache Oblivious Algorithms for Computing the Triplet
                 Distance between Trees",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "1--44",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3433651",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3433651",
  abstract =     "We consider the problem of computing the triplet
                 distance between two rooted unordered trees with n
                 labeled leaves. Introduced by Dobson in 1975, the
                 triplet distance is the number of leaf triples that
                 induce different topologies in the two trees. The
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Koana:2021:DRM,
  author =       "Tomohiro Koana and Viatcheslav Korenwein and Andr{\'e}
                 Nichterlein and Rolf Niedermeier and Philipp Zschoche",
  title =        "Data Reduction for Maximum Matching on Real-World
                 Graphs: Theory and Experiments",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "1--30",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3439801",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3439801",
  abstract =     "Finding a maximum-cardinality or maximum-weight
                 matching in (edge-weighted) undirected graphs is among
                 the most prominent problems of algorithmic graph
                 theory. For $n$-vertex and $m$-edge graphs, the
                  best-known algorithms run in $ \tilde{O}(m \sqrt{n})$ time.
                 We build on \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}
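
%%% One classical reduction rule of the kind studied in this line of
%%% work: a degree-one vertex can always be matched to its unique
%%% neighbor without losing optimality for maximum-cardinality
%%% matching.  A minimal Python sketch under that assumption (the
%%% paper's rule set is richer; names here are illustrative):
%%%
%%%     def reduce_degree_one(adj):
%%%         # adj: dict vertex -> set of neighbors (undirected graph);
%%%         # repeatedly match a degree-1 vertex v to its neighbor u and
%%%         # delete both: some maximum matching contains {u, v}.
%%%         matched = []
%%%         queue = [v for v in adj if len(adj[v]) == 1]
%%%         while queue:
%%%             v = queue.pop()
%%%             if v not in adj or len(adj[v]) != 1:
%%%                 continue                     # stale queue entry
%%%             u = next(iter(adj[v]))
%%%             matched.append((u, v))
%%%             for w in (u, v):                 # delete u and v
%%%                 for x in adj.pop(w):
%%%                     if x in adj:
%%%                         adj[x].discard(w)
%%%                         if len(adj[x]) == 1:
%%%                             queue.append(x)
%%%         return matched, adj
%%%
%%% The matched edges belong to some maximum matching of the original
%%% graph, and any matching algorithm can then run on the smaller rest.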

@Article{Baharev:2021:EMM,
  author =       "Ali Baharev and Hermann Schichl and Arnold Neumaier
                 and Tobias Achterberg",
  title =        "An Exact Method for the Minimum Feedback Arc Set
                 Problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "1--28",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3446429",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  note =         "See comments \cite{Grotschel:2022:CEM}.",
  URL =          "https://dl.acm.org/doi/10.1145/3446429",
  abstract =     "A feedback arc set of a directed graph G is a subset
                 of its arcs containing at least one arc of every cycle
                 in G. Finding a feedback arc set of minimum cardinality
                 is an NP-hard problem called the minimum feedback arc
                 set problem. Numerically, the \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}
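
%%% To make the problem definition concrete, a brute-force exact solver
%%% (exponential, for tiny digraphs only, and unrelated to the paper's
%%% integer-programming approach) can be sketched in Python:
%%%
%%%     from itertools import combinations
%%%
%%%     def is_acyclic(n, arcs):
%%%         # Kahn's algorithm: repeatedly strip in-degree-zero vertices
%%%         indeg = [0] * n
%%%         out = [[] for _ in range(n)]
%%%         for u, v in arcs:
%%%             indeg[v] += 1
%%%             out[u].append(v)
%%%         stack = [v for v in range(n) if indeg[v] == 0]
%%%         seen = 0
%%%         while stack:
%%%             u = stack.pop()
%%%             seen += 1
%%%             for v in out[u]:
%%%                 indeg[v] -= 1
%%%                 if indeg[v] == 0:
%%%                     stack.append(v)
%%%         return seen == n
%%%
%%%     def minimum_feedback_arc_set(n, arcs):
%%%         # try removals by increasing size; the first acyclic hit is optimal
%%%         for k in range(len(arcs) + 1):
%%%             for removal in combinations(range(len(arcs)), k):
%%%                 rem = set(removal)
%%%                 keep = [a for i, a in enumerate(arcs) if i not in rem]
%%%                 if is_acyclic(n, keep):
%%%                     return [arcs[i] for i in removal]
%%%
%%%     # a directed triangle needs exactly one arc removed
%%%     print(minimum_feedback_arc_set(3, [(0, 1), (1, 2), (2, 0)]))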

@Article{Hecht:2021:TLF,
  author =       "Michael Hecht and Krzysztof Gonciarz and Szabolcs
                 Horv{\'a}t",
  title =        "Tight Localizations of Feedback Sets",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "1--19",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3447652",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3447652",
  abstract =     "The classical NP-hard feedback arc set problem (FASP)
                 and feedback vertex set problem (FVSP) ask for a
                  minimum set of arcs $ \epsilon \subseteq E$ or
                  vertices $ \nu \subseteq V$ whose removal ($ G
                  \setminus \epsilon$ or $ G \setminus \nu$,
                  respectively) makes a given multi-digraph $ G = (V,
                  E)$ acyclic. Though both \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Fischl:2021:HBT,
  author =       "Wolfgang Fischl and Georg Gottlob and Davide Mario
                 Longo and Reinhard Pichler",
  title =        "{HyperBench}: a Benchmark and Tool for Hypergraphs and
                 Empirical Findings",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "1--40",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3440015",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3440015",
  abstract =     "To cope with the intractability of answering
                 Conjunctive Queries (CQs) and solving Constraint
                 Satisfaction Problems (CSPs), several notions of
                  hypergraph decompositions have been proposed, giving
                  rise to different notions of width; noticeably, plain,
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Qiu:2021:EER,
  author =       "Zirou Qiu and Ruslan Shaydulin and Xiaoyuan Liu and
                 Yuri Alexeev and Christopher S. Henry and Ilya Safro",
  title =        "{ELRUNA}: Elimination Rule-based Network Alignment",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "1--32",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3450703",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3450703",
  abstract =     "Networks model a variety of complex phenomena across
                 different domains. In many applications, one of the
                 most essential tasks is to align two or more networks
                 to infer the similarities between cross-network
                 vertices and to discover potential node-level
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Dinklage:2021:PWT,
  author =       "Patrick Dinklage and Jonas Ellert and Johannes Fischer
                 and Florian Kurpicz and Marvin L{\"o}bel",
  title =        "Practical Wavelet Tree Construction",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "1--67",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3457197",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3457197",
  abstract =     "We present new sequential and parallel algorithms for
                 wavelet tree construction based on a new bottom-up
                 technique. This technique makes use of the structure of
                  the wavelet tree, refining the characters represented
                 in a node of the tree with increasing \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}
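
%%% A textbook top-down construction (the article's contribution is a
%%% faster bottom-up technique) can be sketched in Python; bitvectors
%%% are plain lists and rank queries are naive, purely for exposition.
%%%
%%%     def wavelet_tree(text, alphabet=None):
%%%         # node = {"bits": ..., "0": left child, "1": right child}
%%%         if alphabet is None:
%%%             alphabet = sorted(set(text))
%%%         if len(alphabet) <= 1:
%%%             return None                      # a single symbol remains
%%%         half = len(alphabet) // 2
%%%         left = set(alphabet[:half])
%%%         return {
%%%             "bits": [0 if c in left else 1 for c in text],
%%%             "0": wavelet_tree([c for c in text if c in left],
%%%                               alphabet[:half]),
%%%             "1": wavelet_tree([c for c in text if c not in left],
%%%                               alphabet[half:]),
%%%         }
%%%
%%%     def access(node, alphabet, i):
%%%         # recover text[i] by walking down with rank queries
%%%         while node is not None:
%%%             b = node["bits"][i]
%%%             i = node["bits"][:i].count(b)    # naive rank_b(i)
%%%             half = len(alphabet) // 2
%%%             alphabet = alphabet[:half] if b == 0 else alphabet[half:]
%%%             node = node[str(b)]
%%%         return alphabet[0]
%%%
%%%     t = "mississippi"
%%%     wt, ab = wavelet_tree(t), sorted(set(t))
%%%     print("".join(access(wt, ab, i) for i in range(len(t))))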

@Article{Junger:2021:QAV,
  author =       "Michael J{\"u}nger and Elisabeth Lobe and Petra Mutzel
                 and Gerhard Reinelt and Franz Rendl and Giovanni
                 Rinaldi and Tobias Stollenwerk",
  title =        "Quantum Annealing versus Digital Computing: an
                 Experimental Comparison",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "1--30",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3459606",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3459606",
  abstract =     "Quantum annealing is getting increasing attention in
                 combinatorial optimization. The quantum processing unit
                 by D-Wave is constructed to approximately solve Ising
                 models on so-called Chimera graphs. Ising models are
                 equivalent to quadratic unconstrained \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Bernardini:2021:RST,
  author =       "Giulia Bernardini and Huiping Chen and Gabriele Fici
                 and Grigorios Loukides and Solon P. Pissis",
  title =        "Reverse-Safe Text Indexing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "1--26",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3461698",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3461698",
  abstract =     "We introduce the notion of reverse-safe data
                 structures. These are data structures that prevent the
                 reconstruction of the data they encode (i.e., they
                  cannot be easily reversed). A data structure $D$ is
                  called $z$-reverse-safe when there exist at least $z$
                  \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Farach-Colton:2021:DWS,
  author =       "Mart{\'\i}n Farach-Colton and Katia Leal and Miguel A.
                 Mosteiro and Christopher Thraves Caro",
  title =        "Dynamic Windows Scheduling with Reallocation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "1--19",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3462208",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3462208",
  abstract =     "We consider the Windows Scheduling (WS) problem, a
                  restricted version of Unit-Fractions Bin Packing that
                  is also called Inventory Replenishment in the context
                  of supply chains. In brief, the WS problem is to
                  schedule the use of communication \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.11",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Goodrich:2021:UEE,
  author =       "Timothy D. Goodrich and Eric Horton and Blair D.
                 Sullivan",
  title =        "An Updated Experimental Evaluation of Graph
                 Bipartization Methods",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "12:1--12:24",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3467968",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3467968",
  abstract =     "We experimentally evaluate the practical
                  state-of-the-art in graph bipartization (Odd Cycle
                  Transversal, OCT), motivated by the need for good
                 algorithms for embedding problems into near-term
                 quantum computing hardware. We assemble a preprocessing
                 suite \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "12",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Fichte:2021:MCC,
  author =       "Johannes K. Fichte and Markus Hecher and Florim
                 Hamiti",
  title =        "The Model Counting Competition 2020",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "13:1--13:26",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3459080",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3459080",
  abstract =     "Many computational problems in modern society amount
                  to probabilistic reasoning, statistics, and
                 combinatorics. A variety of these real-world questions
                 can be solved by representing the question in (Boolean)
                 formulas and associating the number of \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "13",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Arroyuelo:2021:EPL,
  author =       "Diego Arroyuelo and Rodrigo C{\'a}novas and Johannes
                 Fischer and Dominik K{\"o}ppl and Marvin L{\"o}bel and
                 Gonzalo Navarro and Rajeev Raman",
  title =        "Engineering Practical {Lempel--Ziv} Tries",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "14:1--14:47",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3481638",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/datacompression.bib;
                 https://www.math.utah.edu/pub/tex/bib/hash.bib;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3481638",
  abstract =     "The Lempel--Ziv 78 (LZ78) and Lempel--Ziv--Welch (LZW)
                 text factorizations are popular, not only for bare
                 compression but also for building compressed data
                 structures on top of them. Their regular factor
                 structure makes them computable within space bounded by
                 the compressed output size. In this article, we carry
                 out the first thorough study of low-memory LZ78 and LZW
                 text factorization algorithms, introducing more
                 efficient alternatives to the classical methods, as
                  well as new techniques that can run within less memory
                  than is necessary to hold the compressed file.
                 Our results build on hash-based representations of
                 tries that may have independent interest.",
  acknowledgement = ack-nhfb,
  articleno =    "14",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}
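
%%% The classical LZ78 factorization with a hash-table trie, the
%%% baseline these engineered variants improve upon, fits in a dozen
%%% lines of Python (an illustrative sketch, not the authors' code):
%%%
%%%     def lz78_factorize(text):
%%%         # factor = (index of longest previous factor prefix, next char);
%%%         # the trie maps (node, char) -> node, with node 0 = empty string
%%%         trie, factors, node = {}, [], 0
%%%         for ch in text:
%%%             if (node, ch) in trie:
%%%                 node = trie[(node, ch)]      # extend the current phrase
%%%             else:
%%%                 factors.append((node, ch))   # phrase = phrase[node] + ch
%%%                 trie[(node, ch)] = len(factors)
%%%                 node = 0
%%%         if node:                             # input ended mid-phrase
%%%             factors.append((node, ""))
%%%         return factors
%%%
%%%     print(lz78_factorize("abababab"))
%%%     # [(0, 'a'), (0, 'b'), (1, 'b'), (3, 'a'), (2, '')]
%%%     # i.e., the phrases  a | b | ab | aba | b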

@Article{Schlag:2021:FSV,
  author =       "Sebastian Schlag and Matthias Schmitt and Christian
                 Schulz",
  title =        "Faster Support Vector Machines",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "26",
  number =       "1",
  pages =        "15:1--15:21",
  month =        dec,
  year =         "2021",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3484730",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Tue Nov 9 10:03:24 MST 2021",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3484730",
  abstract =     "The time complexity of support vector machines (SVMs)
                 prohibits training on huge datasets with millions of
                 data points. Recently, multilevel approaches to train
                 SVMs have been developed to allow for time-efficient
                 training on huge datasets. While \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "15",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Nunes:2022:GCI,
  author =       "Daniel S. N. Nunes and Felipe A. Louza and Simon Gog
                 and Mauricio Ayala-Rinc{\'o}n and Gonzalo Navarro",
  title =        "Grammar Compression by Induced Suffix Sorting",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.1:1--1.1:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3549992",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3549992",
  abstract =     "A grammar compression algorithm, called GCIS, is
                 introduced in this work. GCIS is based on the induced
                 suffix sorting algorithm SAIS, presented by Nong
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Yasar:2022:SRP,
  author =       "Abdurrahman Ya{\c{s}}ar and Muhammed Fatih Balin
                 and Xiaojing An and Kaan Sancak and {\"U}mit V.
                 {\c{C}}ataly{\"u}rek",
  title =        "On Symmetric Rectilinear Partitioning",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.2:1--1.2:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3523750",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3523750",
  abstract =     "Even distribution of irregular workload to processing
                 units is crucial for efficient parallelization in many
                 applications. In this work, we are concerned with a
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Grotschel:2022:CEM,
  author =       "Martin Gr{\"o}tschel and Michael J{\"u}nger and
                 Gerhard Reinelt",
  title =        "Comments on {``An Exact Method for the Minimum
                 Feedback Arc Set Problem''}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.3:1--1.3:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3545001",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  note =         "See \cite{Baharev:2021:EMM}.",
  URL =          "https://dl.acm.org/doi/10.1145/3545001",
  abstract =     "We comment on the ACM Journal of Experimental
                 Algorithmics article ``An Exact Method for the Minimum
                 Feedback Arc Set Problem'' by Ali Baharev, Hermann
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Netto:2022:SSA,
  author =       "Marcelo Vaz Netto and Sahudy Montenegro Gonz{\'a}lez",
  title =        "{SSLC}: a Search Algorithm Based on Linear Collisions
                 and {Poisson} Probability Distribution",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.4:1--1.4:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3497876",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3497876",
  abstract =     "This article proposes sequential search based on
                  linear collisions (SSLC), an algorithm based on the
                  Poisson probability distribution. SSLC works on large
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Graf:2022:BFF,
  author =       "Thomas Mueller Graf and Daniel Lemire",
  title =        "Binary Fuse Filters: Fast and Smaller Than Xor
                 Filters",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.5:1--1.5:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3510449",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3510449",
  abstract =     "Bloom and cuckoo filters provide fast approximate set
                 membership while using little memory. Engineers use
                 them to avoid expensive disk and network \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Angriman:2022:BDS,
  author =       "Eugenio Angriman and Micha{\l} Boro{\'n} and Henning
                 Meyerhenke",
  title =        "A Batch-dynamic Suitor Algorithm for Approximating
                 Maximum Weighted Matching",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.6:1--1.6:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3529228",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3529228",
  abstract =     "Matching is a popular combinatorial optimization
                 problem with numerous applications in both commercial
                 and scientific fields. Computing \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}
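
%%% The static suitor algorithm of Manne and Halappanavar, which the
%%% article extends to batch-dynamic updates, is short enough to sketch
%%% in Python (positive edge weights assumed; names are illustrative):
%%%
%%%     def suitor_matching(adj):
%%%         # adj: dict vertex -> dict neighbor -> positive weight;
%%%         # each vertex proposes to its heaviest neighbor whose current
%%%         # suitor is lighter, giving a 1/2-approximate max weight matching
%%%         suitor = {v: None for v in adj}
%%%         ws = {v: 0.0 for v in adj}           # weight of current suitor edge
%%%         for start in adj:
%%%             u = start
%%%             while u is not None:
%%%                 best, bw = None, 0.0
%%%                 for v, w in adj[u].items():
%%%                     if w > ws[v] and w > bw:
%%%                         best, bw = v, w
%%%                 u_next = None
%%%                 if best is not None:
%%%                     u_next = suitor[best]    # displaced suitor re-proposes
%%%                     suitor[best], ws[best] = u, bw
%%%                 u = u_next
%%%         return {tuple(sorted((u, v))) for v, u in suitor.items()
%%%                 if u is not None and suitor[u] == v}
%%%
%%%     g = {"a": {"b": 2.0}, "b": {"a": 2.0, "c": 3.0}, "c": {"b": 3.0}}
%%%     print(suitor_matching(g))                # {('b', 'c')}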

@Article{vander:2022:SKR,
  author =       "Alexander van der Grinten and Elisabetta Bergamini and
                 Oded Green and David A. Bader and Henning Meyerhenke",
  title =        "Scalable {Katz} Ranking Computation in Large Static
                 and Dynamic Graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.7:1--1.7:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3524615",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3524615",
  abstract =     "Network analysis defines a number of centrality
                 measures to identify the most central nodes in a
                 network. Fast computation of those measures is a major
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}
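
%%% For reference, the textbook fixed-point iteration for Katz
%%% centrality, x = alpha * A^T x + 1, converges whenever alpha is
%%% below the reciprocal of the spectral radius of A.  The article's
%%% ranking guarantees and dynamic updates are not attempted by this
%%% plain Python sketch; names are illustrative.
%%%
%%%     def katz(adj, alpha=0.1, iters=200):
%%%         # adj: dict vertex -> iterable of out-neighbors
%%%         x = {v: 1.0 for v in adj}
%%%         for _ in range(iters):
%%%             nxt = {v: 1.0 for v in adj}
%%%             for u in adj:
%%%                 for v in adj[u]:
%%%                     nxt[v] += alpha * x[u]   # score flows along u -> v
%%%             x = nxt
%%%         return x
%%%
%%%     g = {0: [1, 2], 1: [2], 2: []}
%%%     print(katz(g))   # vertex 2 accumulates the highest score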

@Article{Bhore:2022:ASF,
  author =       "Sujoy Bhore and Guangping Li and Martin
                 N{\"o}llenburg",
  title =        "An Algorithmic Study of Fully Dynamic Independent Sets
                 for Map Labeling",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.8:1--1.8:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3514240",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3514240",
  abstract =     "Map labeling is a classical problem in cartography and
                 geographic information systems that asks to place
                 labels for area, line, and point features, with the
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Schlag:2022:HQH,
  author =       "Sebastian Schlag and Tobias Heuer and Lars
                 Gottesb{\"u}ren and Yaroslav Akhremtsev and Christian
                 Schulz and Peter Sanders",
  title =        "High-Quality Hypergraph Partitioning",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.9:1--1.9:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3529090",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3529090",
  abstract =     "Hypergraphs are a generalization of graphs where edges
                  (aka nets) are allowed to connect more than two
                 vertices. They have a similarly wide range of
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Faraj:2022:BSG,
  author =       "Marcelo Fonseca Faraj and Christian Schulz",
  title =        "Buffered Streaming Graph Partitioning",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.10:1--1.10:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3546911",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3546911",
  abstract =     "Partitioning graphs into blocks of roughly equal size
                 is a widely used tool when processing large graphs.
                 Currently, there is a gap observed in the space of
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Hanauer:2022:RAF,
  author =       "Kathrin Hanauer and Monika Henzinger and Christian
                 Schulz",
  title =        "Recent Advances in Fully Dynamic Graph Algorithms ---
                 a Quick Reference Guide",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.11:1--1.11:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3555806",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3555806",
  abstract =     "In recent years, significant advances have been made
                 in the design and analysis of fully dynamic algorithms.
                 However, these theoretical results have received
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.11",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Liu:2022:DOL,
  author =       "Xu Liu and Andrew Lumsdaine and Mahantesh Halappanavar
                 and Kevin Barker and Assefaw Gebremedhin",
  title =        "Direction-optimizing Label Propagation Framework for
                 Structure Detection in Graphs: Design, Implementation,
                 and Experimental Analysis",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.12:1--1.12:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3564593",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3564593",
  abstract =     "Label Propagation is not only a well-known machine
                 learning algorithm for classification but also an
                 effective method for discovering communities and
                 connected \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.12",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}
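
%%% The core label propagation loop underneath such frameworks is
%%% short; a minimal undirected Python sketch without the
%%% direction-optimizing machinery of the article (tie-breaking and
%%% names are this sketch's assumptions):
%%%
%%%     import random
%%%     from collections import Counter
%%%
%%%     def label_propagation(adj, max_rounds=100):
%%%         # adj: dict vertex -> iterable of neighbors (undirected);
%%%         # every vertex adopts the most frequent neighbor label until
%%%         # no label changes; equal labels then mark communities
%%%         labels = {v: v for v in adj}
%%%         order = list(adj)
%%%         for _ in range(max_rounds):
%%%             random.shuffle(order)
%%%             changed = False
%%%             for v in order:
%%%                 if not adj[v]:
%%%                     continue
%%%                 freq = Counter(labels[u] for u in adj[v])
%%%                 top = max(freq.values())
%%%                 best = min(l for l, c in freq.items() if c == top)
%%%                 if labels[v] != best:
%%%                     labels[v], changed = best, True
%%%             if not changed:
%%%                 break
%%%         return labels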

@Article{Bertrand:2022:ADS,
  author =       "Jules Bertrand and Fanny Dufoss{\'e} and Somesh Singh
                 and Bora U{\c{c}}ar",
  title =        "Algorithms and Data Structures for Hyperedge Queries",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.13:1--1.13:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3568421",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3568421",
  abstract =     "We consider the problem of querying the existence of
                 hyperedges in hypergraphs. More formally, given a
                 hypergraph, we need to answer queries of the form:
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.13",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Lorenz:2022:TUL,
  author =       "Jan-Hendrik Lorenz and Florian W{\"o}rz",
  title =        "Toward an Understanding of Long-tailed Runtimes of
                 {SLS} Algorithms",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.14:1--1.14:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3569170",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3569170",
  abstract =     "The satisfiability problem (SAT) is one of the most
                 famous problems in computer science. Traditionally, its
                 NP-completeness has been used to argue that SAT is
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.14",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Coudert:2022:EFA,
  author =       "David Coudert and Andr{\'e} Nusser and Laurent
                 Viennot",
  title =        "Enumeration of Far-apart Pairs by Decreasing Distance
                 for Faster Hyperbolicity Computation",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.15:1--1.15:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3569169",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3569169",
  abstract =     "Hyperbolicity is a graph parameter that indicates how
                 much the shortest-path distance metric of a graph
                 deviates from a tree metric. It is used in various
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.15",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Gottlob:2022:IUG,
  author =       "Georg Gottlob and Matthias Lanzinger and Davide Mario
                 Longo and Cem Okulmus",
  title =        "Incremental Updates of Generalized Hypertree
                 Decompositions",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "27",
  number =       "1",
  pages =        "1.16:1--1.16:??",
  year =         "2022",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3578266",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Mar 4 09:57:39 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3578266",
  abstract =     "Structural decomposition methods, such as generalized
                 hypertree decompositions, have been successfully used
                 for solving constraint satisfaction problems (CSPs). As
                 \ldots{}",
  acknowledgement = ack-nhfb,
  articleno =    "1.16",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Anderson:2023:BDP,
  author =       "Frederick Anderson and Anirban Ghosh and Matthew
                 Graham and Lucas Mougeot and David Wisnosky",
  title =        "Bounded-Degree Plane Geometric Spanners in Practice",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "28",
  pages =        "1.1:1--1.1:??",
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3582497",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Dec 23 05:30:01 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3582497",
  abstract =     "The construction of bounded-degree plane geometric
                 spanners has been a focus of interest since 2002 when
                 Bose, Gudmundsson, and Smid proposed \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM J. Exp. Algorithmics",
  articleno =    "1.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Crombez:2023:COB,
  author =       "Lo{\"\i}c Crombez and Guilherme D. {Da Fonseca} and
                 Florian Fontan and Yan Gerard and Aldo Gonzalez-Lorenzo
                 and Pascal Lafourcade and Luc Libralesso and Benjamin
                 Mom{\`e}ge and Jack Spalding-Jamieson and Brandon Zhang
                 and Da Wei Zheng",
  title =        "Conflict Optimization for Binary {CSP} Applied to
                 Minimum Partition into Plane Subgraphs and Graph
                 Coloring",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "28",
  pages =        "1.2:1--1.2:??",
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3588869",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Dec 23 05:30:01 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3588869",
  abstract =     "CG:SHOP is an annual geometric optimization challenge
                 and the 2022 edition proposed the problem of coloring a
                 certain geometric graph defined by line \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM J. Exp. Algorithmics",
  articleno =    "1.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Chalkis:2023:PAV,
  author =       "Apostolos Chalkis and Ioannis Z. Emiris and Vissarion
                 Fisikopoulos",
  title =        "A Practical Algorithm for Volume Estimation based on
                 Billiard Trajectories and Simulated Annealing",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "28",
  pages =        "1.3:1--1.3:??",
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3584182",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Dec 23 05:30:01 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3584182",
  abstract =     "We tackle the problem of efficiently approximating the
                 volume of convex polytopes, when these are given in
                 three different representations: \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM J. Exp. Algorithmics",
  articleno =    "1.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Beling:2023:FBM,
  author =       "Piotr Beling",
  title =        "Fingerprinting-based Minimal Perfect Hashing
                 Revisited",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "28",
  pages =        "1.4:1--1.4:??",
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3596453",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Dec 23 05:30:01 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/hash.bib;
                 https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3596453",
  abstract =     "In this paper we study a fingerprint-based minimal
                 perfect hash function ( FMPH for short). While FMPH is
                 not as space-efficient as some other minimal perfect
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM J. Exp. Algorithmics",
  articleno =    "1.4",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Schidler:2023:SBT,
  author =       "Andr{\'e} Schidler and Stefan Szeider",
  title =        "{SAT}-boosted Tabu Search for Coloring Massive
                 Graphs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "28",
  pages =        "1.5:1--1.5:??",
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3603112",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Dec 23 05:30:01 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3603112",
  abstract =     "Graph coloring is the problem of coloring the vertices
                 of a graph with as few colors as possible, avoiding
                 monochromatic edges. It is one of the most fundamental
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM J. Exp. Algorithmics",
  articleno =    "1.5",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Cornejo-Acosta:2023:CHU,
  author =       "Jos{\'e} Alejandro Cornejo-Acosta and Jes{\'u}s
                 Garc{\'\i}a-D{\'\i}az and Julio C{\'e}sar
                 P{\'e}rez-Sansalvador and Roger Z. R{\'\i}os-Mercado
                 and Sa{\'u}l Eduardo Pomares-Hern{\'a}ndez",
  title =        "A Constructive Heuristic for the Uniform Capacitated
                 Vertex $k$-center Problem",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "28",
  pages =        "1.6:1--1.6:??",
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3604911",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Dec 23 05:30:01 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3604911",
  abstract =     "The uniform capacitated vertex k -center problem is an
                 NP -hard combinatorial optimization problem that models
                 real situations where k centers can only \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM J. Exp. Algorithmics",
  articleno =    "1.6",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{deJong:2023:AEC,
  author =       "Rachel G. de Jong and Mark P. J. van der Loo and Frank
                 W. Takes",
  title =        "Algorithms for Efficiently Computing Structural
                 Anonymity in Complex Networks",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "28",
  pages =        "1.7:1--1.7:??",
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3604908",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Dec 23 05:30:01 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3604908",
  abstract =     "This article proposes methods for efficiently
                 computing the anonymity of entities in networks. We do
                 so by partitioning nodes into equivalence classes where
                 a node is k",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM J. Exp. Algorithmics",
  articleno =    "1.7",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Ding:2023:DDA,
  author =       "Hu Ding and Wenjie Liu and Mingquan Ye",
  title =        "A Data-dependent Approach for High-dimensional
                 (Robust) {Wasserstein} Alignment",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "28",
  pages =        "1.8:1--1.8:??",
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3604910",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Dec 23 05:30:01 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3604910",
  abstract =     "Many real-world problems can be formulated as the
                 alignment between two geometric patterns. Previously, a
                 great amount of research focus on the \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM J. Exp. Algorithmics",
  articleno =    "1.8",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Fekete:2023:MPP,
  author =       "S{\'a}ndor P. Fekete and Phillip Keldenich and Dominik
                 Krupke and Stefan Schirra",
  title =        "Minimum Partition into Plane Subgraphs: The {CG:SHOP
                 Challenge 2022}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "28",
  pages =        "1.9:1--1.9:??",
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3604907",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Dec 23 05:30:01 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3604907",
  abstract =     "We give an overview of the 2022 Computational Geometry
                 Challenge targeting the problem Minimum Partition into
                 Plane Subsets, which consists \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM J. Exp. Algorithmics",
  articleno =    "1.9",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Fink:2023:ECP,
  author =       "Simon D. Fink and Matthias Pfretzschner and Ignaz
                 Rutter",
  title =        "Experimental Comparison of {PC-Trees} and {PQ-Trees}",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "28",
  pages =        "1.10:1--1.10:??",
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3611653",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Dec 23 05:30:01 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3611653",
  abstract =     "PQ-trees and PC-trees are data structures that
                 represent sets of linear and circular orders,
                 respectively, subject to constraints that specific
                 subsets of elements \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM J. Exp. Algorithmics",
  articleno =    "1.10",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Zoobi:2023:FKS,
  author =       "Ali {Al Zoobi} and David Coudert and Nicolas Nisse",
  title =        "Finding the $k$ Shortest Simple Paths: Time and Space
                 Trade-offs",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "28",
  pages =        "1.11:1--1.11:??",
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3626567",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Dec 23 05:30:01 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3626567",
  abstract =     "The k shortest simple path problem ( k SSP) asks to
                 compute a set of top- k shortest simple paths from a
                 source to a sink in a digraph. Yen (1971) proposed an
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM J. Exp. Algorithmics",
  articleno =    "1.11",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Mirka:2023:EES,
  author =       "Renee Mirka and David P. Williamson",
  title =        "An Experimental Evaluation of Semidefinite Programming
                 and Spectral Algorithms for Max Cut",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "28",
  pages =        "2.1:1--2.1:??",
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3609426",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Dec 23 05:30:01 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3609426",
  abstract =     "We experimentally evaluate the performance of several
                 Max Cut approximation algorithms. In particular, we
                 compare the results of the Goemans and \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM J. Exp. Algorithmics",
  articleno =    "2.1",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Liberti:2023:RPL,
  author =       "Leo Liberti and Benedetto Manca and Pierre-Louis
                 Poirion",
  title =        "Random Projections for Linear Programming: an Improved
                 Retrieval Phase",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "28",
  pages =        "2.2:1--2.2:??",
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3617506",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Dec 23 05:30:01 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3617506",
  abstract =     "One way to solve very large linear programs in
                 standard form is to apply a random projection to the
                 constraints, then solve the projected linear program [
                 63 ]. This \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM J. Exp. Algorithmics",
  articleno =    "2.2",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}

@Article{Drakulic:2023:RMT,
  author =       "Darko Drakulic and Christelle Loiodice and Vassilissa
                 Lehoux",
  title =        "Routing in Multimodal Transportation Networks with
                 Non-scheduled Lines",
  journal =      j-ACM-J-EXP-ALGORITHMICS,
  volume =       "28",
  pages =        "2.3:1--2.3:??",
  year =         "2023",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/3632969",
  ISSN =         "1084-6654",
  ISSN-L =       "1084-6654",
  bibdate =      "Sat Dec 23 05:30:01 MST 2023",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/jea.bib",
  URL =          "https://dl.acm.org/doi/10.1145/3632969",
  abstract =     "Over the last decades, new mobility offers have
                 emerged to enlarge the coverage and the accessibility
                 of public transportation systems. In many areas, public
                 \ldots{}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM J. Exp. Algorithmics",
  articleno =    "2.3",
  fjournal =     "Journal of Experimental Algorithmics (JEA)",
  journal-URL =  "https://dl.acm.org/loi/jea",
}